[CalendarServer-changes] [9757] CalendarServer/branches/users/gaya/inviteclean

source_changes at macosforge.org
Thu Aug 30 15:48:55 PDT 2012


Revision: 9757
          http://trac.macosforge.org/projects/calendarserver/changeset/9757
Author:   gaya at apple.com
Date:     2012-08-30 15:48:54 -0700 (Thu, 30 Aug 2012)
Log Message:
-----------
sync with trunk

Modified Paths:
--------------
    CalendarServer/branches/users/gaya/inviteclean/calendarserver/accesslog.py
    CalendarServer/branches/users/gaya/inviteclean/calendarserver/tap/caldav.py
    CalendarServer/branches/users/gaya/inviteclean/calendarserver/tap/test/test_caldav.py
    CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/backup_pg.py
    CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/bootstrapdatabase.py
    CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/principals.py
    CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/shell/directory.py
    CalendarServer/branches/users/gaya/inviteclean/calendarserver/webadmin/resource.py
    CalendarServer/branches/users/gaya/inviteclean/calendarserver/webadmin/test/test_resource.py
    CalendarServer/branches/users/gaya/inviteclean/conf/caldavd-apple.plist
    CalendarServer/branches/users/gaya/inviteclean/contrib/migration/calendarmigrator.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/migration/test/test_migrator.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/config.dist.plist
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/config.plist
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/ical.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/population.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/profiles.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/sim.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_ical.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_population.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_profiles.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/thresholds.json
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/stats.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/test_stats.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/tools/protocolanalysis.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/tools/request_monitor.py
    CalendarServer/branches/users/gaya/inviteclean/setup.py
    CalendarServer/branches/users/gaya/inviteclean/support/Makefile.Apple
    CalendarServer/branches/users/gaya/inviteclean/support/build.sh
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/adbapi2.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/parseschema.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/syntax.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_parseschema.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_sqlsyntax.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/ienterprise.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/test/test_adbapi2.py
    CalendarServer/branches/users/gaya/inviteclean/twext/internet/threadutils.py
    CalendarServer/branches/users/gaya/inviteclean/twext/python/sendmsg.c
    CalendarServer/branches/users/gaya/inviteclean/twext/web2/dav/resource.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/customxml.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/database.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/datafilters/peruserdata.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/datafilters/test/test_peruserdata.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/aggregate.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/appleopendirectory.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/calendaruserproxy.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/directory.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/idirectory.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/ldapdirectory.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/principal.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_buildquery.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_directory.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_ldapdirectory.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_livedirectory.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/extensions.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/mail.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/method/put_addressbook_common.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/method/put_common.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/resource.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/sharing.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/stdconfig.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/storebridge.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_database.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_extensions.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_mail.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_resource.py
    CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_sharing.py
    CalendarServer/branches/users/gaya/inviteclean/txdav/base/datastore/file.py
    CalendarServer/branches/users/gaya/inviteclean/txdav/base/datastore/util.py
    CalendarServer/branches/users/gaya/inviteclean/txdav/caldav/datastore/sql.py
    CalendarServer/branches/users/gaya/inviteclean/txdav/caldav/datastore/test/common.py
    CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/file.py
    CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql.py
    CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/current.sql
    CalendarServer/branches/users/gaya/inviteclean/txdav/idav.py

Added Paths:
-----------
    CalendarServer/branches/users/gaya/inviteclean/calendarserver/controlsocket.py
    CalendarServer/branches/users/gaya/inviteclean/contrib/performance/LogNormalVisualization.numbers
    CalendarServer/branches/users/gaya/inviteclean/contrib/tools/readStats.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/record.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_record.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/fixtures.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/queue.py
    CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/test/test_fixtures.py
    CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_11_to_12.sql
    CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_11_to_12.sql

Property Changed:
----------------
    CalendarServer/branches/users/gaya/inviteclean/


Property changes on: CalendarServer/branches/users/gaya/inviteclean
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:9408-9546
   + /CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:9408-9756

Modified: CalendarServer/branches/users/gaya/inviteclean/calendarserver/accesslog.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/calendarserver/accesslog.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/calendarserver/accesslog.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -25,11 +25,17 @@
     "AMPLoggingFactory",
 ]
 
+import collections
 import datetime
+import json
 import os
+try:
+    import psutil
+except ImportError:
+    psutil = None
 import time
 
-from twisted.internet import protocol
+from twisted.internet import protocol, task
 from twisted.protocols import amp
 from twext.web2 import iweb
 from txdav.xml import element as davxml
@@ -60,11 +66,10 @@
 
     def emit(self, eventDict):
 
+        format = None
+        formatArgs = None
         if eventDict.get("interface") is iweb.IRequest:
             
-            if config.GlobalStatsLoggingFrequency is not 0: 
-                self.logGlobalHit()
-
             request = eventDict["request"]
             response = eventDict["response"]
             loginfo = eventDict["loginfo"]
@@ -120,13 +125,27 @@
                 ' "%(referer)s" "%(userAgent)s"'
             )
 
+            formatArgs = {
+                "host"                : request.remoteAddr.host,
+                "uid"                 : uid,
+                "date"                : self.logDateString(response.headers.getHeader("date", 0)),
+                "method"              : method,
+                "uri"                 : request.uri.replace('"', "%22"),
+                "protocolVersion"     : ".".join(str(x) for x in request.clientproto),
+                "statusCode"          : response.code,
+                "bytesSent"           : loginfo.bytesSent,
+                "referer"             : request.headers.getHeader("referer", "-"),
+                "userAgent"           : request.headers.getHeader("user-agent", "-"),
+            }
+
+            # Add extended items to format and formatArgs
             if config.EnableExtendedAccessLog:
-                formats = [
-                    format,
-                    # Performance monitoring extensions
-                    'i=%(serverInstance)s or=%(outstandingRequests)s',
-                ]
-
+                format += ' i=%(serverInstance)s'
+                formatArgs["serverInstance"] = config.LogID if config.LogID else "0"
+                
+                format += ' or=%(outstandingRequests)s'
+                formatArgs["outstandingRequests"] = request.chanRequest.channel.factory.outstandingRequests
+                
                 # Tags for time stamps collected along the way - the first one in the list is the initial
                 # time for request creation - we use that to track the entire request/response time
                 nowtime = time.time()
@@ -134,13 +153,16 @@
                     basetime = request.timeStamps[0][1]
                     request.timeStamps[0] = ("t", time.time(),)
                     for tag, timestamp in request.timeStamps:
-                        formats.append("%s=%.1f" % (tag, (timestamp - basetime) * 1000))
+                        format += " %s=%%(%s).1f" % (tag, tag,)
+                        formatArgs[tag] = (timestamp - basetime) * 1000
                         if tag != "t":
                             basetime = timestamp
                     if len(request.timeStamps) > 1:
-                        formats.append("%s=%.1f" % ("t-log", (nowtime - basetime) * 1000))
+                        format += " t-log=%(t-log).1f"
+                        formatArgs["t-log"] = (timestamp - basetime) * 1000
                 else:
-                    formats.append("%s=%.1f" % ("t", (nowtime - request.timeStamps[0][1]) * 1000))
+                    format += " t=%(t).1f"
+                    formatArgs["t"] = (nowtime - request.timeStamps[0][1]) * 1000
 
                 if hasattr(request, "extendedLogItems"):
                     for k, v in request.extendedLogItems.iteritems():
@@ -148,36 +170,56 @@
                         v = str(v).replace('"', "%22")
                         if " " in v:
                             v = '"%s"' % (v,)
-                        formats.append("%s=%s" % (k, v))
+                        format += " %s=%%(%s)s" % (k, k,)
+                        formatArgs[k] = v
 
+                # Add the name of the XML error element for debugging purposes
+                if hasattr(response, "error"):
+                    format += " err=%(err)s"
+                    formatArgs["err"] = response.error.qname()[1]
+
                 fwdHeaders = request.headers.getRawHeaders("x-forwarded-for", "")
                 if fwdHeaders:
                     # Limit each x-forwarded-header to 50 in case someone is
                     # trying to overwhelm the logs
                     forwardedFor = ",".join([hdr[:50] for hdr in fwdHeaders])
                     forwardedFor = forwardedFor.replace(" ", "")
-                    formats.append("fwd=%(fwd)s")
-                else:
-                    forwardedFor = ""
+                    format += " fwd=%(fwd)s"
+                    formatArgs["fwd"] = forwardedFor
 
-                format = " ".join(formats)
+        elif "overloaded" in eventDict:
+            overloaded = eventDict.get("overloaded")
 
+            format = (
+                '%(host)s - %(uid)s [%(date)s]'
+                ' "%(method)s"'
+                ' %(statusCode)s %(bytesSent)d'
+                ' "%(referer)s" "%(userAgent)s"'
+            )
+
             formatArgs = {
-                "host"                : request.remoteAddr.host,
-                "uid"                 : uid,
-                "date"                : self.logDateString(response.headers.getHeader("date", 0)),
-                "method"              : method,
-                "uri"                 : request.uri.replace('"', "%22"),
-                "protocolVersion"     : ".".join(str(x) for x in request.clientproto),
-                "statusCode"          : response.code,
-                "bytesSent"           : loginfo.bytesSent,
-                "referer"             : request.headers.getHeader("referer", "-"),
-                "userAgent"           : request.headers.getHeader("user-agent", "-"),
-                "serverInstance"      : config.LogID,
-                "outstandingRequests" : request.chanRequest.channel.factory.outstandingRequests,
-                "fwd"                 : forwardedFor,
+                "host"                : overloaded.transport.hostname,
+                "uid"                 : "-",
+                "date"                : self.logDateString(time.time()),
+                "method"              : "???",
+                "uri"                 : "",
+                "protocolVersion"     : "",
+                "statusCode"          : 503,
+                "bytesSent"           : 0,
+                "referer"             : "-",
+                "userAgent"           : "-",
             }
 
+            if config.EnableExtendedAccessLog:
+                format += ' p=%(serverPort)s'
+                formatArgs["serverPort"] = overloaded.transport.server.port
+                
+                format += ' or=%(outstandingRequests)s'
+                formatArgs["outstandingRequests"] = overloaded.outstandingRequests
+
+
+        # Write anything we got to the log and stats
+        if format is not None:
             # sanitize output to mitigate log injection
             for k,v in formatArgs.items():
                 if not isinstance(v, basestring):
@@ -186,38 +228,26 @@
                 v = v.replace("\n", "\\n")
                 v = v.replace("\"", "\\\"")
                 formatArgs[k] = v
+    
+            formatArgs["type"] = "access-log"
+            formatArgs["log-format"] = format
+            self.logStats(formatArgs)
 
-            self.logMessage(format % formatArgs)
-
-        elif "overloaded" in eventDict:
-            overloaded = eventDict.get("overloaded")
-            format_str = '%s - - [%s] "???" 503 0 "-" "-" [0.0 ms]'
-            format_data = (
-                overloaded.transport.hostname,
-                self.logDateString(time.time()),
-            )
-            if config.EnableExtendedAccessLog:
-                format_str += " [%s %s]"
-                format_data += (
-                    overloaded.transport.server.port,
-                    overloaded.outstandingRequests,
-                )
-            self.logMessage(format_str % format_data)
-
 class RotatingFileAccessLoggingObserver(CommonAccessLoggingObserverExtensions):
     """
     Class to do "apache" style access logging to a rotating log file. The log
     file is rotated after midnight each day.
+    
+    This class also currently handles the collection of system and log statistics.
     """
 
     def __init__(self, logpath):
-        self.logpath = logpath
-        self.globalHitCount = 0 
-        self.globalHitHistory = [] 
-        for _ignore in range(0, config.GlobalStatsLoggingFrequency + 1): 
-            self.globalHitHistory.append({"time":int(time.time()), "hits":0})
+        self.logpath = logpath        
 
-    def logMessage(self, message, allowrotate=True):
+        self.systemStats = None
+        self.statsByMinute = []
+
+    def accessLog(self, message, allowrotate=True):
         """
         Log a message to the file and possibly rotate if date has changed.
 
@@ -231,21 +261,6 @@
             self.rotate()
         self.f.write(message + "\n")
 
-    def rotateGlobalHitHistoryStats(self): 
-        """ 
-        Roll the global hit history array: push the current stats as 
-        the last element; pop the first (oldest) element and reschedule the task. 
-        """ 
-
-        self.globalHitHistory.append({"time":int(time.time()), "hits":self.globalHitCount}) 
-        del self.globalHitHistory[0] 
-        log.debug("rotateGlobalHitHistoryStats: %s" % (self.globalHitHistory,))
-        if config.GlobalStatsLoggingFrequency is not 0: 
-            self.reactor.callLater(
-                config.GlobalStatsLoggingPeriod * 60 / config.GlobalStatsLoggingFrequency, 
-                self.rotateGlobalHitHistoryStats
-            ) 
-
     def start(self):
         """
         Start logging. Open the log file and log an "open" message.
@@ -253,21 +268,19 @@
 
         super(RotatingFileAccessLoggingObserver, self).start()
         self._open()
-        self.logMessage("Log opened - server start: [%s]." % (datetime.datetime.now().ctime(),))
- 
-        # Need a reactor for the callLater() support for rotateGlobalHitHistoryStats() 
-        from twisted.internet import reactor 
-        self.reactor = reactor 
-        self.rotateGlobalHitHistoryStats() 
+        self.accessLog("Log opened - server start: [%s]." % (datetime.datetime.now().ctime(),))
 
     def stop(self):
         """
         Stop logging. Close the log file and log a "close" message.
         """
 
-        self.logMessage("Log closed - server stop: [%s]." % (datetime.datetime.now().ctime(),), False)
+        self.accessLog("Log closed - server stop: [%s]." % (datetime.datetime.now().ctime(),), False)
         super(RotatingFileAccessLoggingObserver, self).stop()
         self._close()
+        
+        if self.systemStats is not None:
+            self.systemStats.stop()
 
     def _open(self):
         """
@@ -336,100 +349,292 @@
         if os.path.exists(newpath):
             log.msg("Cannot rotate log file to %s because it already exists." % (newpath,))
             return
-        self.logMessage("Log closed - rotating: [%s]." % (datetime.datetime.now().ctime(),), False)
+        self.accessLog("Log closed - rotating: [%s]." % (datetime.datetime.now().ctime(),), False)
         log.msg("Rotating log file to: %s" % (newpath,), system="Logging")
         self.f.close()
         os.rename(self.logpath, newpath)
         self._open()
-        self.logMessage("Log opened - rotated: [%s]." % (datetime.datetime.now().ctime(),), False)
+        self.accessLog("Log opened - rotated: [%s]." % (datetime.datetime.now().ctime(),), False)
 
-    def logGlobalHit(self): 
+    def logStats(self, stats): 
         """ 
-        Increment the service-global hit counter 
+        Update stats
         """ 
 
-        self.globalHitCount += 1 
+        if self.systemStats is None:
+            self.systemStats = SystemMonitor()
 
-    def getGlobalHits(self): 
+        # Currently only storing stats for access log type
+        if "type" not in stats or stats["type"] != "access-log":
+            return
+    
+        currentStats = self.ensureSequentialStats()
+        self.updateStats(currentStats, stats)
+        
+        if stats["type"] == "access-log":
+            self.accessLog(stats["log-format"] % stats)
+
+    def getStats(self): 
         """ 
-        Return the global hit stats 
-        """ 
+        Return the stats 
+        """
+        
+        if self.systemStats is None:
+            self.systemStats = SystemMonitor()
 
-        stats = '<?xml version="1.0" encoding="UTF-8"?><plist version="1.0">' 
-        stats += "<dict><key>totalHits</key><integer>%d</integer>" 
-        stats += "<key>recentHits</key><dict>" 
-        stats += "<key>count</key><integer>%d</integer>" 
-        stats += "<key>since</key><integer>%d</integer>" 
-        stats += "<key>period</key><integer>%d</integer>" 
-        stats += "<key>frequency</key><integer>%d</integer>" 
-        stats += "</dict></dict></plist>" 
-        return stats % (
-            self.globalHitCount,
-            self.globalHitCount - self.globalHitHistory[0]["hits"], 
-            self.globalHitHistory[0]["time"],
-            config.GlobalStatsLoggingPeriod,
-            config.GlobalStatsLoggingFrequency
-        ) 
+        # The current stats
+        currentStats = self.ensureSequentialStats()
 
-class LogMessage(amp.Command):
+        # Get previous minute details
+        index = min(2, len(self.statsByMinute))
+        if index > 0:
+            previousMinute = self.statsByMinute[-index][1]
+        else:
+            previousMinute = self.initStats()
+
+        # Do five minute aggregate
+        fiveMinutes = self.initStats()
+        index = min(6, len(self.statsByMinute))
+        for i in range(-index, -1):
+            stat = self.statsByMinute[i][1]
+            self.mergeStats(fiveMinutes, stat)
+
+        # Do one hour aggregate
+        oneHour = self.initStats()
+        index = min(61, len(self.statsByMinute))
+        for i in range(-index, -1):
+            stat = self.statsByMinute[i][1]
+            self.mergeStats(oneHour, stat)
+
+        printStats = {
+            "System":self.systemStats.items,
+            "Current":currentStats,
+            "1 Minute":previousMinute,
+            "5 Minutes":fiveMinutes,
+            "1 Hour":oneHour,
+        }
+        return json.dumps(printStats)
+
+    def ensureSequentialStats(self):
+        """
+        Make sure the list of timed stats is contiguous with respect to time.
+        """
+        dtindex = int(time.time() / 60.0) * 60
+
+        if len(self.statsByMinute) > 0:
+            if self.statsByMinute[-1][0] != dtindex:
+                oldindex = self.statsByMinute[-1][0]
+                while oldindex != dtindex:
+                    oldindex += 60
+                    self.statsByMinute.append((oldindex, self.initStats(),))
+        else:
+            self.statsByMinute.append((dtindex, self.initStats(),))
+        return self.statsByMinute[-1][1]
+
+    def initStats(self):
+        
+        def initTimeHistogram():
+            return {
+                "<10ms": 0,
+                "10ms<->100ms" : 0,
+                "100ms<->1s"   : 0,
+                "1s<->10s"     : 0,
+                "10s<->30s"    : 0,
+                "30s<->60s"    : 0,
+                ">60s"         : 0,
+                "Over 1s"      : 0,
+                "Over 10s"     : 0,
+            }
+
+        return {
+            "requests" : 0,
+            "method"   : collections.defaultdict(int),
+            "uid"      : collections.defaultdict(int),
+            "500"      : 0,
+            "t"        : 0.0,
+            "t-resp-wr": 0.0,
+            "slots"    : 0,
+            "T"        : initTimeHistogram(),
+            "T-RESP-WR": initTimeHistogram(),
+            "T-MAX"    : 0.0,
+            "cpu"      : self.systemStats.items["cpu use"],
+        }
+
+    def updateStats(self, current, stats):
+        # Gather specific information and aggregate into our persistent stats
+        if current["requests"] == 0:
+            current["cpu"] = 0.0
+        current["requests"] += 1
+        current["method"][stats["method"]] += 1
+        current["uid"][stats["uid"]] += 1
+        if stats["statusCode"] >= 500:
+            current["500"] += 1
+        current["t"] += stats.get("t", 0.0)
+        current["t-resp-wr"] += stats.get("t-resp-wr", 0.0)
+        current["slots"] += stats.get("outstandingRequests", 0)
+        current["cpu"] += self.systemStats.items["cpu use"]
+        
+        def histogramUpdate(t, key):
+            if t >= 60000.0:
+                current[key][">60s"] += 1
+            elif t >= 30000.0:
+                current[key]["30s<->60s"] += 1
+            elif t >= 10000.0:
+                current[key]["10s<->30s"] += 1
+            elif t >= 1000.0:
+                current[key]["1s<->10s"] += 1
+            elif t >= 100.0:
+                current[key]["100ms<->1s"] += 1
+            elif t >= 10.0:
+                current[key]["10ms<->100ms"] += 1
+            else:
+                current[key]["<10ms"] += 1
+            if t >= 1000.0:
+                current[key]["Over 1s"] += 1
+            if t >= 10000.0:
+                current[key]["Over 10s"] += 1
+            
+        t = stats.get("t", None)
+        if t is not None:
+            histogramUpdate(t, "T")
+        current["T-MAX"] = max(current["T-MAX"], t)
+        t = stats.get("t-resp-wr", None)
+        if t is not None:
+            histogramUpdate(t, "T-RESP-WR")
+
+    def mergeStats(self, current, stats):
+        # Gather specific information and aggregate into our persistent stats
+        if current["requests"] == 0:
+            current["cpu"] = 0.0
+        current["requests"] += stats["requests"]
+        for method in stats["method"].keys():
+            current["method"][method] += stats["method"][method]
+        for uid in stats["uid"].keys():
+            current["uid"][uid] += stats["uid"][uid]
+        current["500"] += stats["500"]
+        current["t"] += stats["t"]
+        current["t-resp-wr"] += stats["t-resp-wr"]
+        current["slots"] += stats["slots"]
+        current["cpu"] += stats["cpu"]
+        
+        def histogramUpdate(t, key):
+            if t >= 60000.0:
+                current[key][">60s"] += 1
+            elif t >= 30000.0:
+                current[key]["30s<->60s"] += 1
+            elif t >= 10000.0:
+                current[key]["10s<->30s"] += 1
+            elif t >= 1000.0:
+                current[key]["1s<->10s"] += 1
+            elif t >= 100.0:
+                current[key]["100ms<->1s"] += 1
+            elif t >= 10.0:
+                current[key]["10ms<->100ms"] += 1
+            else:
+                current[key]["<10ms"] += 1
+            if t >= 1000.0:
+                current[key]["Over 1s"] += 1
+            if t >= 10000.0:
+                current[key]["Over 10s"] += 1
+        
+        for bin in stats["T"].keys():
+            current["T"][bin] += stats["T"][bin]
+        current["T-MAX"] = max(current["T-MAX"], stats["T-MAX"])
+        for bin in stats["T-RESP-WR"].keys():
+            current["T-RESP-WR"][bin] += stats["T-RESP-WR"][bin]
+
+
+
+class SystemMonitor(object):
+    """
+    Keeps track of system usage information. This installs a reactor task that
+    runs about once per second and tracks system use.
+    """
+    
+    CPUStats = collections.namedtuple("CPUStats", ("total", "idle",))
+
+    def __init__(self):
+        self.items = {
+            "cpu count"     : psutil.NUM_CPUS if psutil is not None else -1,
+            "cpu use"       : 0.0,
+            "memory used"   : 0,
+            "memory percent": 0.0,
+            "start time"    : time.time(),
+        }
+        
+        if psutil is not None:
+            times = psutil.cpu_times()
+            self.previous_cpu = SystemMonitor.CPUStats(sum(times), times.idle,)
+        else:
+            self.previous_cpu = SystemMonitor.CPUStats(0, 0)
+        
+        self.task = task.LoopingCall(self.update)
+        self.task.start(1.0)
+    
+    def stop(self):
+        """
+        Just stop the task
+        """
+        self.task.stop()
+
+    def update(self):
+        
+        # CPU usage based on diff'ing CPU times
+        if psutil is not None:
+            times = psutil.cpu_times()
+            cpu_now = SystemMonitor.CPUStats(sum(times), times.idle,)
+            try:
+                self.items["cpu use"] = 100.0 * (1.0 - (cpu_now.idle - self.previous_cpu.idle) / (cpu_now.total - self.previous_cpu.total))
+            except ZeroDivisionError:
+                self.items["cpu use"] = 0.0
+            self.previous_cpu = cpu_now
+        
+        # Memory usage
+        if psutil is not None:
+            mem = psutil.virtual_memory()
+            self.items["memory used"] = mem.used
+            self.items["memory percent"] = mem.percent
+
+    
+class LogStats(amp.Command):
     arguments = [("message", amp.String())]
 
-class LogGlobalHit(amp.Command): 
-    arguments = [] 
 
 class AMPCommonAccessLoggingObserver(CommonAccessLoggingObserverExtensions):
-    def __init__(self, mode, id):
-        self.mode = mode
-        self.id = id
+    def __init__(self):
         self.protocol = None
         self._buffer = []
 
+
     def flushBuffer(self):
         if self._buffer:
             for msg in self._buffer:
-                self.logMessage(msg)
+                self.logStats(msg)
 
-    def start(self):
-        super(AMPCommonAccessLoggingObserver, self).start()
 
-        from twisted.internet import reactor
+    def addClient(self, connectedClient):
+        """
+        An AMP client connected; hook it up to this observer.
+        """
+        self.protocol = connectedClient
+        self.flushBuffer()
 
-        def _gotProtocol(proto):
-            self.protocol = proto
-            self.flushBuffer()
 
-        self.client = protocol.ClientCreator(reactor, amp.AMP)
-        if self.mode == "AF_UNIX":
-            d = self.client.connectUNIX(self.id)
-        else:
-            d = self.client.connectTCP("localhost", self.id)
-        d.addCallback(_gotProtocol)
+    def logStats(self, message): 
+        """ 
+        Log server stats via the remote AMP Protocol 
+        """ 
 
-
-    def logMessage(self, message):
-        """
-        Log a message to the remote AMP Protocol
-        """
         if self.protocol is not None:
-            # XXX: Yeah we're not waiting for anything to happen here.
-            #      but we will log an error.
+            message = json.dumps(message)
             if isinstance(message, unicode):
                 message = message.encode("utf-8")
-            d = self.protocol.callRemote(LogMessage, message=message)
-            d.addErrback(log.err)
-        else:
+            d = self.protocol.callRemote(LogStats, message=message) 
+            d.addErrback(log.err) 
+        else: 
             self._buffer.append(message)
 
-    def logGlobalHit(self): 
-        """ 
-        Log a server hit via the remote AMP Protocol 
-        """ 
 
-        if self.protocol is not None: 
-            d = self.protocol.callRemote(LogGlobalHit) 
-            d.addErrback(log.err) 
-        else: 
-            log.msg("logGlobalHit() only works with an AMP Protocol") 
 
 class AMPLoggingProtocol(amp.AMP):
     """
@@ -441,27 +646,30 @@
 
         super(AMPLoggingProtocol, self).__init__()
 
-    def logMessage(self, message):
-        self.observer.logMessage(message)
-        return {}
+    def logStats(self, message): 
+        stats = json.loads(message)
+        self.observer.logStats(stats) 
+        return {} 
 
-    LogMessage.responder(logMessage)
+    LogStats.responder(logStats)
 
-    def logGlobalHit(self): 
-        self.observer.logGlobalHit() 
-        return {} 
 
-    LogGlobalHit.responder(logGlobalHit)
 
 class AMPLoggingFactory(protocol.ServerFactory):
     def __init__(self, observer):
         self.observer = observer
 
+
     def doStart(self):
         self.observer.start()
 
+
     def doStop(self):
         self.observer.stop()
 
+
     def buildProtocol(self, addr):
         return AMPLoggingProtocol(self.observer)
+
+
+

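For reference, the per-minute bookkeeping above hinges on ensureSequentialStats() keeping statsByMinute contiguous in time: before the current minute's bucket is updated, any minutes that passed without traffic are backfilled with empty buckets. A minimal standalone sketch of that pattern, with a hypothetical emptyBucket() standing in for initStats():

    import time

    def ensureSequential(statsByMinute, emptyBucket):
        # Align to the start of the current minute (epoch seconds).
        dtindex = int(time.time() / 60) * 60
        if statsByMinute:
            oldindex = statsByMinute[-1][0]
            # Append one empty bucket per minute that passed without traffic.
            while oldindex != dtindex:
                oldindex += 60
                statsByMinute.append((oldindex, emptyBucket()))
        else:
            statsByMinute.append((dtindex, emptyBucket()))
        # The caller updates the newest bucket in place.
        return statsByMinute[-1][1]

With the list kept contiguous, the one-minute, five-minute, and one-hour views in getStats() reduce to merging slices of the most recent entries.
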
Copied: CalendarServer/branches/users/gaya/inviteclean/calendarserver/controlsocket.py (from rev 9756, CalendarServer/trunk/calendarserver/controlsocket.py)
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/calendarserver/controlsocket.py	                        (rev 0)
+++ CalendarServer/branches/users/gaya/inviteclean/calendarserver/controlsocket.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -0,0 +1,128 @@
+##
+# Copyright (c) 2012 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Multiplexing control socket.  Currently used for messages related to queueing
+and logging, but extensible to more.
+"""
+
+from zope.interface import implements
+
+from twisted.internet.protocol import Factory
+from twisted.protocols.amp import BinaryBoxProtocol, IBoxReceiver, IBoxSender
+from twisted.application.service import Service
+
+class DispatchingSender(object):
+    implements(IBoxSender)
+
+    def __init__(self, sender, route):
+        self.sender = sender
+        self.route = route
+
+
+    def sendBox(self, box):
+        box['_route'] = self.route
+        self.sender.sendBox(box)
+
+
+    def unhandledError(self, failure):
+        self.sender.unhandledError(failure)
+
+
+
+class DispatchingBoxReceiver(object):
+    implements(IBoxReceiver)
+
+    def __init__(self, receiverMap):
+        self.receiverMap = receiverMap
+
+
+    def startReceivingBoxes(self, boxSender):
+        for key, receiver in self.receiverMap.items():
+            receiver.startReceivingBoxes(DispatchingSender(boxSender, key))
+
+
+    def ampBoxReceived(self, box):
+        self.receiverMap[box['_route']].ampBoxReceived(box)
+
+
+    def stopReceivingBoxes(self, reason):
+        for receiver in self.receiverMap.values():
+            receiver.stopReceivingBoxes(reason)
+
+
+
+class ControlSocket(Factory, object):
+    """
+    An AMP control socket that aggregates other AMP factories.  This is the
+    service that listens in the master process.
+    """
+
+    def __init__(self):
+        """
+        Initialize this L{ControlSocket}.
+        """
+        self._factoryMap = {}
+
+
+    def addFactory(self, key, otherFactory):
+        """
+        Add another L{Factory} - one that returns L{AMP} instances - to this
+        socket.
+        """
+        self._factoryMap[key] = otherFactory
+
+
+    def buildProtocol(self, addr):
+        """
+        Build a protocol that multiplexes AMP boxes to and from the added factories.
+        """
+        receiverMap = {}
+        for k, f  in self._factoryMap.items():
+            receiverMap[k] = f.buildProtocol(addr)
+        return BinaryBoxProtocol(DispatchingBoxReceiver(receiverMap))
+
+
+    def doStart(self):
+        """
+        Relay start notification to all added factories.
+        """
+        for f in self._factoryMap.values():
+            f.doStart()
+
+
+    def doStop(self):
+        """
+        Relay stop notification to all added factories.
+        """
+        for f in self._factoryMap.values():
+            f.doStop()
+
+
+
+class ControlSocketConnectingService(Service, object):
+
+    def __init__(self, endpointFactory, controlSocket):
+        super(ControlSocketConnectingService, self).__init__()
+        self.endpointFactory = endpointFactory
+        self.controlSocket = controlSocket
+
+
+    def privilegedStartService(self):
+        from twisted.internet import reactor
+        endpoint = self.endpointFactory(reactor)
+        endpoint.connect(self.controlSocket)
+

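To make the multiplexing concrete, here is a minimal sketch (not part of the commit; the stub classes are hypothetical stand-ins for real IBoxSender/IBoxReceiver implementations) showing how a box is stamped with its route on the way out and dispatched on its _route key on the way in:

    from calendarserver.controlsocket import (
        DispatchingSender, DispatchingBoxReceiver)

    class StubSender(object):
        """Records outgoing boxes instead of writing them to a transport."""
        def __init__(self):
            self.sent = []
        def sendBox(self, box):
            self.sent.append(box)
        def unhandledError(self, failure):
            pass

    class StubReceiver(object):
        """Remembers the boxes it is given."""
        def __init__(self):
            self.boxes = []
        def startReceivingBoxes(self, boxSender):
            self.boxSender = boxSender
        def ampBoxReceived(self, box):
            self.boxes.append(box)
        def stopReceivingBoxes(self, reason):
            pass

    wire = StubSender()
    logReceiver = StubReceiver()
    dispatcher = DispatchingBoxReceiver({"log": logReceiver})
    dispatcher.startReceivingBoxes(wire)

    # Outbound: the per-route sender stamps the box with its route key.
    logReceiver.boxSender.sendBox({"message": "hello"})
    assert wire.sent == [{"message": "hello", "_route": "log"}]

    # Inbound: boxes are dispatched to the receiver named by _route.
    dispatcher.ampBoxReceived({"_route": "log", "message": "hi"})
    assert logReceiver.boxes[-1]["message"] == "hi"
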
Modified: CalendarServer/branches/users/gaya/inviteclean/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/calendarserver/tap/caldav.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/calendarserver/tap/caldav.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -61,6 +61,7 @@
 from twext.web2.channel.http import LimitingHTTPFactory, SSLRedirectRequest
 from twext.web2.metafd import ConnectionLimiter, ReportingHTTPService
 
+from txdav.common.datastore.sql_tables import schema
 from txdav.common.datastore.upgrade.sql.upgrade import (
     UpgradeDatabaseSchemaService, UpgradeDatabaseDataService,
 )
@@ -88,6 +89,13 @@
 
 from calendarserver.tap.util import ConnectionDispenser
 
+from calendarserver.controlsocket import ControlSocket
+from twisted.internet.endpoints import UNIXClientEndpoint, TCP4ClientEndpoint
+
+from calendarserver.controlsocket import ControlSocketConnectingService
+from twisted.protocols.amp import AMP
+from twext.enterprise.queue import WorkerFactory as QueueWorkerFactory
+from twext.enterprise.queue import PeerConnectionPool
 from calendarserver.accesslog import AMPCommonAccessLoggingObserver
 from calendarserver.accesslog import AMPLoggingFactory
 from calendarserver.accesslog import RotatingFileAccessLoggingObserver
@@ -117,6 +125,14 @@
 
 from twisted.python.util import uidFromString, gidFromString
 
+
+# Control socket message-routing constants.
+_LOG_ROUTE = "log"
+_QUEUE_ROUTE = "queue"
+
+_CONTROL_SERVICE_NAME = "control"
+
+
 def getid(uid, gid):
     if uid is not None:
         uid = uidFromString(uid)
@@ -187,7 +203,7 @@
 class CalDAVStatisticsProtocol (Protocol):
 
     def connectionMade(self):
-        stats = self.factory.logger.observer.getGlobalHits()
+        stats = self.factory.logger.observer.getStats()
         self.transport.write("%s\r\n" % (stats,))
         self.transport.loseConnection()
 
@@ -762,22 +778,43 @@
         #
         self.log_info("Setting up service")
 
+        bonusServices = []
+
         if config.ProcessType == "Slave":
+            logObserver = AMPCommonAccessLoggingObserver()
+
             if config.ControlSocket:
-                mode = "AF_UNIX"
                 id = config.ControlSocket
-                self.log_info("Logging via AF_UNIX: %s" % (id,))
+                self.log_info("Control via AF_UNIX: %s" % (id,))
+                endpointFactory = lambda reactor: UNIXClientEndpoint(
+                    reactor, id)
             else:
-                mode = "AF_INET"
                 id = int(config.ControlPort)
-                self.log_info("Logging via AF_INET: %d" % (id,))
-
-            logObserver = AMPCommonAccessLoggingObserver(mode, id)
-
+                self.log_info("Control via AF_INET: %d" % (id,))
+                endpointFactory = lambda reactor: TCP4ClientEndpoint(
+                    reactor, "127.0.0.1", id)
+            controlSocketClient = ControlSocket()
+            class LogClient(AMP):
+                def startReceivingBoxes(self, sender):
+                    super(LogClient, self).startReceivingBoxes(sender)
+                    logObserver.addClient(self)
+            f = Factory()
+            f.protocol = LogClient
+            controlSocketClient.addFactory(_LOG_ROUTE, f)
+            from txdav.common.datastore.sql import CommonDataStore as SQLStore
+            if isinstance(store, SQLStore):
+                def queueMasterAvailable(connectionFromMaster):
+                    store.queuer = connectionFromMaster
+                queueFactory = QueueWorkerFactory(store.newTransaction, schema,
+                                                  queueMasterAvailable)
+                controlSocketClient.addFactory(_QUEUE_ROUTE, queueFactory)
+            controlClient = ControlSocketConnectingService(
+                endpointFactory, controlSocketClient
+            )
+            bonusServices.append(controlClient)
         elif config.ProcessType == "Single":
             # Make sure no old socket files are lying around.
             self.deleteStaleSocketFiles()
-
             logObserver = RotatingFileAccessLoggingObserver(
                 config.AccessLogFile,
             )
@@ -785,18 +822,20 @@
         self.log_info("Configuring access log observer: %s" % (logObserver,))
 
         service = CalDAVService(logObserver)
+        for bonus in bonusServices:
+            bonus.setServiceParent(service)
 
         rootResource = getRootResource(config, store, additional)
         service.rootResource = rootResource
 
         underlyingSite = Site(rootResource)
-        
-        # Need to cache SSL port info here so we can access it in a Request to deal with the
-        # possibility of being behind an SSL decoder
+
+        # Need to cache SSL port info here so we can access it in a Request to
+        # deal with the possibility of being behind an SSL decoder
         underlyingSite.EnableSSL = config.EnableSSL
         underlyingSite.SSLPort = config.SSLPort
         underlyingSite.BindSSLPorts = config.BindSSLPorts
-        
+
         requestFactory = underlyingSite
 
         if config.RedirectHTTPToHTTPS:
@@ -1102,7 +1141,8 @@
             try:
                 gid = getgrnam(config.GroupName).gr_gid
             except KeyError:
-                raise ConfigurationError("Invalid group name: %s" % (config.GroupName,))
+                raise ConfigurationError("Invalid group name: %s" %
+                                         (config.GroupName,))
         else:
             gid = os.getgid()
 
@@ -1110,20 +1150,24 @@
             try:
                 uid = getpwnam(config.UserName).pw_uid
             except KeyError:
-                raise ConfigurationError("Invalid user name: %s" % (config.UserName,))
+                raise ConfigurationError("Invalid user name: %s" %
+                                         (config.UserName,))
         else:
             uid = os.getuid()
 
+
+        controlSocket = ControlSocket()
+        controlSocket.addFactory(_LOG_ROUTE, logger)
         if config.ControlSocket:
-            loggingService = GroupOwnedUNIXServer(
-                gid, config.ControlSocket, logger, mode=0660
+            controlSocketService = GroupOwnedUNIXServer(
+                gid, config.ControlSocket, controlSocket, mode=0660
             )
         else:
-            loggingService = ControlPortTCPServer(
-                config.ControlPort, logger, interface="127.0.0.1"
+            controlSocketService = ControlPortTCPServer(
+                config.ControlPort, controlSocket, interface="127.0.0.1"
             )
-        loggingService.setName("logging")
-        loggingService.setServiceParent(s)
+        controlSocketService.setName(_CONTROL_SERVICE_NAME)
+        controlSocketService.setServiceParent(s)
 
         monitor = DelayedStartupProcessMonitor()
         s.processMonitor = monitor
@@ -1210,7 +1254,7 @@
         # the current stats on the server.
         stats = CalDAVStatisticsServer(logger)
         statsService = GroupOwnedUNIXServer(
-            gid, config.GlobalStatsSocket, stats, mode=0440
+            gid, config.GlobalStatsSocket, stats, mode=0660
         )
         statsService.setName("stats")
         statsService.setServiceParent(s)
@@ -1244,12 +1288,22 @@
         # filesystem to the database (if that's necessary, and there is
         # filesystem data in need of upgrading).
         def spawnerSvcCreator(pool, store):
+            from twisted.internet import reactor
+            pool = PeerConnectionPool(reactor, store.newTransaction,
+                                      7654, schema)
+            controlSocket.addFactory(_QUEUE_ROUTE,
+                                     pool.workerListenerFactory())
+            # TODO: now that we have the shared control socket, we should get
+            # rid of the connection dispenser and make a shared / async
+            # connection pool implementation that can dispense transactions
+            # synchronously as the interface requires.
             if pool is not None and config.SharedConnectionPool:
                 self.log_warn("Using Shared Connection Pool")
                 dispenser = ConnectionDispenser(pool)
             else:
                 dispenser = None
             multi = MultiService()
+            pool.setServiceParent(multi)
             spawner = SlaveSpawnerService(
                 self, monitor, dispenser, dispatcher, options["config"],
                 inheritFDs=inheritFDs, inheritSSLFDs=inheritSSLFDs

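The slave-side wiring above follows a small recipe: build a client-side ControlSocket, register one factory per route, then hand an endpoint factory plus the socket to ControlSocketConnectingService. A minimal sketch under those assumptions (the socket path and route name here are illustrative, not taken from the configuration):

    from twisted.internet.endpoints import UNIXClientEndpoint
    from twisted.internet.protocol import Factory
    from twisted.protocols.amp import AMP

    from calendarserver.controlsocket import (
        ControlSocket, ControlSocketConnectingService)

    client = ControlSocket()

    f = Factory()
    f.protocol = AMP  # plain AMP on the "log" route, for illustration
    client.addFactory("log", f)

    service = ControlSocketConnectingService(
        lambda reactor: UNIXClientEndpoint(reactor, "/var/run/caldavd.sock"),
        client,
    )
    # service.setServiceParent(...) starts the connection alongside the
    # slave's other services, as CalDAVServiceMaker does above.
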
Modified: CalendarServer/branches/users/gaya/inviteclean/calendarserver/tap/test/test_caldav.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/calendarserver/tap/test/test_caldav.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/calendarserver/tap/test/test_caldav.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -57,7 +57,8 @@
 
 from calendarserver.tap.caldav import (
     CalDAVOptions, CalDAVServiceMaker, CalDAVService, GroupOwnedUNIXServer,
-    DelayedStartupProcessMonitor, DelayedStartupLineLogger, TwistdSlaveProcess
+    DelayedStartupProcessMonitor, DelayedStartupLineLogger, TwistdSlaveProcess,
+    _CONTROL_SERVICE_NAME
 )
 from calendarserver.provision.root import RootResource
 from StringIO import StringIO
@@ -460,7 +461,7 @@
         self.config["ProcessType"] = "Combined"
         self.writeConfig()
         svc = self.makeService()
-        for serviceName in ["logging"]:
+        for serviceName in [_CONTROL_SERVICE_NAME]:
             socketService = svc.getServiceNamed(serviceName)
             self.assertIsInstance(socketService, GroupOwnedUNIXServer)
             m = socketService.kwargs.get("mode", 0666)
@@ -472,9 +473,9 @@
         for serviceName in ["stats"]:
             socketService = svc.getServiceNamed(serviceName)
             self.assertIsInstance(socketService, GroupOwnedUNIXServer)
-            m = socketService.kwargs.get("mode", 0444)
+            m = socketService.kwargs.get("mode", 0666)
             self.assertEquals(
-                m, int("440", 8),
+                m, int("660", 8),
                 "Wrong mode on %s: %s" % (serviceName, oct(m))
             )
             self.assertEquals(socketService.gid, alternateGroup)

Modified: CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/backup_pg.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/backup_pg.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/backup_pg.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -36,6 +36,7 @@
 
 PSQL          = "%s/usr/bin/psql" % (SIPP,)
 PGDUMP        = "%s/usr/bin/pg_dump" % (SIPP,)
+PGSOCKETDIR   = "/Library/Server/PostgreSQL For Server Services/Socket"
 
 def usage(e=None):
     name = os.path.basename(sys.argv[0])
@@ -67,6 +68,7 @@
 
     cmdArgs = [
         PGDUMP,
+        "-h", PGSOCKETDIR,
         "--username=%s" % (USERNAME,),
         "--clean",
         "--no-privileges",
@@ -96,6 +98,7 @@
 
     cmdArgs = [
         PSQL,
+        "-h", PGSOCKETDIR,
         "--username=%s" % (USERNAME,),
         "--file=%s" % (dumpFile,)
     ]

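Passing -h with a filesystem path works because libpq treats a host argument that begins with "/" as the directory holding the Unix-domain socket, not as a TCP hostname; the same flag is threaded through the bootstrap tool below. A minimal sketch of the resulting invocation, assuming the caldav database name used elsewhere in these tools:

    import subprocess

    PGSOCKETDIR = "/Library/Server/PostgreSQL For Server Services/Socket"

    def dumpDatabase(dumpFile, username="caldav", database="caldav"):
        # An -h value starting with "/" selects the Unix-domain socket in
        # that directory instead of a TCP connection.
        cmdArgs = [
            "pg_dump",
            "-h", PGSOCKETDIR,
            "--username=%s" % (username,),
            "--clean",
            "--no-privileges",
            "--file=%s" % (dumpFile,),
            database,
        ]
        subprocess.check_call(cmdArgs)
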
Modified: CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/bootstrapdatabase.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/bootstrapdatabase.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/bootstrapdatabase.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -23,6 +23,7 @@
 CONNECTNAME   = "_postgres"
 USERNAME      = "caldav"
 DATABASENAME  = "caldav"
+PGSOCKETDIR   = "/Library/Server/PostgreSQL For Server Services/Socket"
 SCHEMAFILE    = "/Applications/Server.app/Contents/ServerRoot/usr/share/caldavd/lib/python/txdav/common/datastore/sql_schema/current.sql"
 
 # Executables:
@@ -56,6 +57,7 @@
 
     cmdArgs = [
         CREATEUSER,
+        "-h", PGSOCKETDIR,
         "--username=%s" % (CONNECTNAME,),
         USERNAME,
         "--no-superuser",
@@ -90,6 +92,7 @@
 
     cmdArgs = [
         CREATEDB,
+        "-h", PGSOCKETDIR,
         "--username=%s" % (USERNAME,),
         DATABASENAME,
     ]
@@ -120,6 +123,7 @@
 
     cmdArgs = [
         PSQL,
+        "-h", PGSOCKETDIR,
         "-d", DATABASENAME,
         "-U", USERNAME,
         "-t",
@@ -157,6 +161,7 @@
 
     cmdArgs = [
         PSQL,
+        "-h", PGSOCKETDIR,
         "-U", USERNAME,
         "-f", SCHEMAFILE,
     ]

Modified: CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/principals.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/principals.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/principals.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -390,7 +390,7 @@
         for fieldName in ("fullName", "firstName", "lastName", "emailAddresses"):
             fields.append((fieldName, searchTerm, True, "contains"))
 
-        records = list((yield config.directory.recordsMatchingFields(fields)))
+        records = list((yield config.directory.recordsMatchingTokens(searchTerm.strip().split())))
         if records:
             records.sort(key=operator.attrgetter('fullName'))
             print "%d matches found:" % (len(records),)

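recordsMatchingTokens replaces the per-field "contains" tuples with a list of whitespace-separated tokens (searchTerm.strip().split()); the same switch is applied to the shell and web-admin search paths below. The actual matching rule lives in the directory service, but a rough sketch of token matching might look like this (the field names and the every-token-must-match rule are illustrative assumptions):

    def matchesTokens(record, tokens):
        # Illustrative only: a record matches when every token occurs,
        # case-insensitively, somewhere in its searchable fields.
        fields = [record.fullName, record.firstName, record.lastName]
        fields.extend(record.emailAddresses)
        haystack = " ".join(f for f in fields if f).lower()
        return all(token.lower() in haystack for token in tokens)

    # "  John  Smith " -> ["John", "Smith"]
    tokens = "  John  Smith ".strip().split()
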
Modified: CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/shell/directory.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/shell/directory.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/calendarserver/tools/shell/directory.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -37,18 +37,10 @@
 
 @inlineCallbacks
 def findRecords(directory, terms):
-    for term in terms:
-        searchFieldNames = ("fullName", "firstName", "lastName", "emailAddresses")
-        searchFields = tuple(
-            (fieldName, term, True, "contains")
-            for fieldName in searchFieldNames
-        )
+    records = tuple((yield directory.recordsMatchingTokens(terms)))
+    returnValue(sorted(records, key=operator.attrgetter("fullName")))
 
-    records = (yield directory.recordsMatchingFields(searchFields))
 
-    returnValue(sorted(tuple(records), key=operator.attrgetter("fullName")))
-
-
 @inlineCallbacks
 def recordInfo(directory, record):
     """

Modified: CalendarServer/branches/users/gaya/inviteclean/calendarserver/webadmin/resource.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/calendarserver/webadmin/resource.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/calendarserver/webadmin/resource.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -685,10 +685,5 @@
 
     @inlineCallbacks
     def search(self, searchStr):
-        fields = []
-        for fieldName in ("fullName", "firstName", "lastName",
-                          "emailAddresses"):
-            fields.append((fieldName, searchStr, True, "contains"))
-
-        records = list((yield self.directory.recordsMatchingFields(fields)))
+        records = list((yield self.directory.recordsMatchingTokens(searchStr.strip().split())))
         returnValue(records)

Modified: CalendarServer/branches/users/gaya/inviteclean/calendarserver/webadmin/test/test_resource.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/calendarserver/webadmin/test/test_resource.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/calendarserver/webadmin/test/test_resource.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -31,6 +31,7 @@
 from calendarserver.tap.util import FakeRequest
 from twisted.internet.defer import inlineCallbacks
 from twisted.internet.defer import returnValue
+from twisted.internet.defer import succeed
 from calendarserver.webadmin.resource import WebAdminResource
 
 from txdav.xml.rfc3744 import GroupMemberSet
@@ -53,20 +54,14 @@
         Expect that a search will be issued with the given fields, and will
         yield the given result.
         """
-        fields = []
-        for field in 'fullName', 'firstName', 'lastName', 'emailAddresses':
-            fields.append((field, searchString, True, "contains"))
-        self.expectedSearches[tuple(fields)] = result
+        self.expectedSearches[(searchString,)] = result
 
 
-    def recordsMatchingFields(self, fields):
+    def recordsMatchingTokens(self, tokens):
         """
         Pretend to be a directory object for the purposes of testing.
         """
-        # 'fields' will be a list of 4-tuples of (fieldName, searchStr, True,
-        # "contains"; implement this for tests which will want to call
-        # 'search()')
-        return self.expectedSearches.pop(tuple(fields))
+        return succeed(self.expectedSearches.pop(tuple(tokens)))
 
 
     def setUp(self):

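Wrapping the canned result in succeed(...) makes the fake recordsMatchingTokens return an already-fired Deferred, mirroring the asynchronous contract of the real directory API that the inlineCallbacks-based search() yields on. A condensed sketch of the stub pattern (class name hypothetical):

    from twisted.internet.defer import succeed

    class FakeDirectory(object):
        # Hypothetical stand-in following the pattern used in the test.
        def __init__(self, cannedResults):
            self.cannedResults = cannedResults

        def recordsMatchingTokens(self, tokens):
            # Return a fired Deferred so callers can yield it just as
            # they would the real directory service's result.
            return succeed(self.cannedResults.pop(tuple(tokens)))
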
Modified: CalendarServer/branches/users/gaya/inviteclean/conf/caldavd-apple.plist
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/conf/caldavd-apple.plist	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/conf/caldavd-apple.plist	2012-08-30 22:48:54 UTC (rev 9757)
@@ -98,7 +98,7 @@
     <key>DBType</key>
     <string>postgres</string>
     <key>DSN</key>
-    <string>:caldav:caldav:::</string>
+    <string>/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::</string>
 
     <!-- Data root -->
     <key>DataRoot</key>

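The DSN is a colon-delimited string; its first field, previously empty (meaning the default socket location), now names the server-specific Postgres socket directory, with the database and user names following. Roughly (the meaning of the trailing empty fields is not spelled out here and is an assumption):

    dsn = "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::"
    parts = dsn.split(":")
    endpoint, database, user = parts[0], parts[1], parts[2]
    # A leading "/" means a Unix-domain socket directory, not a TCP host.
    assert endpoint.startswith("/")
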
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/migration/calendarmigrator.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/migration/calendarmigrator.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/migration/calendarmigrator.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -417,11 +417,27 @@
 
     # Disable XMPPNotifier now that we're directly talking to APNS
     try:
-        if caldav["Notifications"]["Services"]["XMPPNotifier"]["Enabled"]:
-            caldav["Notifications"]["Services"]["XMPPNotifier"]["Enabled"] = False
+        XMPPNotifier = caldav["Notifications"]["Services"]["XMPPNotifier"]
+        if XMPPNotifier["Enabled"]:
+            XMPPNotifier["Enabled"] = False
     except KeyError:
         pass
 
+    # If XMPP was also previously being routed to APNS, enable APNS
+    EnableAPNS = False
+    try:
+        if caldav["Notifications"]["Services"]["XMPPNotifier"]["CalDAV"]["APSBundleID"]:
+            EnableAPNS = True
+    except KeyError:
+        pass
+    try:
+        if caldav["Notifications"]["Services"]["XMPPNotifier"]["CardDAV"]["APSBundleID"]:
+            EnableAPNS = True
+    except KeyError:
+        pass
+    if EnableAPNS:
+        adminChanges.append(["EnableAPNS", "yes"])
+
     # Merge ports
     if not caldav.get("HTTPPort", 0):
         caldav["HTTPPort"] = 8008
@@ -464,6 +480,9 @@
     # If SSL is enabled, redirect HTTP to HTTPS.
     combined["RedirectHTTPToHTTPS"] = enableSSL
 
+    # New DSN value for server-specific Postgres
+    combined["DSN"] = "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::"
+
     return adminChanges
 
 

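The migrator probes two deeply nested plist paths with separate try/except KeyError blocks before deciding to emit the EnableAPNS admin change. A hypothetical helper (not part of the migrator) that collapses the same pattern:

    def plistGet(plist, *keys):
        # Walk nested dicts, returning None when any key is missing --
        # equivalent to the try/except KeyError probes above.
        node = plist
        for key in keys:
            try:
                node = node[key]
            except (KeyError, TypeError):
                return None
        return node

    enableAPNS = bool(
        plistGet(caldav, "Notifications", "Services", "XMPPNotifier",
                 "CalDAV", "APSBundleID") or
        plistGet(caldav, "Notifications", "Services", "XMPPNotifier",
                 "CardDAV", "APSBundleID"))
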
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/migration/test/test_migrator.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/migration/test/test_migrator.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/migration/test/test_migrator.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -89,6 +89,7 @@
         expected = {
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : True,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": True,
@@ -126,6 +127,7 @@
         expected = {
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -163,6 +165,7 @@
         expected = {
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : True,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": True,
@@ -200,6 +203,7 @@
         expected = {
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : True,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": True,
@@ -237,6 +241,7 @@
         expected = {
             "BindHTTPPorts": [1111, 2222, 4444, 5555, 7777, 8888],
             "BindSSLPorts": [3333, 6666, 9999, 11111],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : True,
             "HTTPPort": 8888,
             "RedirectHTTPToHTTPS": True,
@@ -271,6 +276,7 @@
         expected = {
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -300,6 +306,7 @@
         expected = {
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : True,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": True,
@@ -320,6 +327,7 @@
         expected = {
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -366,6 +374,7 @@
             },
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -404,6 +413,7 @@
             },
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -422,11 +432,18 @@
 
         # Ensure XMPPNotifier is disabled
 
+        # Both CalDAV and CardDAV push enabled:
         oldCalDAV = {
             "Notifications": {
                 "Services" : {
                     "XMPPNotifier" : {
                         "Enabled" : True,
+                        "CalDAV" : {
+                            "APSBundleID" : "com.apple.calendar.XServer",
+                        },
+                        "CardDAV" : {
+                            "APSBundleID" : "com.apple.contact.XServer",
+                        },
                     },
                 },
             },
@@ -437,11 +454,18 @@
                 "Services" : {
                     "XMPPNotifier" : {
                         "Enabled" : False,
+                        "CalDAV" : {
+                            "APSBundleID" : "com.apple.calendar.XServer",
+                        },
+                        "CardDAV" : {
+                            "APSBundleID" : "com.apple.contact.XServer",
+                        },
                     },
                 },
             },
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -452,10 +476,126 @@
         }
         newCombined = { }
         adminChanges = mergePlist(oldCalDAV, oldCardDAV, newCombined)
+        self.assertEquals(adminChanges, [["EnableAPNS", "yes"]])
+        self.assertEquals(newCombined, expected)
+
+        # Only with CalDAV push enabled:
+        oldCalDAV = {
+            "Notifications": {
+                "Services" : {
+                    "XMPPNotifier" : {
+                        "Enabled" : True,
+                        "CalDAV" : {
+                            "APSBundleID" : "com.apple.calendar.XServer",
+                        },
+                    },
+                },
+            },
+        }
+        oldCardDAV = { }
+        expected = {
+            "Notifications": {
+                "Services" : {
+                    "XMPPNotifier" : {
+                        "Enabled" : False,
+                        "CalDAV" : {
+                            "APSBundleID" : "com.apple.calendar.XServer",
+                        },
+                    },
+                },
+            },
+            "BindHTTPPorts": [8008, 8800],
+            "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "EnableSSL" : False,
+            "HTTPPort": 8008,
+            "RedirectHTTPToHTTPS": False,
+            "SSLAuthorityChain": "",
+            "SSLCertificate": "",
+            "SSLPort": 8443,
+            "SSLPrivateKey": "",
+        }
+        newCombined = { }
+        adminChanges = mergePlist(oldCalDAV, oldCardDAV, newCombined)
+        self.assertEquals(adminChanges, [["EnableAPNS", "yes"]])
+        self.assertEquals(newCombined, expected)
+
+        # Only with CardDAV push enabled:
+        oldCalDAV = {
+            "Notifications": {
+                "Services" : {
+                    "XMPPNotifier" : {
+                        "Enabled" : True,
+                        "CardDAV" : {
+                            "APSBundleID" : "com.apple.contact.XServer",
+                        },
+                    },
+                },
+            },
+        }
+        oldCardDAV = { }
+        expected = {
+            "Notifications": {
+                "Services" : {
+                    "XMPPNotifier" : {
+                        "Enabled" : False,
+                        "CardDAV" : {
+                            "APSBundleID" : "com.apple.contact.XServer",
+                        },
+                    },
+                },
+            },
+            "BindHTTPPorts": [8008, 8800],
+            "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "EnableSSL" : False,
+            "HTTPPort": 8008,
+            "RedirectHTTPToHTTPS": False,
+            "SSLAuthorityChain": "",
+            "SSLCertificate": "",
+            "SSLPort": 8443,
+            "SSLPrivateKey": "",
+        }
+        newCombined = { }
+        adminChanges = mergePlist(oldCalDAV, oldCardDAV, newCombined)
+        self.assertEquals(adminChanges, [["EnableAPNS", "yes"]])
+        self.assertEquals(newCombined, expected)
+
+        # APNS push was not previously enabled:
+        oldCalDAV = {
+            "Notifications": {
+                "Services" : {
+                    "XMPPNotifier" : {
+                        "Enabled" : True,
+                    },
+                },
+            },
+        }
+        oldCardDAV = { }
+        expected = {
+            "Notifications": {
+                "Services" : {
+                    "XMPPNotifier" : {
+                        "Enabled" : False,
+                    },
+                },
+            },
+            "BindHTTPPorts": [8008, 8800],
+            "BindSSLPorts": [8443, 8843],
+            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "EnableSSL" : False,
+            "HTTPPort": 8008,
+            "RedirectHTTPToHTTPS": False,
+            "SSLAuthorityChain": "",
+            "SSLCertificate": "",
+            "SSLPort": 8443,
+            "SSLPrivateKey": "",
+        }
+        newCombined = { }
+        adminChanges = mergePlist(oldCalDAV, oldCardDAV, newCombined)
         self.assertEquals(adminChanges, [])
         self.assertEquals(newCombined, expected)
 
-
     def test_examinePreviousSystem(self):
         """
         Set up a virtual system in various configurations, then ensure the

Copied: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/LogNormalVisualization.numbers (from rev 9756, CalendarServer/trunk/contrib/performance/LogNormalVisualization.numbers)
===================================================================
(Binary files differ)

Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/config.dist.plist
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/config.dist.plist	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/config.dist.plist	2012-08-30 22:48:54 UTC (rev 9757)
@@ -136,6 +136,10 @@
 				<!-- Arguments to use to initialize the OS_X_10_7 instance. -->
 				<key>params</key>
 				<dict>
+					<!-- Name that appears in logs. -->
+					<key>title</key>
+					<string>10.7</string>
+
 					<!-- OS_X_10_7 can poll the calendar home at some interval. This is 
 						in seconds. -->
 					<key>calendarHomePollInterval</key>
@@ -335,11 +339,18 @@
 								</dict>
 							</dict>
 
-							<!-- Define the distribution of who will be invited to an event. Each 
-								set of credentials loaded by the load tester has an index; samples from this 
-								distribution will be added to that index to arrive at the index of some other 
-								credentials, which will be the target of the invitation. -->
-							<key>inviteeDistanceDistribution</key>
+							<!-- Define the distribution of who will be invited to an event.
+
+								When inviteeClumping is turned on, each invitee is chosen from a sample
+								of users "close to" the organizer by account index. If the clumping is
+								too "tight" for the requested number of attendees, invites for the larger
+								counts will simply fail (the sim reports that situation).
+
+								When inviteeClumping is off, invitees are sampled across the entire
+								range of account indexes. In that case the distribution ought to be a
+								UniformIntegerDistribution with min=0 and max set to the number of accounts.
+							-->
+							<key>inviteeDistribution</key>
 							<dict>
 								<key>type</key>
 								<string>contrib.performance.stats.UniformIntegerDistribution</string>
@@ -354,18 +365,17 @@
 								</dict>
 							</dict>
 
+							<key>inviteeClumping</key>
+							<true/>
+
 							<!-- Define the distribution of how many attendees will be invited to an event.
-							    Experience shows that sigma should equal sqrt(mu) to give a peak at around 1.
-							    mu = 0.5  sigma = 0.71 gives an average of 1.6 attendees
-							    mu = 0.75 sigma = 0.87 gives an average of 2.6 attendees
-							    mu = 1.0  sigma = 1.0 gives an average of 4 attendees
-							    mu = 1.1  sigma = 1.05 gives an average of 4.7 attendees
-							    mu = 1.2  sigma = 1.1 gives an average of 5.5 attendees
-							    mu = 1.3  sigma = 1.14 gives an average of 6.5 attendees
-							    mu = 1.4  sigma = 1.18 gives an average of 7.6 attendees
-							    mu = 1.5  sigma = 1.22 gives an average of 8.8 attendees
-							    mu = 1.75  sigma = 1.32 gives an average of 12.5 attendees
-							    mu = 2.0  sigma = 1.41 gives an average of 17.4 attendees
+
+								LogNormal is the best fit to observed data.
+
+								For LogNormal, "mode" is the peak and "median" is the 50% point. For
+								invites, mode should typically be 1, with the median set to match
+								observed user behavior. Our typical median is 6.
 							     -->
 							<key>inviteeCountDistribution</key>
 							<dict>
@@ -373,12 +383,12 @@
 								<string>contrib.performance.stats.LogNormalDistribution</string>
 								<key>params</key>
 								<dict>
-									<!-- mean -->
-									<key>mu</key>
-									<real>1.3</real>
-									<!-- standard deviation -->
-									<key>sigma</key>
-									<real>1.14</real>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>1</integer>
+									<!-- median - 50% point -->
+									<key>median</key>
+									<integer>6</integer>
 									<!-- maximum -->
 									<key>maximum</key>
 									<real>100</real>
@@ -484,20 +494,24 @@
 							<key>enabled</key>
 							<true/>
 
-							<!-- Define how long to wait after seeing a new invitation before 
-								accepting it. -->
+							<!-- Define how long to wait after seeing a new invitation before
+								accepting it.
+
+								For LogNormal, "mode" is the peak and "median" is the 50% cumulative value
+								(i.e., half of the users have accepted by that time).
+							-->
 							<key>acceptDelayDistribution</key>
 							<dict>
 								<key>type</key>
-								<string>contrib.performance.stats.NormalDistribution</string>
+								<string>contrib.performance.stats.LogNormalDistribution</string>
 								<key>params</key>
 								<dict>
-									<!-- mean -->
-									<key>mu</key>
-									<integer>360</integer>
-									<!-- standard deviation -->
-									<key>sigma</key>
-									<integer>60</integer>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>300</integer>
+									<!-- median - 50% done-->
+									<key>median</key>
+									<integer>1800</integer>
 								</dict>
 							</dict>
 						</dict>

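The mode/median parameterization is convenient because, for a log-normal distribution, median = exp(mu) and mode = exp(mu - sigma^2), so the two observable values determine the underlying mu and sigma directly. Assuming contrib.performance.stats.LogNormalDistribution performs this conversion (the config comments suggest as much), the arithmetic is:

    import math

    def logNormalParams(mode, median):
        # median = exp(mu)          ->  mu = ln(median)
        # mode   = exp(mu - s**2)   ->  sigma = sqrt(mu - ln(mode))
        mu = math.log(median)
        sigma = math.sqrt(mu - math.log(mode))
        return mu, sigma

    # acceptDelayDistribution above: peak at 300s, half accepted by 1800s.
    mu, sigma = logNormalParams(300, 1800)   # mu ~= 7.50, sigma ~= 1.34
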
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/config.plist
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/config.plist	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/config.plist	2012-08-30 22:48:54 UTC (rev 9757)
@@ -123,6 +123,10 @@
 				<!-- Arguments to use to initialize the OS_X_10_7 instance. -->
 				<key>params</key>
 				<dict>
+					<!-- Name that appears in logs. -->
+					<key>title</key>
+					<string>10.7</string>
+	
 					<!-- OS_X_10_7 can poll the calendar home at some interval. This is
 						in seconds. -->
 					<key>calendarHomePollInterval</key>
@@ -329,11 +333,18 @@
 								</dict>
 							</dict>
 
-							<!-- Define the distribution of who will be invited to an event. Each
-								set of credentials loaded by the load tester has an index; samples from this
-								distribution will be added to that index to arrive at the index of some other
-								credentials, which will be the target of the invitation. -->
-							<key>inviteeDistanceDistribution</key>
+							<!-- Define the distribution of who will be invited to an event.
+
+								When inviteeClumping is turned on, each invitee is chosen from a sample
+								of users "close to" the organizer by account index. If the clumping is
+								too "tight" for the requested number of attendees, invites for the larger
+								counts will simply fail (the sim reports that situation).
+
+								When inviteeClumping is off, invitees are sampled across the entire
+								range of account indexes. In that case the distribution ought to be a
+								UniformIntegerDistribution with min=0 and max set to the number of accounts.
+							-->
+							<key>inviteeDistribution</key>
 							<dict>
 								<key>type</key>
 								<string>contrib.performance.stats.UniformIntegerDistribution</string>
@@ -348,18 +359,17 @@
 								</dict>
 							</dict>
 
+							<key>inviteeClumping</key>
+							<true/>
+
 							<!-- Define the distribution of how many attendees will be invited to an event.
-							    Experience shows that sigma should equal sqrt(mu) to give a peak at around 1.
-							    mu = 0.5  sigma = 0.71 gives an average of 1.6 attendees
-							    mu = 0.75 sigma = 0.87 gives an average of 2.6 attendees
-							    mu = 1.0  sigma = 1.0 gives an average of 4 attendees
-							    mu = 1.1  sigma = 1.05 gives an average of 4.7 attendees
-							    mu = 1.2  sigma = 1.1 gives an average of 5.5 attendees
-							    mu = 1.3  sigma = 1.14 gives an average of 6.5 attendees
-							    mu = 1.4  sigma = 1.18 gives an average of 7.6 attendees
-							    mu = 1.5  sigma = 1.22 gives an average of 8.8 attendees
-							    mu = 1.75  sigma = 1.32 gives an average of 12.5 attendees
-							    mu = 2.0  sigma = 1.41 gives an average of 17.4 attendees
+
+								LogNormal is the best fit to observed data.
+
+								For LogNormal, "mode" is the peak and "median" is the 50% point. For
+								invites, mode should typically be 1, with the median set to match
+								observed user behavior. Our typical median is 6.
 							     -->
 							<key>inviteeCountDistribution</key>
 							<dict>
@@ -367,12 +377,12 @@
 								<string>contrib.performance.stats.LogNormalDistribution</string>
 								<key>params</key>
 								<dict>
-									<!-- mean -->
-									<key>mu</key>
-									<real>1.3</real>
-									<!-- standard deviation -->
-									<key>sigma</key>
-									<real>1.14</real>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>1</integer>
+									<!-- median - 50% point -->
+									<key>median</key>
+									<integer>6</integer>
 									<!-- maximum -->
 									<key>maximum</key>
 									<real>100</real>
@@ -479,19 +489,23 @@
 							<true/>
 
 							<!-- Define how long to wait after seeing a new invitation before
-								accepting it. -->
+								accepting it.
+
+								For LogNormal, "mode" is the peak and "median" is the 50% cumulative value
+								(i.e., half of the users have accepted by that time).
+							-->
 							<key>acceptDelayDistribution</key>
 							<dict>
 								<key>type</key>
-								<string>contrib.performance.stats.NormalDistribution</string>
+								<string>contrib.performance.stats.LogNormalDistribution</string>
 								<key>params</key>
 								<dict>
-									<!-- mean -->
-									<key>mu</key>
-									<integer>60</integer>
-									<!-- standard deviation -->
-									<key>sigma</key>
-									<integer>60</integer>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>300</integer>
+									<!-- median - 50% done-->
+									<key>median</key>
+									<integer>1800</integer>
 								</dict>
 							</dict>
 						</dict>

Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/ical.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/ical.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/ical.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -97,51 +97,102 @@
         self.pushkey = pushkey
 
 
+
 def u2str(data):
     return data.encode("utf-8") if type(data) is unicode else data
-    
+
+
+
 class Event(object):
-    def __init__(self, url, etag, vevent=None):
+    def __init__(self, serializeBasePath, url, etag, component=None):
+        self.serializeBasePath = serializeBasePath
         self.url = url
         self.etag = etag
         self.scheduleTag = None
-        self.vevent = vevent
+        if component is not None:
+            self.component = component
+        self.uid = component.resourceUID() if component is not None else None
 
 
     def getUID(self):
         """
-        Return the UID from the vevent, if there is one.
+        Return the UID of the calendar resource.
         """
-        if self.vevent is not None:
-            return self.vevent.resourceUID()
-        return None
+        return self.uid
 
 
+    def serializePath(self):
+        if self.serializeBasePath:
+            calendar = os.path.join(self.serializeBasePath, self.url.split("/")[-2])
+            if not os.path.exists(calendar):
+                os.makedirs(calendar)
+            return os.path.join(calendar, self.url.split("/")[-1])
+        else:
+            return None
+
     def serialize(self):
         """
         Create a dict of the data so we can serialize as JSON.
         """
         
         result = {}
-        for attr in ("url", "etag", "scheduleTag"):
+        for attr in ("url", "etag", "scheduleTag", "uid",):
             result[attr] = getattr(self, attr)
-        result["icalendar"] = str(self.vevent)
         return result
 
+
     @staticmethod
-    def deserialize(data):
+    def deserialize(serializeLocation, data):
         """
         Convert dict (deserialized from JSON) into an L{Event}.
         """
         
-        event = Event(None, None)
-        for attr in ("url", "etag", "scheduleTag"):
+        event = Event(serializeLocation, None, None)
+        for attr in ("url", "etag", "scheduleTag", "uid",):
             setattr(event, attr, u2str(data[attr]))
-        event.vevent = Component.fromString(data["icalendar"])
         return event
 
 
+    @property
+    def component(self):
+        """
+        Data always read from disk - never cached in the object.
+        """
+        path = self.serializePath()
+        if path and os.path.exists(path):
+            f = open(path)
+            comp = Component.fromString(f.read())
+            f.close()
+            return comp
+        else:
+            return None
 
+
+    @component.setter
+    def component(self, component):
+        """
+        Data always written to disk - never cached on the object.
+        """
+        path = self.serializePath()
+        if path:
+            if component is None:
+                os.remove(path)
+            else:
+                f = open(path, "w")
+                f.write(str(component))
+                f.close()
+        self.uid = component.resourceUID() if component is not None else None
+
+
+    def removed(self):
+        """
+        Resource no longer exists on the server - remove associated data.
+        """
+        path = self.serializePath()
+        if path and os.path.exists(path):
+            os.remove(path)
+
+
 class Calendar(object):
     def __init__(self, resourceType, componentTypes, name, url, changeToken):
         self.resourceType = resourceType
@@ -168,7 +219,7 @@
     @staticmethod
     def deserialize(data, events):
         """
-        Convert dict (deserialized from JSON) into an L{Event}.
+        Convert dict (deserialized from JSON) into an L{Calendar}.
         """
         
         calendar = Calendar(None, None, None, None, None)
@@ -205,27 +256,28 @@
         Cache the provided event
         """
         self._events[href] = event
-        calendar, uid = href.rsplit('/', 1)
-        self._calendars[calendar + '/'].events[uid] = event
+        calendar, basePath = href.rsplit('/', 1)
+        self._calendars[calendar + '/'].events[basePath] = event
 
 
     def _removeEvent(self, href):
         """
         Remove event from local cache.
         """
+        self._events[href].removed()
         del self._events[href]
-        calendar, uid = href.rsplit('/', 1)
-        del self._calendars[calendar + '/'].events[uid]
+        calendar, basePath = href.rsplit('/', 1)
+        del self._calendars[calendar + '/'].events[basePath]
 
 
-    def addEvent(self, href, vcalendar):
+    def addEvent(self, href, calendar):
         """
         Called when a profile needs to add an event (no scheduling).
         """
         raise NotImplementedError("%r does not implement addEvent" % (self.__class__,))
 
 
-    def addInvite(self, href, vcalendar):
+    def addInvite(self, href, calendar):
         """
         Called when a profile needs to add a new invite. The iCalendar data will already
         contain ATTENDEEs.
@@ -339,6 +391,7 @@
         serializePath,
         record,
         auth,
+        title=None,
         calendarHomePollInterval=None,
         supportPush=True,
         supportAmpPush=True,
@@ -359,6 +412,8 @@
         self.principalPathTemplate = principalPathTemplate
         self.record = record
 
+        self.title = title if title else self._client_type
+
         if calendarHomePollInterval is None:
             calendarHomePollInterval = self.CALENDAR_HOME_POLL_INTERVAL
         self.calendarHomePollInterval = calendarHomePollInterval
@@ -417,7 +472,8 @@
         client specific things, Accept etc.
         """
         headers.setRawHeaders('User-Agent', [self.USER_AGENT])
-        
+
+
     @inlineCallbacks
     def _request(self, expectedResponseCodes, method, url, headers=None, body=None, method_label=None):
         """
@@ -433,7 +489,7 @@
             method=method_label if method_label else method,
             url=url,
             user=self.record.uid,
-            client_type=self._client_type,
+            client_type=self.title,
             client_id=self._client_id,
         )
 
@@ -455,7 +511,7 @@
             body=body,
             code=response.code,
             user=self.record.uid,
-            client_type=self._client_type,
+            client_type=self.title,
             client_id=self._client_id,
             duration=(after - before),
             url=url,
@@ -814,7 +870,7 @@
             # Differentiate a remove vs new/update result
             if result[responseHref].getStatus() / 100 == 2:
                 if responseHref not in self._events:
-                    self._setEvent(responseHref, Event(responseHref, None))
+                    self._setEvent(responseHref, Event(self.serializeLocation(), responseHref, None))
                     
                 event = self._events[responseHref]
                 if event.etag != etag:
@@ -860,7 +916,7 @@
                 continue
 
             if responseHref not in self._events:
-                self._setEvent(responseHref, Event(responseHref, None))
+                self._setEvent(responseHref, Event(self.serializeLocation(), responseHref, None))
                 
             event = self._events[responseHref]
             if event.etag != etag:
@@ -909,7 +965,7 @@
         event.etag = etag
         if scheduleTag is not None:
             event.scheduleTag = scheduleTag
-        event.vevent = Component.fromString(body)
+        event.component = Component.fromString(body)
         self.catalog["eventChanged"].issue(href)
 
                 
@@ -921,8 +977,10 @@
         label_suffix = "small"
         if len(events) > 5:
             label_suffix = "medium"
-        if len(events) > 15:
+        if len(events) > 20:
             label_suffix = "large"
+        if len(events) > 75:
+            label_suffix = "huge"
 
         return self._report(
             calendar,
@@ -1075,7 +1133,7 @@
             type="operation",
             phase="start",
             user=self.record.uid, 
-            client_type=self._client_type,
+            client_type=self.title,
             client_id=self._client_id,
             label=label,
         )
@@ -1098,7 +1156,7 @@
             phase="end",
             duration=after - before,
             user=self.record.uid,
-            client_type=self._client_type,
+            client_type=self.title,
             client_id=self._client_id,
             label=label,
             success=success,
@@ -1165,7 +1223,7 @@
                 raise MissingCalendarHome
             yield self._checkCalendarsForEvents(calendarHome, firstTime=True)
             returnValue(calendarHome)
-        calendarHome = yield self._newOperation("startup: %s" % (self._client_type,), startup())
+        calendarHome = yield self._newOperation("startup: %s" % (self.title,), startup())
 
         self.started = True
 
@@ -1196,16 +1254,30 @@
         return self._unsubscribePubSub()
 
 
+    def serializeLocation(self):
+        """
+        Return the path to the directory where data for this user is serialized.
+        """
+        if self.serializePath is None or not os.path.isdir(self.serializePath):
+            return None
+        
+        key = "%s-%s" % (self.record.uid, self.title.replace(" ", "_"))
+        path = os.path.join(self.serializePath, key)
+        if not os.path.exists(path):
+            os.mkdir(path)
+        elif not os.path.isdir(path):
+            return None
+        
+        return path
+        
     def serialize(self):
         """
         Write current state to disk.
         """
         
-        if self.serializePath is None or not os.path.isdir(self.serializePath):
+        path = self.serializeLocation()
+        if path is None:
             return
-        
-        key = "%s-%s.json" % (self.record.uid, self._client_type.replace(" ", "_"))
-        path = os.path.join(self.serializePath, key)
 
         # Create dict for all the data we need to store
         data = {
@@ -1215,7 +1287,7 @@
         }
 
         # Write JSON data
-        json.dump(data, open(path, "w"), indent=2)
+        json.dump(data, open(os.path.join(path, "index.json"), "w"), indent=2)
         
 
     def deserialize(self):
@@ -1223,17 +1295,16 @@
         Read state from disk.
         """
         
-        if self.serializePath is None or not os.path.isdir(self.serializePath):
-            return
-        
         self._calendars = {}
         self._events = {}
 
+        path = self.serializeLocation()
+        if path is None:
+            return
+        
         # Parse JSON data for calendars
-        key = "%s-%s.json" % (self.record.uid, self._client_type.replace(" ", "_"))
-        path = os.path.join(self.serializePath, key)
         try:
-            data = json.load(open(path))
+            data = json.load(open(os.path.join(path, "index.json")))
         except IOError:
             return
 
@@ -1241,7 +1312,7 @@
 
         # Extract all the events first, then do the calendars (which reference the events)
         for event in data["events"]:
-            event = Event.deserialize(event)
+            event = Event.deserialize(self.serializeLocation(), event)
             self._events[event.url] = event
         for calendar in data["calendars"]:
             calendar = Calendar.deserialize(calendar, self._events)
@@ -1276,27 +1347,29 @@
     def addEventAttendee(self, href, attendee):
 
         event = self._events[href]
-        vevent = event.vevent
+        component = event.component
 
         # Trigger auto-complete behavior
-        yield self._attendeeAutoComplete(vevent, attendee)
+        yield self._attendeeAutoComplete(component, attendee)
 
         # If the event has no attendees, add ourselves as an attendee.
-        attendees = list(vevent.mainComponent().properties('ATTENDEE'))
+        attendees = list(component.mainComponent().properties('ATTENDEE'))
         if len(attendees) == 0:
             # First add ourselves as a participant and as the
             # organizer.  In the future for this event we should
             # already have those roles.
-            vevent.mainComponent().addProperty(self._makeSelfOrganizer())
-            vevent.mainComponent().addProperty(self._makeSelfAttendee())
+            component.mainComponent().addProperty(self._makeSelfOrganizer())
+            component.mainComponent().addProperty(self._makeSelfAttendee())
         attendees.append(attendee)
-        vevent.mainComponent().addProperty(attendee)
+        component.mainComponent().addProperty(attendee)
 
         label_suffix = "small"
         if len(attendees) > 5:
             label_suffix = "medium"
-        if len(attendees) > 15:
+        if len(attendees) > 20:
             label_suffix = "large"
+        if len(attendees) > 75:
+            label_suffix = "huge"
         
         # At last, upload the new event definition
         response = yield self._request(
@@ -1306,7 +1379,7 @@
             Headers({
                     'content-type': ['text/calendar'],
                     'if-match': [event.etag]}),
-            StringProducer(vevent.getTextWithTimezones(includeTimezones=True)),
+            StringProducer(component.getTextWithTimezones(includeTimezones=True)),
             method_label="PUT{organizer-%s}" % (label_suffix,)
         )
 
@@ -1315,7 +1388,7 @@
 
 
     @inlineCallbacks
-    def _attendeeAutoComplete(self, vevent, attendee):
+    def _attendeeAutoComplete(self, component, attendee):
 
         if self._ATTENDEE_LOOKUPS:
             # Temporarily use some non-test names (some which will return
@@ -1348,21 +1421,21 @@
     
             # Now learn about the attendee's availability
             yield self.requestAvailability(
-                vevent.mainComponent().getStartDateUTC(),
-                vevent.mainComponent().getEndDateUTC(),
+                component.mainComponent().getStartDateUTC(),
+                component.mainComponent().getEndDateUTC(),
                 [self.email, u'mailto:' + email],
-                [vevent.resourceUID()]
+                [component.resourceUID()]
             )
 
 
     @inlineCallbacks
     def changeEventAttendee(self, href, oldAttendee, newAttendee):
         event = self._events[href]
-        vevent = event.vevent
+        component = event.component
 
         # Change the event to have the new attendee instead of the old attendee
-        vevent.mainComponent().removeProperty(oldAttendee)
-        vevent.mainComponent().addProperty(newAttendee)
+        component.mainComponent().removeProperty(oldAttendee)
+        component.mainComponent().addProperty(newAttendee)
         okCodes = NO_CONTENT
         headers = Headers({
                 'content-type': ['text/calendar'],
@@ -1371,20 +1444,24 @@
             headers.addRawHeader('if-schedule-tag-match', event.scheduleTag)
             okCodes = (NO_CONTENT, PRECONDITION_FAILED,)
 
-        attendees = list(vevent.mainComponent().properties('ATTENDEE'))
+        attendees = list(component.mainComponent().properties('ATTENDEE'))
         label_suffix = "small"
         if len(attendees) > 5:
             label_suffix = "medium"
-        if len(attendees) > 15:
+        if len(attendees) > 20:
             label_suffix = "large"
+        if len(attendees) > 75:
+            label_suffix = "huge"
         
         response = yield self._request(
             okCodes,
             'PUT',
             self.root + href.encode('utf-8'),
-            headers, StringProducer(vevent.getTextWithTimezones(includeTimezones=True)),
+            headers, StringProducer(component.getTextWithTimezones(includeTimezones=True)),
             method_label="PUT{attendee-%s}" % (label_suffix,),
         )
+
+        # Finally, re-retrieve the event to update the etag
         self._updateEvent(response, href)
 
 
@@ -1407,53 +1484,55 @@
 
 
     @inlineCallbacks
-    def addEvent(self, href, vcalendar, invite=False):
+    def addEvent(self, href, component, invite=False):
         headers = Headers({
                 'content-type': ['text/calendar'],
                 })
 
-        attendees = list(vcalendar.mainComponent().properties('ATTENDEE'))
+        attendees = list(component.mainComponent().properties('ATTENDEE'))
         label_suffix = "small"
         if len(attendees) > 5:
             label_suffix = "medium"
-        if len(attendees) > 15:
+        if len(attendees) > 20:
             label_suffix = "large"
+        if len(attendees) > 75:
+            label_suffix = "huge"
 
         response = yield self._request(
             CREATED,
             'PUT',
             self.root + href.encode('utf-8'),
             headers,
-            StringProducer(vcalendar.getTextWithTimezones(includeTimezones=True)),
+            StringProducer(component.getTextWithTimezones(includeTimezones=True)),
             method_label="PUT{organizer-%s}" % (label_suffix,) if invite else "PUT{event}",
         )
-        self._localUpdateEvent(response, href, vcalendar)
+        self._localUpdateEvent(response, href, component)
 
 
     @inlineCallbacks
-    def addInvite(self, href, vevent):
+    def addInvite(self, href, component):
         """
         Add an event that is an invite - i.e., has attendees. We will do attendee lookups and freebusy
         checks on each attendee to simulate what happens when an organizer creates a new invite.
         """
         
         # Do lookup and free busy of each attendee (not self)
-        attendees = list(vevent.mainComponent().properties('ATTENDEE'))
+        attendees = list(component.mainComponent().properties('ATTENDEE'))
         for attendee in attendees:
             if attendee.value() in (self.uuid, self.email):
                 continue
-            yield self._attendeeAutoComplete(vevent, attendee)
+            yield self._attendeeAutoComplete(component, attendee)
         
         # Now do a normal PUT
-        yield self.addEvent(href, vevent, invite=True)
+        yield self.addEvent(href, component, invite=True)
 
 
-    def _localUpdateEvent(self, response, href, vcalendar):
+    def _localUpdateEvent(self, response, href, component):
         headers = response.headers
         etag = headers.getRawHeaders("etag", [None])[0]
         scheduleTag = headers.getRawHeaders("schedule-tag", [None])[0]
 
-        event = Event(href, etag, vcalendar)
+        event = Event(self.serializeLocation(), href, etag, component)
         event.scheduleTag = scheduleTag
         self._setEvent(href, event)
 
@@ -1526,8 +1605,10 @@
         label_suffix = "small"
         if len(users) > 5:
             label_suffix = "medium"
-        if len(users) > 15:
+        if len(users) > 20:
             label_suffix = "large"
+        if len(users) > 75:
+            label_suffix = "huge"
 
         response = yield self._request(
             OK, 'POST', outbox,
@@ -1544,7 +1625,7 @@
                     'start': start,
                     'end': end,
                     'now': now}),
-            method_label="POST{fb}-%s" % (label_suffix,),
+            method_label="POST{fb-%s}" % (label_suffix,),
         )
         body = yield readBody(response)
         returnValue(body)

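The Event refactoring stops embedding iCalendar bodies in the JSON index: only url/etag/scheduleTag/uid are serialized, while the component property reads the .ics data from disk on every access and the setter writes or removes it. A stripped-down sketch of that read-through/write-through property pattern:

    import os

    class DiskBackedText(object):
        # Illustrative only; the real Event parses Component.fromString
        # on read and stores str(component) on write.
        def __init__(self, path):
            self.path = path

        @property
        def component(self):
            if self.path and os.path.exists(self.path):
                with open(self.path) as f:
                    return f.read()
            return None

        @component.setter
        def component(self, text):
            if text is None:
                os.remove(self.path)
            else:
                with open(self.path, "w") as f:
                    f.write(text)
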
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/population.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/population.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/population.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -28,6 +28,7 @@
 from datetime import datetime
 from urllib2 import HTTPBasicAuthHandler
 from urllib2 import HTTPDigestAuthHandler
+import collections
 import json
 import os
 
@@ -285,7 +286,12 @@
                     where.path,))
 
 
+    def _simFailure(self, reason, reactor):
+        if not self._stopped:
+            msg(type="sim-failure", reason=reason)
 
+
+
 class SmoothRampUp(object):
     def __init__(self, reactor, groups, groupSize, interval, clientsPerUser):
         self.reactor = reactor
@@ -308,6 +314,8 @@
             self.eventReceived(event)
         elif event.get('type') == 'client-failure':
             self.clientFailure(event)
+        elif event.get('type') == 'sim-failure':
+            self.simFailure(event)
 
 
     def report(self, output):
@@ -322,8 +330,9 @@
 class SimpleStatistics(StatisticsBase):
     def __init__(self):
         self._times = []
+        self._failures = collections.defaultdict(int)
+        self._simFailures = collections.defaultdict(int)
 
-
     def eventReceived(self, event):
         self._times.append(event['duration'])
         if len(self._times) == 200:
@@ -335,9 +344,13 @@
 
 
     def clientFailure(self, event):
-        pass
+        self._failures[event] += 1
 
 
+    def simFailure(self, event):
+        self._simFailures[event] += 1
+
+
 class ReportStatistics(StatisticsBase, SummarizingMixin):
     """
 
@@ -376,6 +389,7 @@
         self._users = set()
         self._clients = set()
         self._failed_clients = []
+        self._failed_sim = collections.defaultdict(int)
         self._startTime = datetime.now()
 
         # Load parameters from config 
@@ -411,6 +425,10 @@
         return len(self._failed_clients)
 
 
+    def countSimFailures(self):
+        return len(self._failed_sim)
+
+
     def eventReceived(self, event):
         dataset = self._perMethodTimes.setdefault(event['method'], [])
         dataset.append((event['success'], event['duration']))
@@ -422,6 +440,10 @@
         self._failed_clients.append(event['reason'])
 
 
+    def simFailure(self, event):
+        self._failed_sim[event['reason']] += 1
+
+
     def printMiscellaneous(self, output, items):
         maxColumnWidth = str(len(max(items.iterkeys(), key=len)))
         fmt = "%"+maxColumnWidth+"s : %-s\n"
@@ -453,6 +475,9 @@
             items['Failed clients'] = self.countClientFailures()
             for ctr, reason in enumerate(self._failed_clients, 1):
                 items['Failure #%d' % (ctr,)] = reason
+        if self.countSimFailures() > 0:
+            for reason, count in self._failed_sim.items():
+                items['Failed operation'] = "%s : %d times" % (reason, count,)
         self.printMiscellaneous(output, items)
         output.write("\n")
         self.printHeader(output, [

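Sim failures are tallied per reason in a collections.defaultdict(int), so repeated failures of the same kind collapse into one counter and the report prints each reason with its count. For instance (the reason strings here are illustrative):

    import collections

    failed = collections.defaultdict(int)
    for event in ({"reason": "invite: Cannot add attendee"},
                  {"reason": "invite: Cannot add attendee"},
                  {"reason": "startup: timeout"}):
        failed[event["reason"]] += 1
    # failed == {"invite: Cannot add attendee": 2, "startup: timeout": 1}
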
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/profiles.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/profiles.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/profiles.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -95,7 +95,7 @@
             type="operation",
             phase="start",
             user=self._client.record.uid,
-            client_type=self._client._client_type,
+            client_type=self._client.title,
             client_id=self._client._client_id,
             label=label,
             lag=lag,
@@ -112,7 +112,7 @@
                 phase="end",
                 duration=after - before,
                 user=self._client.record.uid,
-                client_type=self._client._client_type,
+                client_type=self._client.title,
                 client_id=self._client._client_id,
                 label=label,
                 success=success,
@@ -120,10 +120,25 @@
             return passthrough
         deferred.addBoth(finished)
         return deferred
-        
 
 
+    def _failedOperation(self, label, reason):
+        """
+        Helper to emit a log event when an operation fails.
+        """
+        msg(
+            type="operation",
+            phase="failed",
+            user=self._client.record.uid,
+            client_type=self._client.title,
+            client_id=self._client._client_id,
+            label=label,
+            reason=reason,
+        )
+        self._sim._simFailure("%s: %s" % (label, reason,), self._reactor)
 
+
+
 class CannotAddAttendee(Exception):
     """
     Indicates no new attendees can be invited to a particular event.
@@ -156,11 +171,11 @@
         self,
         enabled=True,
         sendInvitationDistribution=NormalDistribution(600, 60),
-        inviteeDistanceDistribution=UniformDiscreteDistribution(range(-10, 11))
+        inviteeDistribution=UniformDiscreteDistribution(range(-10, 11))
     ):
         self.enabled = enabled
         self._sendInvitationDistribution = sendInvitationDistribution
-        self._inviteeDistanceDistribution = inviteeDistanceDistribution
+        self._inviteeDistribution = inviteeDistribution
 
 
     def run(self):
@@ -180,7 +195,7 @@
 
         for _ignore_i in range(10):
             invitee = max(
-                0, self._number + self._inviteeDistanceDistribution.sample())
+                0, self._number + self._inviteeDistribution.sample())
             try:
                 record = self._sim.getUserRecord(invitee)
             except IndexError:
@@ -236,12 +251,12 @@
             while events:
                 uuid = self.random.choice(events)
                 events.remove(uuid)
-                event = calendar.events[uuid].vevent
+                event = calendar.events[uuid].component
                 if event is None:
                     continue
 
-                vevent = event.mainComponent()
-                organizer = vevent.getOrganizerProperty()
+                component = event.mainComponent()
+                organizer = component.getOrganizerProperty()
                 if organizer is not None and not self._isSelfAttendee(organizer):
                     # This event was organized by someone else, don't try to invite someone to it.
                     continue
@@ -249,7 +264,7 @@
                 href = calendar.url + uuid
 
                 # Find out who might attend
-                attendees = tuple(vevent.properties('ATTENDEE'))
+                attendees = tuple(component.properties('ATTENDEE'))
 
                 d = self._addAttendee(event, attendees)
                 d.addCallbacks(
@@ -291,7 +306,8 @@
         self,
         enabled=True,
         sendInvitationDistribution=NormalDistribution(600, 60),
-        inviteeDistanceDistribution=UniformDiscreteDistribution(range(-10, 11)),
+        inviteeDistribution=UniformDiscreteDistribution(range(-10, 11)),
+        inviteeClumping=True,
         inviteeCountDistribution=LogNormalDistribution(1.2, 1.2),
         eventStartDistribution=NearFutureDistribution(),
         eventDurationDistribution=UniformDiscreteDistribution([
@@ -303,7 +319,8 @@
     ):
         self.enabled = enabled
         self._sendInvitationDistribution = sendInvitationDistribution
-        self._inviteeDistanceDistribution = inviteeDistanceDistribution
+        self._inviteeDistribution = inviteeDistribution
+        self._inviteeClumping = inviteeClumping
         self._inviteeCountDistribution = inviteeCountDistribution
         self._eventStartDistribution = eventStartDistribution
         self._eventDurationDistribution = eventDurationDistribution
@@ -326,8 +343,12 @@
             invitees.add(att.value())
 
         for _ignore_i in range(10):
-            invitee = max(
-                0, self._number + self._inviteeDistanceDistribution.sample())
+
+            sample = self._inviteeDistribution.sample()
+            if self._inviteeClumping:
+                sample = self._number + sample
+            invitee = max(0, sample)
+
             try:
                 record = self._sim.getUserRecord(invitee)
             except IndexError:
@@ -401,6 +422,7 @@
                 try:
                     self._addAttendee(vevent, attendees)
                 except CannotAddAttendee:
+                    self._failedOperation("invite", "Cannot add attendee")
                     return succeed(None)
 
             href = '%s%s.ics' % (calendar.url, uid)
@@ -448,10 +470,10 @@
         if href in self._accepting:
             return
 
-        vevent = self._client._events[href].vevent
+        component = self._client._events[href].component
         # Check to see if this user is in the attendee list in the
         # NEEDS-ACTION PARTSTAT.
-        attendees = tuple(vevent.mainComponent().properties('ATTENDEE'))
+        attendees = tuple(component.mainComponent().properties('ATTENDEE'))
         for attendee in attendees:
             if self._isSelfAttendee(attendee):
                 if attendee.parameterValue('PARTSTAT') == 'NEEDS-ACTION':
@@ -465,8 +487,8 @@
         if href in self._accepting:
             return
 
-        vevent = self._client._events[href].vevent
-        method = vevent.propertyValue('METHOD')
+        component = self._client._events[href].component
+        method = component.propertyValue('METHOD')
         if method == "REPLY":
             # Replies are immediately deleted
             self._accepting.add(href)
@@ -705,8 +727,9 @@
     logger.
     """
     formats = {
-        u"start": u"%(user)s - - - - - - - - - - - %(label)8s BEGIN %(lag)s",
-        u"end"  : u"%(user)s - - - - - - - - - - - %(label)8s END [%(duration)5.2f s]",
+        u"start" : u"%(user)s - - - - - - - - - - - %(label)8s BEGIN %(lag)s",
+        u"end"   : u"%(user)s - - - - - - - - - - - %(label)8s END [%(duration)5.2f s]",
+        u"failed": u"%(user)s x x x x x x x x x x x %(label)8s FAILED %(reason)s",
         }
 
     lagFormat = u'{lag %5.2f ms}'

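The profiles.py changes above rename inviteeDistanceDistribution to
inviteeDistribution, add an inviteeClumping switch, and report a new
"failed" log line via _failedOperation when an attendee cannot be added.
A minimal sketch of the clumping logic, with a stand-in for the
distribution class from contrib.performance.stats:

    import random

    class UniformDiscreteDistribution(object):
        # Stand-in for contrib.performance.stats.UniformDiscreteDistribution.
        def __init__(self, values):
            self._values = values

        def sample(self):
            return random.choice(self._values)

    def pickInvitee(number, inviteeDistribution, inviteeClumping):
        # With clumping on, the sample is an offset from this user's own
        # number, so invitees cluster around the inviter; with clumping
        # off, the sample is treated as an absolute user number.
        sample = inviteeDistribution.sample()
        if inviteeClumping:
            sample = number + sample
        return max(0, sample)

    dist = UniformDiscreteDistribution(range(-10, 11))
    print pickInvitee(50, dist, True)    # clusters around user 50
    print pickInvitee(50, dist, False)   # lands in 0..10 here
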
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/sim.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/sim.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/sim.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -244,7 +244,6 @@
             configTemplate = None
             server = 'http://127.0.0.1:8008'
             principalPathTemplate = "/principals/users/%s/"
-            webadminPort = None
             serializationPath = None
 
             if 'server' in config:
@@ -253,9 +252,6 @@
             if 'principalPathTemplate' in config:
                 principalPathTemplate = config['principalPathTemplate']
 
-            if 'webadmin' in config:
-                if config['webadmin']['enabled']:
-                    webadminPort = config['webadmin']['HTTPPort']
 
             if 'clientDataSerialization' in config:
                 if config['clientDataSerialization']['Enabled']:
@@ -289,7 +285,6 @@
             # Manager / observer process.
             server = ''
             principalPathTemplate = ''
-            webadminPort = None
             serializationPath = None
             arrival = None
             parameters = None
@@ -297,6 +292,11 @@
             configTemplate = config
             workerCount = 1
 
+        webadminPort = None
+        if 'webadmin' in config:
+            if config['webadmin']['enabled']:
+                webadminPort = config['webadmin']['HTTPPort']
+
         observers = []
         if 'observers' in config:
             for observer in config['observers']:

Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_ical.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_ical.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_ical.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -412,7 +412,7 @@
         When the C{vevent} attribute of an L{Event} instance is set,
         L{Event.getUID} returns the UID value from it.
         """
-        event = Event(u'/foo/bar', u'etag', Component.fromString(EVENT))
+        event = Event(None, u'/foo/bar', u'etag', Component.fromString(EVENT))
         self.assertEquals(event.getUID(), EVENT_UID)
 
 
@@ -421,7 +421,7 @@
         When an L{Event} has a C{vevent} attribute set to C{None},
         L{Event.getUID} returns C{None}.
         """
-        event = Event(u'/bar/baz', u'etag')
+        event = Event(None, u'/bar/baz', u'etag')
         self.assertIdentical(event.getUID(), None)
 
 
@@ -1158,11 +1158,13 @@
         TimezoneCache.create()
         self.record = _DirectoryRecord(
             u"user91", u"user91", u"User 91", u"user91 at example.org")
+        serializePath = self.mktemp()
+        os.mkdir(serializePath)
         self.client = OS_X_10_6(
             None,
             "http://127.0.0.1",
             "/principals/users/%s/",
-            None,
+            serializePath,
             self.record,
             None,
         )
@@ -1282,7 +1284,7 @@
         old = attendees[0]
         new = old.duplicate()
         new.setParameter('CN', 'Some Other Guy')
-        event = Event(u'/some/calendar/1234.ics', None, vevent)
+        event = Event(None, u'/some/calendar/1234.ics', None, vevent)
         self.client._events[event.url] = event
         self.client.changeEventAttendee(event.url, old, new)
 
@@ -1445,7 +1447,7 @@
         requests = self.interceptRequests()
 
         calendar = Calendar(caldavxml.calendar, set(('VEVENT',)), u'calendar', u'/foo/', None)
-        event = Event(calendar.url + u'bar.ics', None)
+        event = Event(None, calendar.url + u'bar.ics', None)
         self.client._calendars[calendar.url] = calendar
         self.client._setEvent(event.url, event)
 
@@ -1474,9 +1476,43 @@
         """
         L{OS_X_10_6.serialize} properly generates a JSON document.
         """
+        clientPath = os.path.join(self.client.serializePath, "user91-OS_X_10.6")
+        self.assertFalse(os.path.exists(clientPath))
+        indexPath = os.path.join(clientPath, "index.json")
+        self.assertFalse(os.path.exists(indexPath))
+
+        cal1 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Apple Inc.//iCal 4.0.3//EN
+BEGIN:VEVENT
+UID:004f8e41-b071-4b30-bb3b-6aada4adcc10
+DTSTART:20120817T113000
+DTEND:20120817T114500
+DTSTAMP:20120815T154420Z
+SEQUENCE:2
+SUMMARY:Simple event
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n")
+        cal2 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+METHOD:REQUEST
+PRODID:-//Apple Inc.//iCal 4.0.3//EN
+BEGIN:VEVENT
+UID:00a79cad-857b-418e-a54a-340b5686d747
+DTSTART:20120817T113000
+DTEND:20120817T114500
+DTSTAMP:20120815T154420Z
+SEQUENCE:2
+SUMMARY:Simple event
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n")
         events = (
-            Event(u'/home/calendar/1.ics', u'123.123', "BEGIN:VALENDAR\r\nEND:VCALENDAR\r\n"),
-            Event(u'/home/inbox/i1.ics', u'123.123', "BEGIN:VALENDAR\r\nMETHOD:REQUEST\r\nEND:VCALENDAR\r\n"),
+            Event(self.client.serializeLocation(), u'/home/calendar/1.ics', u'123.123', Component.fromString(cal1)),
+            Event(self.client.serializeLocation(), u'/home/inbox/i1.ics', u'123.123', Component.fromString(cal2)),
         )
         self.client._events.update(dict([[event.url, event] for event in events]))
 
@@ -1488,16 +1524,11 @@
         self.client._calendars.update(dict([[calendar.url, calendar] for calendar in calendars]))
         self.client._calendars["/home/calendar/"].events["1.ics"] = events[0]
         self.client._calendars["/home/inbox/"].events["i1.ics"] = events[1]
-    
-        tmp = self.mktemp()
-        os.mkdir(tmp)
-        self.client.serializePath = tmp
-        tmpPath = os.path.join(tmp, "user91-OS_X_10.6.json")
-        self.assertFalse(os.path.exists(tmpPath))
 
         self.client.serialize()
-        self.assertTrue(os.path.exists(tmpPath))
-        self.assertEqual(open(tmpPath).read(), """{
+        self.assertTrue(os.path.exists(clientPath))
+        self.assertTrue(os.path.exists(indexPath))
+        self.assertEqual(open(indexPath).read(), """{
   "calendars": [
     {
       "changeToken": "123", 
@@ -1541,28 +1572,65 @@
       "url": "/home/calendar/1.ics", 
       "scheduleTag": null, 
       "etag": "123.123", 
-      "icalendar": "BEGIN:VALENDAR\\r\\nEND:VCALENDAR\\r\\n"
+      "uid": "004f8e41-b071-4b30-bb3b-6aada4adcc10"
     }, 
     {
       "url": "/home/inbox/i1.ics", 
       "scheduleTag": null, 
       "etag": "123.123", 
-      "icalendar": "BEGIN:VALENDAR\\r\\nMETHOD:REQUEST\\r\\nEND:VCALENDAR\\r\\n"
+      "uid": "00a79cad-857b-418e-a54a-340b5686d747"
     }
   ]
 }""")
 
+        event1Path = os.path.join(clientPath, "calendar", "1.ics")
+        self.assertTrue(os.path.exists(event1Path))
+        self.assertEqual(open(event1Path).read(), cal1)
 
+        event2Path = os.path.join(clientPath, "inbox", "i1.ics")
+        self.assertTrue(os.path.exists(event2Path))
+        self.assertEqual(open(event2Path).read(), cal2)
+
+
     def test_deserialization(self):
         """
         L{OS_X_10_6.deserialize} properly parses a JSON document.
         """
 
-        tmp = self.mktemp()
-        os.mkdir(tmp)
-        self.client.serializePath = tmp
-        tmpPath = os.path.join(tmp, "user91-OS_X_10.6.json")
-        open(tmpPath, "w").write("""{
+        cal1 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Apple Inc.//iCal 4.0.3//EN
+BEGIN:VEVENT
+UID:004f8e41-b071-4b30-bb3b-6aada4adcc10
+DTSTART:20120817T113000
+DTEND:20120817T114500
+DTSTAMP:20120815T154420Z
+SEQUENCE:2
+SUMMARY:Simple event
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n")
+        cal2 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+METHOD:REQUEST
+PRODID:-//Apple Inc.//iCal 4.0.3//EN
+BEGIN:VEVENT
+UID:00a79cad-857b-418e-a54a-340b5686d747
+DTSTART:20120817T113000
+DTEND:20120817T114500
+DTSTAMP:20120815T154420Z
+SEQUENCE:2
+SUMMARY:Simple event
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n")
+
+        clientPath = os.path.join(self.client.serializePath, "user91-OS_X_10.6")
+        os.mkdir(clientPath)
+        indexPath = os.path.join(clientPath, "index.json")
+        open(indexPath, "w").write("""{
   "calendars": [
     {
       "changeToken": "321", 
@@ -1606,17 +1674,24 @@
       "url": "/home/calendar/2.ics", 
       "scheduleTag": null, 
       "etag": "321.321", 
-      "icalendar": "BEGIN:VCALENDAR\\r\\nVERSION:2.0\\r\\nPRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN\\r\\nBEGIN:VEVENT\\r\\nUID:put-1 at example.com\\r\\nDTSTART:20110427\\r\\nDURATION:P1DT\\r\\nDTSTAMP:20051222T205953Z\\r\\nSUMMARY:event 1\\r\\nEND:VEVENT\\r\\nEND:VCALENDAR\\r\\n"
+      "uid": "004f8e41-b071-4b30-bb3b-6aada4adcc10"
     }, 
     {
       "url": "/home/inbox/i2.ics", 
       "scheduleTag": null, 
       "etag": "987.987", 
-      "icalendar": "BEGIN:VCALENDAR\\r\\nVERSION:2.0\\r\\nMETHOD:REQUEST\\r\\nPRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN\\r\\nBEGIN:VEVENT\\r\\nUID:put-1 at example.com\\r\\nDTSTART:20110427\\r\\nDURATION:P1DT\\r\\nDTSTAMP:20051222T205953Z\\r\\nSUMMARY:event 1\\r\\nEND:VEVENT\\r\\nEND:VCALENDAR\\r\\n"
+      "uid": "00a79cad-857b-418e-a54a-340b5686d747"
     }
   ]
 }""")
 
+        os.mkdir(os.path.join(clientPath, "calendar"))
+        event1Path = os.path.join(clientPath, "calendar", "2.ics")
+        open(event1Path, "w").write(cal1)
+        os.mkdir(os.path.join(clientPath, "inbox"))
+        event1Path = os.path.join(clientPath, "inbox", "i2.ics")
+        open(event1Path, "w").write(cal2)
+
         self.client.deserialize()
 
         self.assertEqual(len(self.client._calendars), 3)
@@ -1632,8 +1707,13 @@
         self.assertTrue("/home/calendar/2.ics" in self.client._events)
         self.assertEqual(self.client._events["/home/calendar/2.ics"].scheduleTag, None)
         self.assertEqual(self.client._events["/home/calendar/2.ics"].etag, "321.321")
-        self.assertEqual(self.client._events["/home/calendar/2.ics"].getUID(), "put-1 at example.com")
+        self.assertEqual(self.client._events["/home/calendar/2.ics"].getUID(), "004f8e41-b071-4b30-bb3b-6aada4adcc10")
+        self.assertEqual(str(self.client._events["/home/calendar/2.ics"].component), cal1)
         self.assertTrue("/home/inbox/i2.ics" in self.client._events)
+        self.assertEqual(self.client._events["/home/inbox/i2.ics"].scheduleTag, None)
+        self.assertEqual(self.client._events["/home/inbox/i2.ics"].etag, "987.987")
+        self.assertEqual(self.client._events["/home/inbox/i2.ics"].getUID(), "00a79cad-857b-418e-a54a-340b5686d747")
+        self.assertEqual(str(self.client._events["/home/inbox/i2.ics"].component), cal2)
 
 
 class UpdateCalendarTests(OS_X_10_6Mixin, TestCase):

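The serialization tests above pin down the new on-disk layout: instead of
one JSON file with inlined iCalendar text, each client gets a directory
named <uid>-<title> (here "user91-OS_X_10.6") under serializePath, holding
an index.json of calendar/event metadata (with a per-event uid) plus one
.ics file per event, grouped by collection. A sketch of reading that
layout back (the top-level "events" key in index.json is inferred from the
expected JSON above):

    import json
    import os

    def loadSerialized(clientPath):
        # clientPath is e.g. <serializePath>/user91-OS_X_10.6
        index = json.load(open(os.path.join(clientPath, "index.json")))
        icalData = {}
        for meta in index["events"]:
            # An event URL such as /home/calendar/1.ics is stored on
            # disk as <clientPath>/calendar/1.ics
            collection, name = meta["url"].rsplit("/", 2)[-2:]
            icalData[meta["url"]] = open(
                os.path.join(clientPath, collection, name)).read()
        return index, icalData
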
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_population.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_population.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_population.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -57,8 +57,8 @@
 
     def test_clientFailures(self):
         """
-        L{ReportStatistics.countClients} returns the number of clients observed to
-        have acted in the simulation.
+        L{ReportStatistics.countClientFailures} returns the number of clients observed to
+        have failed in the simulation.
         """
         logger = ReportStatistics()
         clients = ['c01', 'c02', 'c03']
@@ -68,6 +68,19 @@
         self.assertEqual(len(clients), logger.countClientFailures())
 
 
+    def test_simFailures(self):
+        """
+        L{ReportStatistics.countSimFailures} returns the number of clients observed to
+        have caused an error in the simulation.
+        """
+        logger = ReportStatistics()
+        clients = ['c01', 'c02', 'c03']
+        for client in clients:
+            logger.observe(dict(
+                    type='sim-failure', reason="testing %s" % (client,)))
+        self.assertEqual(len(clients), logger.countSimFailures())
+
+
     def test_noFailures(self):
         """
         If fewer than 1% of requests fail, fewer than 1% of requests take 5
@@ -163,7 +176,7 @@
 
     def test_bucketRequest(self):
         """
-        PUT(xxx-large/medium/small} have different thresholds. Test that requests straddling
+        PUT{xxx-huge/large/medium/small} have different thresholds. Test that requests straddling
         each of those are correctly determined to be failures or not.
         """
         
@@ -175,6 +188,7 @@
                     "PUT{organizer-small}": [ 100.0,  50.0,  25.0,   5.0,   1.0,   0.5,   0.0],
                     "PUT{organizer-medium}":[ 100.0, 100.0,  50.0,  25.0,   5.0,   1.0,   0.5],
                     "PUT{organizer-large}": [ 100.0, 100.0, 100.0,  50.0,  25.0,   5.0,   1.0],
+                    "PUT{organizer-huge}":  [ 100.0, 100.0, 100.0, 100.0, 100.0,  50.0,  25.0],
                 }
             }
         }
@@ -290,3 +304,40 @@
             logger.failures()
         )
 
+        # -huge below 10.0 threshold
+        logger = ReportStatistics(thresholds=_thresholds)
+        logger.observe(dict(
+                type='response', method='PUT{organizer-huge}', success=True,
+                duration=12.0, user='user01', client_type="test", client_id="1234"))
+        logger.observe(dict(
+                type='response', method='PUT{organizer-huge}', success=True,
+                duration=8, user='user01', client_type="test", client_id="1234"))
+        logger.observe(dict(
+                type='response', method='PUT{organizer-huge}', success=True,
+                duration=11.0, user='user01', client_type="test", client_id="1234"))
+        logger.observe(dict(
+                type='response', method='PUT{organizer-huge}', success=True,
+                duration=9.0, user='user01', client_type="test", client_id="1234"))
+        self.assertEqual(
+            [],
+            logger.failures()
+        )
+        
+        # -huge above 10.0 threshold
+        logger = ReportStatistics(thresholds=_thresholds)
+        logger.observe(dict(
+                type='response', method='PUT{organizer-huge}', success=True,
+                duration=12.0, user='user01', client_type="test", client_id="1234"))
+        logger.observe(dict(
+                type='response', method='PUT{organizer-huge}', success=True,
+                duration=9.0, user='user01', client_type="test", client_id="1234"))
+        logger.observe(dict(
+                type='response', method='PUT{organizer-huge}', success=True,
+                duration=12.0, user='user01', client_type="test", client_id="1234"))
+        logger.observe(dict(
+                type='response', method='PUT{organizer-huge}', success=True,
+                duration=42.42, user='user01', client_type="test", client_id="1234"))
+        self.assertEqual(
+            ["Greater than 50% PUT{organizer-huge} exceeded 10 second response time"],
+            logger.failures()
+        )

Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_profiles.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_profiles.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/test_profiles.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -37,6 +37,8 @@
 from contrib.performance.loadtest.ical import IncorrectResponseCode, Calendar, Event, BaseClient
 from contrib.performance.loadtest.sim import _DirectoryRecord
 
+import os
+
 SIMPLE_EVENT = """\
 BEGIN:VCALENDAR
 VERSION:2.0
@@ -212,7 +214,10 @@
         attendee changes due to a changed schedule tag.
     @ivar _pendingFailures: dict mapping URLs to failure objects
     """
-    def __init__(self, number):
+    def __init__(self, number, serializePath):
+        self.serializePath = serializePath
+        os.mkdir(self.serializePath)
+        self.title = "StubClient"
         self._events = {}
         self._calendars = {}
         self._pendingFailures = {}
@@ -232,8 +237,25 @@
         self._pendingFailures[href] = failureObject
 
 
+    def serializeLocation(self):
+        """
+        Return the path to the directory where data for this user is serialized.
+        """
+        if self.serializePath is None or not os.path.isdir(self.serializePath):
+            return None
+        
+        key = "%s-%s" % (self.record.uid, "StubClient")
+        path = os.path.join(self.serializePath, key)
+        if not os.path.exists(path):
+            os.mkdir(path)
+        elif not os.path.isdir(path):
+            return None
+        
+        return path
+
+    
     def addEvent(self, href, vevent):
-        self._events[href] = Event(href, None, vevent)
+        self._events[href] = Event(self.serializePath, href, None, vevent)
         return succeed(None)
 
 
@@ -257,8 +279,9 @@
 
 
     def addEventAttendee(self, href, attendee):
-        vevent = self._events[href].vevent
+        vevent = self._events[href].component
         vevent.mainComponent().addProperty(attendee)
+        self._events[href].component = vevent
 
 
     def changeEventAttendee(self, href, old, new):
@@ -269,9 +292,10 @@
                         ('HTTP', 1, 1), PRECONDITION_FAILED,
                         'Precondition Failed', None, None)))
 
-        vevent = self._events[href].vevent
+        vevent = self._events[href].component
         vevent.mainComponent().removeProperty(old)
         vevent.mainComponent().addProperty(new)
+        self._events[href].component = vevent
         return succeed(None)
 
 
@@ -320,21 +344,24 @@
 
 
     def _simpleAccount(self, userNumber, eventText):
+        client = StubClient(userNumber, self.mktemp())
+
         vevent = Component.fromString(eventText)
         calendar = Calendar(
             caldavxml.calendar, set(('VEVENT',)), u'calendar', u'/cal/', None)
-        event = Event(calendar.url + u'1234.ics', None, vevent)
-        calendar.events = {u'1234.ics': event}
-        client = StubClient(userNumber)
-        client._events.update({event.url: event})
         client._calendars.update({calendar.url: calendar})
 
+        event = Event(client.serializeLocation(), calendar.url + u'1234.ics', None, vevent)
+
+        client._events.update({event.url: event})
+        calendar.events = {u'1234.ics': event}
+
         return vevent, event, calendar, client
 
 
     def test_enabled(self):
         userNumber = 13
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
 
         inviter = Inviter(None, self.sim, client, userNumber, **{"enabled":False})
         self.assertEqual(inviter.enabled, False)
@@ -342,6 +369,7 @@
         inviter = Inviter(None, self.sim, client, userNumber, **{"enabled":True})
         self.assertEqual(inviter.enabled, True)
 
+
     def test_doNotAddAttendeeToInbox(self):
         """
         When the only calendar with any events is a schedule inbox, no
@@ -362,7 +390,7 @@
         does nothing.
         """
         userNumber = 13
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         inviter = Inviter(None, self.sim, client, userNumber)
         inviter._invite()
         self.assertEquals(client._events, {})
@@ -378,7 +406,7 @@
         userNumber = 19
         _ignore_vevent, event, calendar, client = self._simpleAccount(
             userNumber, SIMPLE_EVENT)
-        event.vevent = event.etag = event.scheduleTag = None
+        event.component = event.etag = event.scheduleTag = None
         inviter = Inviter(None, self.sim, client, userNumber)
         inviter._invite()
         self.assertEquals(client._events, {event.url: event})
@@ -391,12 +419,12 @@
         attendee to it.
         """
         userNumber = 16
-        vevent, _ignore_event, _ignore_calendar, client = self._simpleAccount(
+        _ignore_vevent, event, _ignore_calendar, client = self._simpleAccount(
             userNumber, SIMPLE_EVENT)
         inviter = Inviter(Clock(), self.sim, client, userNumber)
-        inviter.setParameters(inviteeDistanceDistribution=Deterministic(1))
+        inviter.setParameters(inviteeDistribution=Deterministic(1))
         inviter._invite()
-        attendees = tuple(vevent.mainComponent().properties('ATTENDEE'))
+        attendees = tuple(event.component.mainComponent().properties('ATTENDEE'))
         self.assertEquals(len(attendees), 1)
         for paramname, paramvalue in {
             'CN': 'User %d' % (userNumber + 1,),
@@ -416,16 +444,16 @@
         the attendee list, a different user is added instead.
         """
         selfNumber = 12
-        vevent, _ignore_event, _ignore_calendar, client = self._simpleAccount(
+        _ignore_vevent, event, _ignore_calendar, client = self._simpleAccount(
             selfNumber, SIMPLE_EVENT)
 
         otherNumber = 20
         values = [selfNumber - selfNumber, otherNumber - selfNumber]
 
         inviter = Inviter(Clock(), self.sim, client, selfNumber)
-        inviter.setParameters(inviteeDistanceDistribution=SequentialDistribution(values))
+        inviter.setParameters(inviteeDistribution=SequentialDistribution(values))
         inviter._invite()
-        attendees = tuple(vevent.mainComponent().properties('ATTENDEE'))
+        attendees = tuple(event.component.mainComponent().properties('ATTENDEE'))
         self.assertEquals(len(attendees), 1)
         for paramname, paramvalue in {
             'CN': 'User %d' % (otherNumber,),
@@ -445,18 +473,18 @@
         invitee on the event, a different user is added instead.
         """
         selfNumber = 1
-        vevent, _ignore_event, _ignore_calendar, client = self._simpleAccount(
+        _ignore_vevent, event, _ignore_calendar, client = self._simpleAccount(
             selfNumber, INVITED_EVENT)
 
-        invitee = tuple(vevent.mainComponent().properties('ATTENDEE'))[0]
+        invitee = tuple(event.component.mainComponent().properties('ATTENDEE'))[0]
         inviteeNumber = int(invitee.parameterValue('CN').split()[1])
         anotherNumber = inviteeNumber + 5
         values = [inviteeNumber - selfNumber, anotherNumber - selfNumber]
 
         inviter = Inviter(Clock(), self.sim, client, selfNumber)
-        inviter.setParameters(inviteeDistanceDistribution=SequentialDistribution(values))
+        inviter.setParameters(inviteeDistribution=SequentialDistribution(values))
         inviter._invite()
-        attendees = tuple(vevent.mainComponent().properties('ATTENDEE'))
+        attendees = tuple(event.component.mainComponent().properties('ATTENDEE'))
         self.assertEquals(len(attendees), 3)
         for paramname, paramvalue in {
             'CN': 'User %02d' % (anotherNumber,),
@@ -480,7 +508,7 @@
             selfNumber, INVITED_EVENT)
         inviter = Inviter(Clock(), self.sim, client, selfNumber)
         # Always return a user number which has already been invited.
-        inviter.setParameters(inviteeDistanceDistribution=Deterministic(2 - selfNumber))
+        inviter.setParameters(inviteeDistribution=Deterministic(2 - selfNumber))
         inviter._invite()
         attendees = tuple(vevent.mainComponent().properties('ATTENDEE'))
         self.assertEquals(len(attendees), 2)
@@ -517,12 +545,12 @@
 
 
     def _simpleAccount(self, userNumber, eventText):
+        client = StubClient(userNumber, self.mktemp())
         vevent = Component.fromString(eventText)
         calendar = Calendar(
             caldavxml.calendar, set(('VEVENT',)), u'calendar', u'/cal/', None)
-        event = Event(calendar.url + u'1234.ics', None, vevent)
+        event = Event(client.serializeLocation(), calendar.url + u'1234.ics', None, vevent)
         calendar.events = {u'1234.ics': event}
-        client = StubClient(userNumber)
         client._events.update({event.url: event})
         client._calendars.update({calendar.url: calendar})
 
@@ -531,7 +559,7 @@
 
     def test_enabled(self):
         userNumber = 13
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
 
         inviter = RealisticInviter(None, self.sim, client, userNumber, **{"enabled":False})
         self.assertEqual(inviter.enabled, False)
@@ -547,7 +575,7 @@
         calendar = Calendar(
             caldavxml.schedule_inbox, set(), u'inbox', u'/sched/inbox', None)
         userNumber = 13
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         client._calendars.update({calendar.url: calendar})
 
         inviter = RealisticInviter(None, self.sim, client, userNumber, **{"enabled":False})
@@ -562,7 +590,7 @@
         does nothing.
         """
         userNumber = 13
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         inviter = RealisticInviter(None, self.sim, client, userNumber)
         inviter._invite()
         self.assertEquals(client._events, {})
@@ -576,16 +604,18 @@
         calendar = Calendar(
             caldavxml.calendar, set(('VEVENT',)), u'personal stuff', u'/cals/personal', None)
         userNumber = 16
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         client._calendars.update({calendar.url: calendar})
         inviter = RealisticInviter(Clock(), self.sim, client, userNumber)
         inviter.setParameters(
-            inviteeDistanceDistribution=Deterministic(1),
+            inviteeDistribution=Deterministic(1),
             inviteeCountDistribution=Deterministic(1)
         )
         inviter._invite()
         self.assertEquals(len(client._events), 1)
-        attendees = tuple(client._events.values()[0].vevent.mainComponent().properties('ATTENDEE'))
+        attendees = tuple(client._events.values()[0].component.mainComponent().properties('ATTENDEE'))
         expected = set(("mailto:user%02d at example.com" %  (userNumber,), "mailto:user%02d at example.com" %  (userNumber + 1,),))
         for attendee in attendees:
             expected.remove(attendee.value())
@@ -601,7 +631,7 @@
         calendar = Calendar(
             caldavxml.calendar, set(('VEVENT',)), u'personal stuff', u'/cals/personal', None)
         selfNumber = 12
-        client = StubClient(selfNumber)
+        client = StubClient(selfNumber, self.mktemp())
         client._calendars.update({calendar.url: calendar})
 
         otherNumber = 20
@@ -609,12 +639,12 @@
 
         inviter = RealisticInviter(Clock(), self.sim, client, selfNumber)
         inviter.setParameters(
-            inviteeDistanceDistribution=SequentialDistribution(values),
+            inviteeDistribution=SequentialDistribution(values),
             inviteeCountDistribution=Deterministic(1)
         )
         inviter._invite()
         self.assertEquals(len(client._events), 1)
-        attendees = tuple(client._events.values()[0].vevent.mainComponent().properties('ATTENDEE'))
+        attendees = tuple(client._events.values()[0].component.mainComponent().properties('ATTENDEE'))
         expected = set(("mailto:user%02d at example.com" %  (selfNumber,), "mailto:user%02d at example.com" %  (otherNumber,),))
         for attendee in attendees:
             expected.remove(attendee.value())
@@ -630,7 +660,7 @@
         calendar = Calendar(
             caldavxml.calendar, set(('VEVENT',)), u'personal stuff', u'/cals/personal', None)
         selfNumber = 1
-        client = StubClient(selfNumber)
+        client = StubClient(selfNumber, self.mktemp())
         client._calendars.update({calendar.url: calendar})
 
         inviteeNumber = 20
@@ -639,12 +669,12 @@
 
         inviter = RealisticInviter(Clock(), self.sim, client, selfNumber)
         inviter.setParameters(
-            inviteeDistanceDistribution=SequentialDistribution(values),
+            inviteeDistribution=SequentialDistribution(values),
             inviteeCountDistribution=Deterministic(2)
         )
         inviter._invite()
         self.assertEquals(len(client._events), 1)
-        attendees = tuple(client._events.values()[0].vevent.mainComponent().properties('ATTENDEE'))
+        attendees = tuple(client._events.values()[0].component.mainComponent().properties('ATTENDEE'))
         expected = set((
             "mailto:user%02d at example.com" %  (selfNumber,),
             "mailto:user%02d at example.com" %  (inviteeNumber,),
@@ -664,11 +694,11 @@
         calendar = Calendar(
             caldavxml.calendar, set(('VEVENT',)), u'personal stuff', u'/cals/personal', None)
         userNumber = 1
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         client._calendars.update({calendar.url: calendar})
         inviter = RealisticInviter(Clock(), self.sim, client, userNumber)
         inviter.setParameters(
-            inviteeDistanceDistribution=Deterministic(1),
+            inviteeDistribution=Deterministic(1),
             inviteeCountDistribution=Deterministic(2)
         )
         inviter._invite()
@@ -687,7 +717,7 @@
 
     def test_enabled(self):
         userNumber = 13
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
 
         accepter = Accepter(None, self.sim, client, userNumber, **{"enabled":False})
         self.assertEqual(accepter.enabled, False)
@@ -700,7 +730,7 @@
         If an event on an unknown calendar changes, it is ignored.
         """
         userNumber = 13
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         accepter = Accepter(None, self.sim, client, userNumber)
         accepter.eventChanged('/some/calendar/1234.ics')
 
@@ -714,7 +744,7 @@
         calendarURL = '/some/calendar/'
         calendar = Calendar(
             csxml.dropbox_home, set(), u'notification', calendarURL, None)
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         client._calendars[calendarURL] = calendar
         accepter = Accepter(None, self.sim, client, userNumber)
         accepter.eventChanged(calendarURL + '1234.ics')
@@ -731,9 +761,9 @@
         calendarURL = '/some/calendar/'
         calendar = Calendar(
             caldavxml.calendar, set(('VEVENT',)), u'calendar', calendarURL, None)
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         client._calendars[calendarURL] = calendar
-        event = Event(calendarURL + u'1234.ics', None, vevent)
+        event = Event(client.serializeLocation(), calendarURL + u'1234.ics', None, vevent)
         client._events[event.url] = event
         accepter = Accepter(None, self.sim, client, userNumber)
         accepter.eventChanged(event.url)
@@ -754,9 +784,9 @@
         calendarURL = '/some/calendar/'
         calendar = Calendar(
             caldavxml.calendar, set(('VEVENT',)), u'calendar', calendarURL, None)
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         client._calendars[calendarURL] = calendar
-        event = Event(calendarURL + u'1234.ics', None, vevent)
+        event = Event(client.serializeLocation(), calendarURL + u'1234.ics', None, vevent)
         client._events[event.url] = event
         accepter = Accepter(clock, self.sim, client, userNumber)
         accepter.random = Deterministic()
@@ -777,10 +807,10 @@
         vevent = Component.fromString(INBOX_REPLY)
         inbox = Calendar(
             caldavxml.schedule_inbox, set(), u'the inbox', inboxURL, None)
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         client._calendars[inboxURL] = inbox
 
-        inboxEvent = Event(inboxURL + u'4321.ics', None, vevent)
+        inboxEvent = Event(client.serializeLocation(), inboxURL + u'4321.ics', None, vevent)
         client._setEvent(inboxEvent.url, inboxEvent)
         accepter = Accepter(clock, self.sim, client, userNumber) 
         accepter.eventChanged(inboxEvent.url)
@@ -801,10 +831,10 @@
         vevent = Component.fromString(INBOX_REPLY)
         inbox = Calendar(
             caldavxml.schedule_inbox, set(), u'the inbox', inboxURL, None)
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         client._calendars[inboxURL] = inbox
 
-        inboxEvent = Event(inboxURL + u'4321.ics', None, vevent)
+        inboxEvent = Event(client.serializeLocation(), inboxURL + u'4321.ics', None, vevent)
         client._setEvent(inboxEvent.url, inboxEvent)
         client._failDeleteWithObject(inboxEvent.url, IncorrectResponseCode(
                     NO_CONTENT,
@@ -830,7 +860,7 @@
         vevent = Component.fromString(INVITED_EVENT)
         attendees = tuple(vevent.mainComponent().properties('ATTENDEE'))
         userNumber = int(attendees[1].parameterValue('CN').split(None, 1)[1])
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
 
         calendarURL = '/some/calendar/'
         calendar = Calendar(
@@ -842,10 +872,10 @@
             caldavxml.schedule_inbox, set(), u'the inbox', inboxURL, None)
         client._calendars[inboxURL] = inbox
 
-        event = Event(calendarURL + u'1234.ics', None, vevent)
+        event = Event(client.serializeLocation(), calendarURL + u'1234.ics', None, vevent)
         client._setEvent(event.url, event)
 
-        inboxEvent = Event(inboxURL + u'4321.ics', None, vevent)
+        inboxEvent = Event(client.serializeLocation(), inboxURL + u'4321.ics', None, vevent)
         client._setEvent(inboxEvent.url, inboxEvent)
 
         accepter = Accepter(clock, self.sim, client, userNumber)
@@ -853,7 +883,7 @@
         accepter.eventChanged(event.url)
         clock.advance(randomDelay)
 
-        vevent = client._events[event.url].vevent
+        vevent = client._events[event.url].component
         attendees = tuple(vevent.mainComponent().properties('ATTENDEE'))
         self.assertEquals(len(attendees), 2)
         self.assertEquals(
@@ -880,9 +910,9 @@
         calendarURL = '/some/calendar/'
         calendar = Calendar(
             caldavxml.calendar, set(('VEVENT',)), u'calendar', calendarURL, None)
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         client._calendars[calendarURL] = calendar
-        event = Event(calendarURL + u'1234.ics', None, vevent)
+        event = Event(client.serializeLocation(), calendarURL + u'1234.ics', None, vevent)
         client._events[event.url] = event
         accepter = Accepter(clock, self.sim, client, userNumber)
         accepter.setParameters(acceptDelayDistribution=Deterministic(randomDelay))
@@ -890,14 +920,14 @@
         clock.advance(randomDelay)
 
         # Now re-set the event so it has to be accepted again
-        event.vevent = Component.fromString(INVITED_EVENT)
+        event.component = Component.fromString(INVITED_EVENT)
 
         # And now re-deliver it
         accepter.eventChanged(event.url)
         clock.advance(randomDelay)
 
         # And ensure that it was accepted again
-        vevent = client._events[event.url].vevent
+        vevent = client._events[event.url].component
         attendees = tuple(vevent.mainComponent().properties('ATTENDEE'))
         self.assertEquals(len(attendees), 2)
         self.assertEquals(
@@ -915,7 +945,7 @@
         """
         clock = Clock()
         userNumber = 2
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
         randomDelay = 3
 
         calendarURL = '/some/calendar/'
@@ -924,7 +954,7 @@
         client._calendars[calendarURL] = calendar
 
         vevent = Component.fromString(INVITED_EVENT)
-        event = Event(calendarURL + u'1234.ics', None, vevent)
+        event = Event(client.serializeLocation(), calendarURL + u'1234.ics', None, vevent)
         client._setEvent(event.url, event)
 
         accepter = Accepter(clock, self.sim, client, userNumber)
@@ -951,7 +981,7 @@
 
     def test_enabled(self):
         userNumber = 13
-        client = StubClient(userNumber)
+        client = StubClient(userNumber, self.mktemp())
 
         eventer = Eventer(None, self.sim, client, None, **{"enabled":False})
         self.assertEqual(eventer.enabled, False)
@@ -966,7 +996,7 @@
         """
         calendar = Calendar(
             caldavxml.schedule_inbox, set(), u'inbox', u'/sched/inbox', None)
-        client = StubClient(21)
+        client = StubClient(21, self.mktemp())
         client._calendars.update({calendar.url: calendar})
 
         eventer = Eventer(None, self.sim, client, None)
@@ -982,7 +1012,7 @@
         """
         calendar = Calendar(
             caldavxml.calendar, set(('VEVENT',)), u'personal stuff', u'/cals/personal', None)
-        client = StubClient(31)
+        client = StubClient(31, self.mktemp())
         client._calendars.update({calendar.url: calendar})
 
         eventer = Eventer(Clock(), self.sim, client, None)

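The StubClient changes above mirror the real client's serialization
contract: the stub takes ownership of a serializePath (created in its
__init__) and exposes serializeLocation(), so tests can hand Event a real
directory. Typical wiring inside a test, per the diff (mktemp comes from
trial's TestCase):

    client = StubClient(13, self.mktemp())  # StubClient mkdirs the path
    location = client.serializeLocation()   # <serializePath>/<uid>-StubClient
    event = Event(location, u'/cal/1234.ics', None, vevent)
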
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/thresholds.json
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/thresholds.json	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/loadtest/thresholds.json	2012-08-30 22:48:54 UTC (rev 9757)
@@ -8,23 +8,26 @@
 			"GET{event}"                    : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   5.0],
 	
 			"PUT{event}"                    : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   0.5],
-			"PUT{attendee-small}"           : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   0.5],
+			"PUT{attendee-small}"           : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   5.0],
 			"PUT{attendee-medium}"          : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   5.0], 
 			"PUT{attendee-large}"           : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   5.0],
-			"PUT{organizer-small}"          : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   0.5], 
-			"PUT{organizer-medium}"         : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   0.5],
+			"PUT{attendee-huge}"            : [ 100.0, 100.0, 100.0, 100.0, 100.0,  50.0,  25.0],
+			"PUT{organizer-small}"          : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   5.0], 
+			"PUT{organizer-medium}"         : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   5.0],
 			"PUT{organizer-large}"          : [ 100.0, 100.0, 100.0, 100.0, 100.0,  75.0,  25.0],
+			"PUT{organizer-huge}"           : [ 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0],
 	
-			"DELETE{event}"                 : [ 100.0,  50.0,  25.0,   5.0,   1.0,   0.5,   0.0],
+			"DELETE{event}"                 : [ 100.0, 100.0,  50.0,  25.0,  10.0,   5.0,   1.0],
 	
 			"POST{fb-small}"                : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   5.0],
 			"POST{fb-medium}"               : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   5.0], 
 			"POST{fb-large}"                : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,   5.0], 
+			"POST{fb-huge}"                 : [ 100.0, 100.0, 100.0, 100.0,  75.0,  50.0,  25.0], 
 	
 			"PROPFIND{well-known}"          : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
 			"PROPFIND{find-principal}"      : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
 			"PROPFIND{principal}"           : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
-			"PROPFIND{home}"                : [ 100.0, 100.0, 100.0,  50.0,  10.0,   0.5,   0.0],
+			"PROPFIND{home}"                : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,  10.0],
 			"PROPFIND{calendar}"            : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
 			"PROPFIND{notification}"        : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
 			"PROPFIND{notification-items}"  : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
@@ -35,12 +38,13 @@
 			"REPORT{expand}"                : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
 			"REPORT{psearch}"               : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
 			"REPORT{sync-init}"             : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
-			"REPORT{sync}"                  : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
+			"REPORT{sync}"                  : [ 100.0, 100.0, 100.0,  50.0,  10.0,   5.0,   1.0],
 			"REPORT{vevent}"                : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
 			"REPORT{vtodo}"                 : [ 100.0, 100.0, 100.0,   5.0,   1.0,   0.5,   0.0],
-			"REPORT{multiget-small}"        : [ 100.0,  50.0,  25.0,   5.0,   1.0,   0.5,   0.0],
-			"REPORT{multiget-medium}"       : [ 100.0, 100.0,  50.0,  25.0,   5.0,   1.0,   0.5],
-			"REPORT{multiget-large}"        : [ 100.0, 100.0, 100.0,  75.0,  50.0,  10.0,   5.0]
+			"REPORT{multiget-small}"        : [ 100.0, 100.0,  50.0,  25.0,  10.0,   5.0,   1.0],
+			"REPORT{multiget-medium}"       : [ 100.0, 100.0,  75.0,  50.0,  25.0,  10.0,   5.0],
+			"REPORT{multiget-large}"        : [ 100.0, 100.0, 100.0,  75.0,  50.0,  25.0,  10.0],
+			"REPORT{multiget-huge}"         : [ 100.0, 100.0, 100.0, 100.0,  75.0,  50.0,  25.0]
         }
     },
     "operations": {

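The thresholds above relax several limits and add -huge buckets. Each
request type maps to seven percentages, one per response-time bucket; a
run fails when the observed fraction of requests slower than a bucket's
cutoff exceeds that bucket's limit. A sketch of the check implied by the
test_population.py cases (the cutoff list is an assumption; the tests only
pin the sixth column at 10 seconds):

    # Assumed per-column cutoffs in seconds.
    CUTOFFS = [0.1, 0.5, 1.0, 3.0, 5.0, 10.0, 30.0]

    def checkThresholds(method, durations, limits):
        failures = []
        for cutoff, limit in zip(CUTOFFS, limits):
            over = len([d for d in durations if d > cutoff]) * 100.0 / len(durations)
            if over > limit:
                failures.append(
                    "Greater than %g%% %s exceeded %g second response time"
                    % (limit, method, cutoff))
        return failures

    # From the tests: 3 of 4 huge PUTs over 10s against a 50% limit fails.
    print checkThresholds(
        "PUT{organizer-huge}", [12.0, 9.0, 12.0, 42.42],
        [100.0, 100.0, 100.0, 100.0, 100.0, 50.0, 25.0])
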
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/stats.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/stats.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/stats.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -25,6 +25,7 @@
 from pycalendar.duration import PyCalendarDuration
 from pycalendar.timezone import PyCalendarTimezone
 from pycalendar.property import PyCalendarProperty
+from math import log, sqrt
 
 NANO = 1000000000.0
 
@@ -261,17 +262,40 @@
 
     compareAttributes = ['_mu', '_sigma', '_maximum']
 
-    def __init__(self, mu, sigma, maximum=None):
+    def __init__(self, mu=None, sigma=None, mean=None, mode=None, median=None, maximum=None):
+        
+        if mu is not None and sigma is not None:
+            scale = 1.0
+        elif not (mu is None and sigma is None):
+            raise ValueError("mu and sigma must both be defined or both not defined")
+        elif mode is None:
+            raise ValueError("When mu and sigma are not defined, mode must be defined")
+        elif median is not None:
+            scale = mode
+            median /= mode
+            mode = 1.0
+            mu = log(median)
+            sigma = sqrt(log(median) - log(mode))
+        elif mean is not None:
+            scale = mode
+            mean /= mode
+            mode = 1.0
+            # mean = exp(mu + sigma**2/2) and mode = exp(mu - sigma**2);
+            # with mode scaled to 1.0, mu = sigma**2 and log(mean) = 1.5*mu.
+            mu = 2.0 * log(mean) / 3.0
+            sigma = sqrt(mu)
+        else:
+            raise ValueError("When using mode, one of median or mean must be defined")
+               
         self._mu = mu
         self._sigma = sigma
+        self._scale = scale
         self._maximum = maximum
 
 
     def sample(self):
-        result = random.lognormvariate(self._mu, self._sigma)
+        result = self._scale * random.lognormvariate(self._mu, self._sigma)
         if self._maximum is not None and result > self._maximum:
             for _ignore in range(10):
-                result = random.lognormvariate(self._mu, self._sigma)
+                result = self._scale * random.lognormvariate(self._mu, self._sigma)
                 if result <= self._maximum:
                     break
             else:
@@ -279,7 +303,21 @@
         return result
 
 
+class FixedDistribution(object, FancyEqMixin):
+    """
+    """
+    implements(IPopulation)
 
+    compareAttributes = ['_value']
+
+    def __init__(self, value):
+        self._value = value
+
+
+    def sample(self):
+        return self._value
+
+
 class NearFutureDistribution(object, FancyEqMixin):
     compareAttributes = ['_offset']
 

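With the stats.py changes, LogNormalDistribution can be built either from
the underlying normal's mu/sigma or from the distribution's own shape
(mode plus median or mean), with samples rescaled so the mode lands where
requested; FixedDistribution is the degenerate single-value case. For
example (assuming the contrib.performance.stats import path):

    from contrib.performance.stats import (
        FixedDistribution, LogNormalDistribution)

    classic = LogNormalDistribution(mu=1, sigma=1)

    # Most samples near 0.5, half below 1.0, resampled if above 60.
    shaped = LogNormalDistribution(mode=0.5, median=1.0, maximum=60)

    constant = FixedDistribution(3)   # sample() always returns 3

    print classic.sample(), shaped.sample(), constant.sample()
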
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/performance/test_stats.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/performance/test_stats.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/performance/test_stats.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -49,14 +49,33 @@
 
 class DistributionTests(TestCase):
     def test_lognormal(self):
-        dist = LogNormalDistribution(1, 1)
+        dist = LogNormalDistribution(mu=1, sigma=1)
         for _ignore_i in range(100):
             value = dist.sample()
             self.assertIsInstance(value, float)
             self.assertTrue(value >= 0.0, "negative value %r" % (value,))
             self.assertTrue(value <= 1000, "implausibly high value %r" % (value,))
 
+        dist = LogNormalDistribution(mode=1, median=2)
+        for _ignore_i in range(100):
+            value = dist.sample()
+            self.assertIsInstance(value, float)
+            self.assertTrue(value >= 0.0, "negative value %r" % (value,))
+            self.assertTrue(value <= 1000, "implausibly high value %r" % (value,))
 
+        dist = LogNormalDistribution(mode=1, mean=2)
+        for _ignore_i in range(100):
+            value = dist.sample()
+            self.assertIsInstance(value, float)
+            self.assertTrue(value >= 0.0, "negative value %r" % (value,))
+            self.assertTrue(value <= 1000, "implausibly high value %r" % (value,))
+
+        self.assertRaises(ValueError, LogNormalDistribution, mu=1)
+        self.assertRaises(ValueError, LogNormalDistribution, sigma=1)
+        self.assertRaises(ValueError, LogNormalDistribution, mode=1)
+        self.assertRaises(ValueError, LogNormalDistribution, mean=1)
+        self.assertRaises(ValueError, LogNormalDistribution, median=1)
+        
     def test_uniformdiscrete(self):
         population = [1, 5, 6, 9]
         counts = dict.fromkeys(population, 0)

Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/tools/protocolanalysis.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/tools/protocolanalysis.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/tools/protocolanalysis.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -596,7 +596,7 @@
             extended["or"] = line[startPos:endPos]
         else:
             items = line[startPos:].split()
-            extended = dict([item.split('=') for item in items])
+            extended = dict([item.split('=') for item in items if item.find("=") != -1])
     
         self.currentLine = CalendarServerLogAnalyzer.LogLine(ipaddr, userid, logDateTime, logTime, method, uri, status, reqbytes, referrer, client, extended)
     

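The protocolanalysis.py fix (mirrored in request_monitor.py below) makes
the extended-field parser skip tokens without an '=', which previously
made dict() raise ValueError. In isolation:

    items = "or=1.2 incomplete t=0.5".split()

    # The old dict([item.split('=') for item in items]) raises ValueError
    # on 'incomplete' (update sequence element of length 1).
    extended = dict([item.split('=') for item in items
                     if item.find("=") != -1])
    print extended   # {'or': '1.2', 't': '0.5'}
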
Copied: CalendarServer/branches/users/gaya/inviteclean/contrib/tools/readStats.py (from rev 9756, CalendarServer/trunk/contrib/tools/readStats.py)
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/tools/readStats.py	                        (rev 0)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/tools/readStats.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -0,0 +1,201 @@
+##
+# Copyright (c) 2012 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+This tool reads data from the server's statistics socket and prints a summary.
+"""
+
+from StringIO import StringIO
+import datetime
+import getopt
+import json
+import socket
+import sys
+import tables
+import time
+
+def safeDivision(value, total, factor=1):
+    return value * factor / total if total else 0
+
+def readSock(sockname):
+    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    s.connect(sockname)
+    data = ""
+    while True:
+        d = s.recv(1024)
+        if d:
+            data += d
+        else:
+            break
+    s.close()
+    return data
+
+def printStats(data):
+    
+    stats = json.loads(data)
+    print "- " * 40
+    print datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
+    print "Service Uptime: %s" % (datetime.timedelta(seconds=(int(time.time() - stats["System"]["start time"]))),)
+    if stats["System"]["cpu count"] > 0:
+        print "Current CPU: %.1f%% (%d CPUs)" % (
+            stats["System"]["cpu use"],
+            stats["System"]["cpu count"],
+        )
+        print "Current Memory Used: %d bytes (%.1f GB) (%.1f%% of total)" % (
+            stats["System"]["memory used"],
+            stats["System"]["memory used"] / (1024.0 * 1024 * 1024),
+            stats["System"]["memory percent"],
+        )
+    else:
+        print "Current CPU: Unavailable"
+        print "Current Memory Used: Unavailable"
+    print
+    printRequestSummary(stats)
+    printHistogramSummary(stats["5 Minutes"])
+
+def printFailedStats(message):
+    
+    print "- " * 40
+    print datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
+    print message
+    print
+
+def printRequestSummary(stats):
+    table = tables.Table()
+    table.addHeader(
+        ("Period", "Requests", "Av. Requests", "Av. Response", "Av. Response", "Max. Response",    "Slot",     "CPU", "500's"),
+    )
+    table.addHeader(
+        (      "",         "",   "per second",         "(ms)", "no write(ms)",          "(ms)", "Average", "Average",      ""),
+    )
+    table.setDefaultColumnFormats(
+       (
+            tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.LEFT_JUSTIFY), 
+            tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%.1f", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%.1f", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%.1f", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%.1f", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%.2f", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%.1f%%", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+        )
+    )
+    
+    for key, seconds in (("Current", 60,), ("1 Minute", 60,), ("5 Minutes", 5*60,), ("1 Hour", 60*60,),):
+
+        stat = stats[key]
+        table.addRow((
+            key,
+            stat["requests"],
+            safeDivision(float(stat["requests"]), seconds),
+            safeDivision(stat["t"], stat["requests"]),
+            safeDivision(stat["t"] - stat["t-resp-wr"], stat["requests"]),
+            stat["T-MAX"],
+            safeDivision(float(stat["slots"]), stat["requests"]),
+            safeDivision(stat["cpu"], stat["requests"]),
+            stat["500"],
+        ))
+        
+    os = StringIO()
+    table.printTable(os=os)
+    print os.getvalue()
+
+def printHistogramSummary(stat):
+    
+    print "5 minute average response histogram"
+    table = tables.Table()
+    table.addHeader(
+        ("", "<10ms", "10ms<->100ms", "100ms<->1s", "1s<->10s", "10s<->30s", "30s<->60s", ">60s",  "Over 1s", "Over 10s"),
+    )
+    table.setDefaultColumnFormats(
+       (
+            tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.CENTER_JUSTIFY), 
+            tables.Table.ColumnFormat("%d (%.1f%%)", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%d (%.1f%%)", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%d (%.1f%%)", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%d (%.1f%%)", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%d (%.1f%%)", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%d (%.1f%%)", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%d (%.1f%%)", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%.1f%%", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+            tables.Table.ColumnFormat("%.1f%%", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+        )
+    )
+    for i in ("T", "T-RESP-WR",):
+        table.addRow((
+            "Overall Response" if i == "T" else "Response without Write",
+            (stat[i]["<10ms"], safeDivision(stat[i]["<10ms"], stat["requests"], 100.0)),
+            (stat[i]["10ms<->100ms"], safeDivision(stat[i]["10ms<->100ms"], stat["requests"], 100.0)),
+            (stat[i]["100ms<->1s"], safeDivision(stat[i]["100ms<->1s"], stat["requests"], 100.0)),
+            (stat[i]["1s<->10s"], safeDivision(stat[i]["1s<->10s"], stat["requests"], 100.0)),
+            (stat[i]["10s<->30s"], safeDivision(stat[i]["10s<->30s"], stat["requests"], 100.0)),
+            (stat[i]["30s<->60s"], safeDivision(stat[i]["30s<->60s"], stat["requests"], 100.0)),
+            (stat[i][">60s"], safeDivision(stat[i][">60s"], stat["requests"], 100.0)),
+            safeDivision(stat[i]["Over 1s"], stat["requests"], 100.0),
+            safeDivision(stat[i]["Over 10s"], stat["requests"], 100.0),
+        ))
+    os = StringIO()
+    table.printTable(os=os)
+    print os.getvalue()
+    
+    
+def usage(error_msg=None):
+    if error_msg:
+        print error_msg
+
+    print """Usage: readStats [options]
+Options:
+    -h            Print this help and exit
+    -s            Name of local socket to read from
+    -t            Delay in seconds between each sample [10 seconds]
+
+Description:
+    This utility repeatedly reads statistics from a server socket and
+    prints a summary, pausing the specified delay between samples.
+
+"""
+
+    if error_msg:
+        raise ValueError(error_msg)
+    else:
+        sys.exit(0)
+
+if __name__ == '__main__':
+    
+    delay = 10
+    sockname = "data/Logs/state/caldavd-stats.sock"
+
+    options, args = getopt.getopt(sys.argv[1:], "hs:t:", [])
+
+    for option, value in options:
+        if option == "-h":
+            usage()
+        elif option == "-s":
+            sockname = value
+        elif option == "-t":
+            delay = int(value)
+
+    while True:
+        try:
+            printStats(readSock(sockname))
+        except socket.error:
+            printFailedStats("Unable to read statistics from server socket: %s" % (sockname,))
+        except KeyError, e:
+            printFailedStats("Unable to find key '%s' in statistics from server socket" % (e,))
+            sys.exit(1)
+
+        time.sleep(delay)

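Note on the tool above: the summary and histogram tables divide by request counts that may be zero, leaning on a safeDivision() helper defined elsewhere in the file (outside this hunk). Judging only from its call sites -- safeDivision(value, total) and safeDivision(value, total, 100.0) -- a minimal sketch would be something like this (the 1.0 default for factor is an assumption):

    def safeDivision(value, total, factor=1.0):
        # Guard against an empty sample window: with no requests seen,
        # render 0 rather than raising ZeroDivisionError.
        return value * factor / total if total else 0.0

The factor argument is what lets the histogram rows report percentages directly (factor=100.0).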
Modified: CalendarServer/branches/users/gaya/inviteclean/contrib/tools/request_monitor.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/contrib/tools/request_monitor.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/contrib/tools/request_monitor.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -266,8 +266,9 @@
         endPos = line.find(' ', startPos)
         extended["or"] = int(line[startPos:endPos])
     else:
+        
         items = line[startPos:].split()
-        extended = dict([item.split('=') for item in items])
+        extended = dict([item.split('=') for item in items if item.find("=") != -1])
 
     return userId, logTime, method, uri, status, bytes, referer, client, extended
 

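The added find("=") guard above matters because dict() rejects any pairing sequence whose elements do not split into exactly two parts, so one malformed token in the extended-data area would previously abort parsing of the whole log line. A small illustration (the tokens here are made up):

    items = "rcount=2 t=45.2 noise".split()
    # Before: dict([item.split('=') for item in items]) raises
    # ValueError, since 'noise'.split('=') yields a 1-element list.
    extended = dict(item.split('=') for item in items
                    if item.find("=") != -1)
    print extended   # {'t': '45.2', 'rcount': '2'} (key order arbitrary)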
Modified: CalendarServer/branches/users/gaya/inviteclean/setup.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/setup.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/setup.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -188,7 +188,7 @@
                     if line == "#PYTHONPATH":
                         script.append('PYTHONPATH="%s:$PYTHONPATH"' % (install_lib,))
                     elif line == "#PATH":
-                        script.append('PATH="%s:$PATH"' % (os.path.join(base, "bin"),))
+                        script.append('PATH="%s:$PATH"' % (os.path.join(base, "usr", "bin"),))
                     else:
                         script.append(line)
 
@@ -196,7 +196,7 @@
                     if line == "#PYTHONPATH":
                         script.append('PYTHONPATH="%s"' % (install_lib,))
                     elif line == "#PATH":
-                        script.append('PATH="%s"' % (os.path.join(base, "bin"),))
+                        script.append('PATH="%s"' % (os.path.join(base, "usr", "bin"),))
                     else:
                         script.append(line)
 

Modified: CalendarServer/branches/users/gaya/inviteclean/support/Makefile.Apple
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/support/Makefile.Apple	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/support/Makefile.Apple	2012-08-30 22:48:54 UTC (rev 9757)
@@ -55,25 +55,27 @@
 pycalendar::            $(BuildDirectory)/pycalendar
 PyGreSQL-4.0::          $(BuildDirectory)/PyGreSQL-4.0
 sqlparse-0.1.2::        $(BuildDirectory)/sqlparse-0.1.2
+setproctitle-1.1.6::	$(BuildDirectory)/setproctitle-1.1.6
 $(Project)::            $(BuildDirectory)/$(Project)
 
-build:: PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 $(Project)
+build:: PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 setproctitle-1.1.6 $(Project)
 
 setup:
 	$(_v) ./run -g
 
-prep:: setup CalDAVTester.tgz PyKerberos.tgz pycalendar.tgz PyGreSQL-4.0.tgz sqlparse-0.1.2.tgz
+prep:: setup CalDAVTester.tgz PyKerberos.tgz pycalendar.tgz PyGreSQL-4.0.tgz sqlparse-0.1.2.tgz setproctitle-1.1.6.tgz
 
-PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 $(Project)::
+PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 setproctitle-1.1.6 $(Project)::
 	@echo "Building $@..."
 	$(_v) cd $(BuildDirectory)/$@ && $(Environment) $(PYTHON) setup.py build
 
 install:: build
-	$(_v) cd $(BuildDirectory)/$(Project)       && $(Environment) $(PYTHON) setup.py build_ext $(CS_BUILD_EXT_FLAGS) install $(PY_INSTALL_FLAGS) $(CS_INSTALL_FLAGS)
-	$(_v) cd $(BuildDirectory)/PyKerberos       && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
-	$(_v) cd $(BuildDirectory)/pycalendar       && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
-	$(_v) cd $(BuildDirectory)/PyGreSQL-4.0     && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
-	$(_v) cd $(BuildDirectory)/sqlparse-0.1.2   && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
+	$(_v) cd $(BuildDirectory)/$(Project)         && $(Environment) $(PYTHON) setup.py build_ext $(CS_BUILD_EXT_FLAGS) install $(PY_INSTALL_FLAGS) $(CS_INSTALL_FLAGS)
+	$(_v) cd $(BuildDirectory)/PyKerberos         && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
+	$(_v) cd $(BuildDirectory)/pycalendar         && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
+	$(_v) cd $(BuildDirectory)/PyGreSQL-4.0       && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
+	$(_v) cd $(BuildDirectory)/sqlparse-0.1.2     && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
+	$(_v) cd $(BuildDirectory)/setproctitle-1.1.6 && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
 	$(_v) for so in $$(find "$(DSTROOT)$(PY_HOME)/lib" -type f -name '*.so'); do $(STRIP) -Sx "$${so}"; done 
 	$(_v) $(INSTALL_DIRECTORY) "$(DSTROOT)$(SIPP)$(ETCDIR)$(CALDAVDSUBDIR)"
 	$(_v) $(INSTALL_FILE) "$(Sources)/conf/caldavd-apple.plist" "$(DSTROOT)$(SIPP)$(ETCDIR)$(CALDAVDSUBDIR)/caldavd.plist"

Modified: CalendarServer/branches/users/gaya/inviteclean/support/build.sh
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/support/build.sh	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/support/build.sh	2012-08-30 22:48:54 UTC (rev 9757)
@@ -743,7 +743,7 @@
     "${pypi}/p/python-ldap/${ld}.tar.gz";
 
   # XXX actually PyCalendar should be imported in-place.
-  py_dependency -fe -i "src" -r 209 \
+  py_dependency -fe -i "src" -r 211 \
     "pycalendar" "pycalendar" "pycalendar" \
     "http://svn.mulberrymail.com/repos/PyCalendar/branches/server";
 

Modified: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/adbapi2.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/adbapi2.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/adbapi2.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -484,7 +484,53 @@
 
 
 
-class _SingleTxn(proxyForInterface(iface=IAsyncTransaction,
+class _HookableOperation(object):
+
+    def __init__(self):
+        self._hooks = []
+
+
+    @inlineCallbacks
+    def runHooks(self, ignored):
+        """
+        Callback for C{commit} and C{abort} Deferreds.
+        """
+        for operation in self._hooks:
+            yield operation()
+        returnValue(ignored)
+
+
+    def addHook(self, operation):
+        """
+        Implement L{IAsyncTransaction.postCommit}.
+        """
+        self._hooks.append(operation)
+
+
+
+class _CommitAndAbortHooks(object):
+    """
+    Shared implementation of post-commit and post-abort hooks.
+    """
+    # FIXME: this functionality needs direct tests, although it's pretty well-
+    # covered by txdav's test suite.
+
+    def __init__(self):
+        self._commit = _HookableOperation()
+        self._abort = _HookableOperation()
+
+
+    def postCommit(self, operation):
+        return self._commit.addHook(operation)
+
+
+    def postAbort(self, operation):
+        return self._abort.addHook(operation)
+
+
+
+class _SingleTxn(_CommitAndAbortHooks,
+                 proxyForInterface(iface=IAsyncTransaction,
                                    originalAttribute='_baseTxn')):
     """
     A L{_SingleTxn} is a single-use wrapper for the longer-lived
@@ -505,6 +551,7 @@
     """
 
     def __init__(self, pool, baseTxn):
+        super(_SingleTxn, self).__init__()
         self._pool           = pool
         self._baseTxn        = baseTxn
         self._completed      = False
@@ -601,9 +648,9 @@
             # We're in the process of executing a block of commands.  Wait until
             # they're done.  (Commit will be repeated in _checkNextBlock.)
             return self._blockedQueue.commit()
-
         self._markComplete()
-        return super(_SingleTxn, self).commit()
+        return (super(_SingleTxn, self).commit()
+                .addCallback(self._commit.runHooks))
 
 
     def abort(self):
@@ -611,6 +658,7 @@
         result = super(_SingleTxn, self).abort()
         if self in self._pool._waiting:
             self._stopWaiting()
+        result.addCallback(self._abort.runHooks)
         return result
 
 
@@ -750,15 +798,40 @@
 
 
 class _ConnectingPseudoTxn(object):
+    """
+    This is a pseudo-Transaction for bookkeeping purposes.
 
+    When a connection attempt has been started but has not yet completed,
+    the L{ConnectionPool} still needs a way to shut it down.  This object
+    provides that tracking handle, and will be present in the pool's
+    C{busy} list while the connection attempt is in progress.
+    """
+
     _retry = None
 
     def __init__(self, pool, holder):
-        self._pool   = pool
-        self._holder = holder
+        """
+        Initialize the L{_ConnectingPseudoTxn}; get ready to connect.
 
+        @param pool: The pool that this connection attempt is participating in.
+        @type pool: L{ConnectionPool}
 
+        @param holder: the L{ThreadHolder} allocated to this connection attempt
+            and subsequent SQL executions for this connection.
+        @type holder: L{ThreadHolder}
+        """
+        self._pool    = pool
+        self._holder  = holder
+        self._aborted = False
+
+
     def abort(self):
+        """
+        Ignore the result of attempting to connect to this database, and
+        instead simply close the connection and free the L{ThreadHolder}
+        allocated for it.
+        """
+        self._aborted = True
         if self._retry is not None:
             self._retry.cancel()
         d = self._holder.stop()
@@ -852,6 +925,7 @@
         L{IReactorThreads} users, the connection work units are very long-lived
         and block until this service has been stopped.
         """
+        super(ConnectionPool, self).startService()
         tp = self.reactor.getThreadPool()
         self.reactor.suggestThreadPoolSize(tp.max + self.maxConnections)
 
@@ -862,6 +936,7 @@
         Forcibly abort any outstanding transactions, and release all resources
         (notably, threads).
         """
+        super(ConnectionPool, self).stopService()
         self._stopping = True
 
         # Phase 1: Cancel any transactions that are waiting so they won't try to
@@ -951,6 +1026,8 @@
             cursor     = connection.cursor()
             return (connection, cursor)
         def finishInit((connection, cursor)):
+            if txn._aborted:
+                return
             baseTxn = _ConnectedTxn(
                 pool=self,
                 threadHolder=holder,
@@ -1187,8 +1264,8 @@
         Initialize a mapping of transaction IDs to transaction objects.
         """
         super(ConnectionPoolConnection, self).__init__()
-        self.pool  = pool
-        self._txns = {}
+        self.pool    = pool
+        self._txns   = {}
         self._blocks = {}
 
 
@@ -1405,7 +1482,7 @@
 
 
 
-class _NetTransaction(object):
+class _NetTransaction(_CommitAndAbortHooks):
     """
     A L{_NetTransaction} is an L{AMP}-protocol-based provider of the
     L{IAsyncTransaction} interface.  It sends SQL statements, query results, and
@@ -1419,6 +1496,7 @@
         Initialize a transaction with a L{ConnectionPoolClient} and a unique
         transaction identifier.
         """
+        super(_NetTransaction, self).__init__()
         self._client        = client
         self._transactionID = transactionID
         self._completed     = False
@@ -1476,11 +1554,12 @@
         def done(whatever):
             self._committed = True
             return whatever
-        return self._complete(Commit).addBoth(done)
+        return (self._complete(Commit).addBoth(done)
+                .addCallback(self._commit.runHooks))
 
 
     def abort(self):
-        return self._complete(Abort)
+        return self._complete(Abort).addCallback(self._abort.runHooks)
 
 
     def commandBlock(self):
@@ -1523,7 +1602,6 @@
         return self._transaction.dialect
 
 
-
     def execSQL(self, sql, args=None, raiseOnZeroRowCount=None):
         """
         Execute some SQL on this command block.

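With _CommitAndAbortHooks mixed in, both the pool-side _SingleTxn and the AMP-based _NetTransaction expose postCommit()/postAbort(), and commit()/abort() chain the registered hooks onto their Deferreds via runHooks. A self-contained sketch of the resulting calling convention -- FakeTxn below is a stand-in for illustration, not part of the patch:

    import sys
    from twisted.internet.defer import succeed

    class FakeTxn(object):
        # Stand-in exposing the same hook surface as the patch.
        def __init__(self):
            self._commitHooks = []
        def postCommit(self, operation):
            self._commitHooks.append(operation)
        def commit(self):
            d = succeed(None)   # pretend the SQL COMMIT just succeeded
            for hook in self._commitHooks:
                d.addCallback(lambda _, h=hook: h())
            return d

    txn = FakeTxn()
    txn.postCommit(lambda: sys.stdout.write("committed; safe to notify\n"))
    txn.commit()

Because runHooks yields on each operation, a hook may itself return a Deferred and later hooks will wait for it to fire.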
Modified: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/parseschema.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/parseschema.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/parseschema.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -389,6 +389,10 @@
                     self.table.checkConstraint(self.readExpression(self.next()))
                 elif val.match(Keyword, 'DEFAULT'):
                     theDefault = self.next()
+                    if isinstance(theDefault, Parenthesis):
+                        iDefault = iterSignificant(theDefault)
+                        expect(iDefault, ttype=Punctuation, value="(")
+                        theDefault = iDefault.next()
                     if isinstance(theDefault, Function):
                         thingo = theDefault.tokens[0].get_name()
                         parens = expectSingle(

Copied: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/record.py (from rev 9756, CalendarServer/trunk/twext/enterprise/dal/record.py)
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/record.py	                        (rev 0)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/record.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -0,0 +1,347 @@
+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2012 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+RECORD: Relational Entity Creation from Objects Representing Data.
+
+This is an asynchronous object-relational mapper based on
+L{twext.enterprise.dal.syntax}.
+"""
+
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twext.enterprise.dal.syntax import (
+    Select, Tuple, Constant, ColumnSyntax, Insert, Update, Delete
+)
+# from twext.enterprise.dal.syntax import ExpressionSyntax
+
+class ReadOnly(AttributeError):
+    """
+    A caller attempted to set an attribute on a database-backed record, rather
+    than updating it through L{Record.update}.
+    """
+
+    def __init__(self, className, attributeName):
+        self.className = className
+        self.attributeName = attributeName
+        super(ReadOnly, self).__init__("SQL-backed attribute '{0}.{1}' is "
+                                       "read-only. Use '.update(...)' to "
+                                       "modify attributes."
+                                       .format(className, attributeName))
+
+
+
+class NoSuchRecord(Exception):
+    """
+    No matching record could be found.
+    """
+
+
+class _RecordMeta(type):
+    """
+    Metaclass for associating a L{fromTable} with a L{Record} at inheritance
+    time.
+    """
+
+    def __new__(cls, name, bases, ns):
+        """
+        Create a new instance of this meta-type.
+        """
+        newbases = []
+        table = None
+        namer = None
+        for base in bases:
+            if isinstance(base, fromTable):
+                if table is not None:
+                    raise RuntimeError(
+                        "Can't define a class from two or more tables at once."
+                    )
+                table = base.table
+            elif getattr(base, "table", None) is not None:
+                raise RuntimeError(
+                    "Can't define a record class by inheriting one already "
+                    "mapped to a table."
+                    # TODO: more info
+                )
+            else:
+                if namer is None:
+                    if isinstance(base, _RecordMeta):
+                        namer = base
+                newbases.append(base)
+        if table is not None:
+            attrmap = {}
+            colmap = {}
+            allColumns = list(table)
+            for column in allColumns:
+                attrname = namer.namingConvention(column.model.name)
+                attrmap[attrname] = column
+                colmap[column] = attrname
+            ns.update(table=table, __attrmap__=attrmap, __colmap__=colmap)
+            ns.update(attrmap)
+        return super(_RecordMeta, cls).__new__(cls, name, tuple(newbases), ns)
+
+
+
+class fromTable(object):
+    """
+    Inherit from this after L{Record} to specify which table your L{Record}
+    subclass is mapped to.
+    """
+
+    def __init__(self, aTable):
+        """
+        @param aTable: The table to map to.
+        @type aTable: L{twext.enterprise.dal.syntax.TableSyntax}
+        """
+        self.table = aTable
+
+
+
+class Record(object):
+    """
+    Superclass for all database-backed record classes.  (i.e.  an object mapped
+    from a database record).
+
+    @cvar table: the table that represents this L{Record} in the database.
+    @type table: L{TableSyntax}
+
+    @ivar transaction: The L{IAsyncTransaction} where this record is being
+        loaded.  This may be C{None} if this L{Record} is not participating in
+        a transaction, which may be true if it was instantiated but never
+        saved.
+
+    @cvar __colmap__: map of L{ColumnSyntax} objects to attribute names.
+    @type __colmap__: L{dict}
+
+    @cvar __attrmap__: map of attribute names to L{ColumnSyntax} objects.
+    @type __attrmap__: L{dict}
+    """
+
+    __metaclass__ = _RecordMeta
+
+    transaction = None
+    def __setattr__(self, name, value):
+        """
+        Once the transaction is initialized, this object is immutable.  If you
+        want to change it, use L{Record.update}.
+        """
+        if self.transaction is not None:
+            raise ReadOnly(self.__class__.__name__, name)
+        return super(Record, self).__setattr__(name, value)
+
+
+    @staticmethod
+    def namingConvention(columnName):
+        """
+        Implement the convention for naming-conversion between column names
+        (typically, upper-case database names map to lower-case attribute
+        names).
+        """
+        words = columnName.lower().split("_")
+        def cap(word):
+            if word.lower() == 'id':
+                return word.upper()
+            else:
+                return word.capitalize()
+        return words[0] + "".join(map(cap, words[1:]))
+
+
+    @classmethod
+    def _primaryKeyExpression(cls):
+        return Tuple([ColumnSyntax(c) for c in cls.table.model.primaryKey])
+
+
+    def _primaryKeyValue(self):
+        val = []
+        for col in self._primaryKeyExpression().columns:
+            val.append(getattr(self, self.__class__.__colmap__[col]))
+        return val
+
+
+    @classmethod
+    def _primaryKeyComparison(cls, primaryKey):
+        return (cls._primaryKeyExpression() ==
+                Tuple(map(Constant, primaryKey)))
+
+
+    @classmethod
+    @inlineCallbacks
+    def load(cls, transaction, *primaryKey):
+        self = (yield cls.query(transaction,
+                                cls._primaryKeyComparison(primaryKey)))[0]
+        returnValue(self)
+
+
+    @classmethod
+    @inlineCallbacks
+    def create(cls, transaction, **k):
+        """
+        Create a row.
+
+        Used like this::
+
+            MyRecord.create(transaction, column1=1, column2=u'two')
+        """
+        self = cls()
+        colmap = {}
+        attrtocol = cls.__attrmap__
+        needsCols = []
+        needsAttrs = []
+
+        for attr in attrtocol:
+            col = attrtocol[attr]
+            if attr in k:
+                setattr(self, attr, k[attr])
+                colmap[col] = k.pop(attr)
+            else:
+                if col.model.needsValue():
+                    raise TypeError("required attribute " + repr(attr) +
+                                    " not passed")
+                else:
+                    needsCols.append(col)
+                    needsAttrs.append(attr)
+        if k:
+            raise TypeError("received unknown attribute{0}: {1}".format(
+                "s" if len(k) > 1 else "", ", ".join(sorted(k))
+            ))
+        result = yield (Insert(colmap, Return=needsCols if needsCols else None)
+                        .on(transaction))
+        if needsCols:
+            for neededAttr, neededValue in zip(needsAttrs, result[0]):
+                setattr(self, neededAttr, neededValue)
+        self.transaction = transaction
+        returnValue(self)
+
+
+    def delete(self):
+        """
+        Delete this row from the database.
+
+        @return: a L{Deferred} which fires when the underlying row has been
+            deleted.
+        """
+        return Delete(From=self.table,
+                      Where=self._primaryKeyComparison(self._primaryKeyValue())
+                      ).on(self.transaction)
+
+
+    @inlineCallbacks
+    def update(self, **kw):
+        """
+        Modify the given attributes in the database.
+
+        @return: a L{Deferred} that fires when the updates have been sent to
+            the database.
+        """
+        colmap = {}
+        for k, v in kw.iteritems():
+            colmap[self.__attrmap__[k]] = v
+        yield (Update(colmap,
+                      Where=self._primaryKeyComparison(self._primaryKeyValue()))
+                .on(self.transaction))
+        self.__dict__.update(kw)
+
+
+    @classmethod
+    def pop(cls, transaction, *primaryKey):
+        """
+        Atomically retrieve and remove a row from this L{Record}'s table
+        with a primary key value of C{primaryKey}.
+
+        @return: a L{Deferred} that fires with an instance of C{cls}, or fails
+            with L{NoSuchRecord} if there were no records in the database.
+        @rtype: L{Deferred}
+        """
+        return cls._rowsFromQuery(
+            transaction, Delete(Where=cls._primaryKeyComparison(primaryKey),
+                        From=cls.table, Return=list(cls.table)),
+            lambda : NoSuchRecord()
+        ).addCallback(lambda x: x[0])
+
+
+    @classmethod
+    def query(cls, transaction, expr, order=None, ascending=True):
+        """
+        Query the table that corresponds to C{cls}, and return instances of
+        C{cls} corresponding to the rows that are returned from that table.
+
+        @param expr: An L{ExpressionSyntax} that constrains the results of the
+            query.  This is most easily produced by accessing attributes on the
+            class; for example, C{MyRecordType.query((MyRecordType.col1 >
+            MyRecordType.col2).And(MyRecordType.col3 == 7))}
+
+        @param order: A L{ColumnSyntax} to order the resulting record objects
+            by.
+
+        @param ascending: A boolean; if C{order} is not C{None}, whether to
+            sort in ascending or descending order.
+        """
+        kw = {}
+        if order is not None:
+            kw.update(OrderBy=order, Ascending=ascending)
+        return cls._rowsFromQuery(transaction, Select(list(cls.table),
+                                                      From=cls.table,
+                                                      Where=expr, **kw), None)
+
+
+    @classmethod
+    def all(cls, transaction):
+        """
+        Load all rows from the table that corresponds to C{cls} and return
+        instances of C{cls} corresponding to all.
+        """
+        return cls._rowsFromQuery(transaction,
+                                  Select(list(cls.table),
+                                         From=cls.table,
+                                         OrderBy=cls._primaryKeyExpression()),
+                                  None)
+
+
+    @classmethod
+    @inlineCallbacks
+    def _rowsFromQuery(cls, transaction, qry, rozrc):
+        """
+        Execute the given query, and transform its results into rows.
+
+        @param transaction: an L{IAsyncTransaction} to execute the query on.
+
+        @param qry: a L{_DMLStatement} (XXX: maybe _DMLStatement or some
+            interface that defines 'on' should be public?) whose results are
+            the list of columns in C{self.table}.
+
+        @param rozrc: The C{raiseOnZeroRowCount} argument.
+
+        @return: a L{Deferred} that succeeds with a C{list} or fails with an
+            exception produced by C{rozrc}.
+        """
+        rows = yield qry.on(transaction, raiseOnZeroRowCount=rozrc)
+        selves = []
+        for row in rows:
+            self = cls()
+            for (column, value) in zip(list(cls.table), row):
+                name = cls.__colmap__[column]
+                setattr(self, name, value)
+            self.transaction = transaction
+            selves.append(self)
+        returnValue(selves)
+
+
+
+__all__ = [
+    "ReadOnly",
+    "fromTable",
+    "NoSuchRecord",
+]

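Taken together, record.py is a small asynchronous ORM over the DAL syntax layer. A usage sketch -- not runnable on its own, since the schema object (with a table ALPHA(BETA integer primary key, GAMMA text), the same shape the new tests use) and the IAsyncTransaction txn are assumed to come from SchemaSyntax and a ConnectionPool respectively:

    from twisted.internet.defer import inlineCallbacks
    from twext.enterprise.dal.record import Record, fromTable

    class Alpha(Record, fromTable(schema.ALPHA)):
        """
        Maps rows of ALPHA; columns BETA/GAMMA become attributes
        .beta/.gamma via Record.namingConvention.
        """

    @inlineCallbacks
    def demo(txn):
        rec = yield Alpha.create(txn, beta=7, gamma=u"seven")
        # rec.beta = 8 would raise ReadOnly; use update() instead:
        yield rec.update(gamma=u"SEVEN")
        rows = yield Alpha.query(txn, Alpha.gamma == u"SEVEN",
                                 order=Alpha.beta)
        gone = yield Alpha.pop(txn, 7)   # atomic load-and-delete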
Modified: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/syntax.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/syntax.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/syntax.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -27,11 +27,14 @@
 
 from twisted.internet.defer import succeed
 
-from twext.enterprise.dal.model import Schema, Table, Column, Sequence
-from twext.enterprise.ienterprise import POSTGRES_DIALECT, ORACLE_DIALECT
-from twext.enterprise.ienterprise import IDerivedParameter
+from twext.enterprise.dal.model import Schema, Table, Column, Sequence, SQLType
+from twext.enterprise.ienterprise import (
+    POSTGRES_DIALECT, ORACLE_DIALECT, SQLITE_DIALECT, IDerivedParameter
+)
 from twext.enterprise.util import mapOracleOutputType
 
+from twisted.internet.defer import inlineCallbacks, returnValue
+
 try:
     import cx_Oracle
     cx_Oracle
@@ -40,16 +43,18 @@
 
 class DALError(Exception):
     """
-    Base class for exceptions raised by this module. This can be raised directly for
-    API violations. This exception represents a serious programming error and should
-    normally never be caught or ignored.
+    Base class for exceptions raised by this module.  This can be raised
+    directly for API violations.  This exception represents a serious
+    programming error and should normally never be caught or ignored.
     """
 
+
+
 class QueryPlaceholder(object):
     """
     Representation of the placeholders required to generate some SQL, for a
-    single statement.  Contains information necessary
-    to generate place holder strings based on the database dialect.
+    single statement.  Contains information necessary to generate place holder
+    strings based on the database dialect.
     """
 
     def placeholder(self):
@@ -192,14 +197,13 @@
 
         @param txn: the L{IAsyncTransaction} to execute this on.
 
-        @param raiseOnZeroRowCount: the exception to raise if no data was
-            affected or returned by this query.
+        @param raiseOnZeroRowCount: a 0-argument callable which returns an
+            exception to raise if the executed SQL does not affect any rows.
 
         @param kw: keyword arguments, mapping names of L{Parameter} objects
             located somewhere in C{self}
 
         @return: results from the database.
-
         @rtype: a L{Deferred} firing a C{list} of records (C{tuple}s or
             C{list}s)
         """
@@ -445,6 +449,7 @@
 Len = Function("character_length", "length")
 Upper = Function("upper")
 Lower = Function("lower")
+_sqliteLastInsertRowID = Function("last_insert_rowid")
 
 # Use a specific value here for "the convention for case-insensitive values in
 # the database" so we don't need to keep remembering whether it's upper or
@@ -559,7 +564,14 @@
         integer, baz integer)', 'schemaSyntax.foo.bar' and
         'schemaSyntax.foo.baz'
         """
-        return ColumnSyntax(self.model.columnNamed(attr))
+        try:
+            column = self.model.columnNamed(attr)
+        except KeyError:
+            raise AttributeError("table {0} has no column {1}".format(
+                self.model.name, attr
+            ))
+        else:
+            return ColumnSyntax(column)
 
 
     def __iter__(self):
@@ -602,7 +614,7 @@
     def __contains__(self, columnSyntax):
         if isinstance(columnSyntax, FunctionInvocation):
             columnSyntax = columnSyntax.arg
-        return (columnSyntax.model in self.model.columns)
+        return (columnSyntax.model.table is self.model)
 
 
 
@@ -917,11 +929,14 @@
 
 
 
-class _AllColumns(object):
+class _AllColumns(NamedValue):
 
-    def subSQL(self, queryGenerator, allTables):
-        return SQLFragment('*')
+    def __init__(self):
+        self.name = "*"
 
+    def allColumns(self):
+        return []
+
 ALL_COLUMNS = _AllColumns()
 
 
@@ -945,19 +960,39 @@
 
 
 
-def _columnsMatchTables(columns, tables):
+def _checkColumnsMatchTables(columns, tables):
+    """
+    Verify that the given C{columns} match the given C{tables}; that is, that
+    every L{TableSyntax} referenced by every L{ColumnSyntax} referenced by
+    every L{ExpressionSyntax} in the given C{columns} list is present in the
+    given C{tables} list.
+
+    @param columns: a L{list} of L{ExpressionSyntax}, each of which references
+        some set of L{ColumnSyntax}es via its C{allColumns} method.
+
+    @param tables: a L{list} of L{TableSyntax}
+
+    @return: L{None}
+    @rtype: L{NoneType}
+
+    @raise TableMismatch: if any table referenced by a column is I{not} found
+        in C{tables}
+    """
     for expression in columns:
         for column in expression.allColumns():
             for table in tables:
                 if column in table:
                     break
             else:
-                return False
-    return True
+                raise TableMismatch("{} not found in {}".format(
+                    column, tables
+                ))
+    return None
 
 
-class Tuple(object):
 
+class Tuple(ExpressionSyntax):
+
     def __init__(self, columns):
         self.columns = columns
 
@@ -1064,8 +1099,7 @@
         if columns is None:
             columns = ALL_COLUMNS
         else:
-            if not _columnsMatchTables(columns, From.tables()):
-                raise TableMismatch()
+            _checkColumnsMatchTables(columns, From.tables())
             columns = _SomeColumns(columns)
         self.columns = columns
         
@@ -1263,12 +1297,11 @@
 
     def _returningClause(self, queryGenerator, stmt, allTables):
         """
-        Add a dialect-appropriate 'returning' clause to the end of the given SQL
-        statement.
+        Add a dialect-appropriate 'returning' clause to the end of the given
+        SQL statement.
 
-        @param queryGenerator: describes the database we are generating the statement
-            for.
-
+        @param queryGenerator: describes the database we are generating the
+            statement for.
         @type queryGenerator: L{QueryGenerator}
 
         @param stmt: the SQL fragment generated without the 'returning' clause
@@ -1280,9 +1313,14 @@
         @return: the C{stmt} parameter.
         """
         retclause = self.Return
+        if retclause is None:
+            return stmt
         if isinstance(retclause, (tuple, list)):
             retclause = _CommaList(retclause)
-        if retclause is not None:
+        if queryGenerator.dialect == SQLITE_DIALECT:
+            # sqlite does this another way.
+            return stmt
+        elif retclause is not None:
             stmt.text += ' returning '
             stmt.append(retclause.subSQL(queryGenerator, allTables))
             if queryGenerator.dialect == ORACLE_DIALECT:
@@ -1410,7 +1448,27 @@
         return self._returningClause(queryGenerator, stmt, allTables)
 
 
+    def on(self, txn, *a, **kw):
+        """
+        Override to provide extra logic for L{Insert}s that return values on
+        databases that don't provide return values as part of their C{INSERT}
+        behavior.
+        """
+        result = super(_DMLStatement, self).on(txn, *a, **kw)
+        if self.Return is not None and txn.dialect == SQLITE_DIALECT:
+            table = self._returnAsList()[0].model.table
+            return Select(self._returnAsList(),
+                   # TODO: error reporting when 'return' includes columns
+                   # foreign to the primary table.
+                   From=TableSyntax(table),
+                   Where=ColumnSyntax(Column(table, "rowid",
+                                             SQLType("integer", None))) ==
+                         _sqliteLastInsertRowID()
+                   ).on(txn, *a, **kw)
+        return result
 
+
+
 def _convert(x):
     """
     Convert a value to an appropriate SQL AST node.  (Currently a simple
@@ -1426,6 +1484,12 @@
 class Update(_DMLStatement):
     """
     'update' statement
+
+    @ivar columnMap: A L{dict} mapping L{ColumnSyntax} objects to values to
+        change; values may be simple database values (such as L{str},
+        L{unicode}, L{datetime.datetime}, L{float}, L{int} etc) or L{Parameter}
+        instances.
+    @type columnMap: L{dict}
     """
 
     def __init__(self, columnMap, Where, Return=None):
@@ -1436,6 +1500,37 @@
         self.Return = Return
 
 
+    @inlineCallbacks
+    def on(self, txn, *a, **kw):
+        """
+        Override to provide extra logic for L{Update}s that return values on
+        databases that don't provide return values as part of their C{UPDATE}
+        behavior.
+        """
+        doExtra = self.Return is not None and txn.dialect == SQLITE_DIALECT
+        upcall = lambda: super(_DMLStatement, self).on(txn, *a, **kw)
+
+        if doExtra:
+            table = self._returnAsList()[0].model.table
+            rowidcol = ColumnSyntax(Column(table, "rowid",
+                                           SQLType("integer", None)))
+            prequery = Select([rowidcol], From=TableSyntax(table),
+                              Where=self.Where)
+            preresult = prequery.on(txn, *a, **kw)
+            before = yield preresult
+            yield upcall()
+            result = (yield Select(self._returnAsList(),
+                            # TODO: error reporting when 'return' includes
+                            # columns foreign to the primary table.
+                            From=TableSyntax(table),
+                            Where=reduce(lambda left, right: left.Or(right),
+                                         ((rowidcol == x) for [x] in before))
+                            ).on(txn, *a, **kw))
+            returnValue(result)
+        else:
+            returnValue((yield upcall()))
+
+
     def _toSQL(self, queryGenerator):
         """
         @return: a 'insert' statement with placeholders and arguments
@@ -1459,7 +1554,7 @@
                     for (c, v) in sortedColumns]
             )
         )
-        result.append(SQLFragment( ' where '))
+        result.append(SQLFragment(' where '))
         result.append(self.Where.subSQL(queryGenerator, allTables))
         return self._returningClause(queryGenerator, result, allTables)
 
@@ -1490,7 +1585,19 @@
         return self._returningClause(queryGenerator, result, allTables)
 
 
+    @inlineCallbacks
+    def on(self, txn, *a, **kw):
+        upcall = lambda: super(Delete, self).on(txn, *a, **kw)
+        if txn.dialect == SQLITE_DIALECT and self.Return is not None:
+            result = yield Select(self._returnAsList(), From=self.From,
+                                  Where=self.Where).on(txn, *a, **kw)
+            yield upcall()
+        else:
+            result = yield upcall()
+        returnValue(result)
 
+
+
 class _LockingStatement(_Statement):
     """
     A statement related to lock management, which implicitly has no results.

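All three SQLITE_DIALECT branches above emulate the missing 'returning' clause with extra selects, relying on SQLite serializing transactions so that no other write can land between the statements: Insert re-selects by last_insert_rowid(), Update snapshots the affected rowids first, and Delete selects before deleting. The Update dance can be demonstrated standalone with the stdlib sqlite3 module (table and values invented for the demo):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("create table FOO (BAR integer, BAZ integer)")
    con.execute("insert into FOO values (1, 1234)")

    # Emulating: update FOO set BAR = 4321 where BAZ = 1234 returning BAR
    before = [row[0] for row in
              con.execute("select rowid from FOO where BAZ = ?", (1234,))]
    con.execute("update FOO set BAR = ? where BAZ = ?", (4321, 1234))
    clause = " or ".join("rowid = ?" for _ in before)
    print con.execute("select BAR from FOO where " + clause,
                      before).fetchall()   # [(4321,)]

The rowid is used rather than the primary key because the update may be rewriting the key columns themselves.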
Modified: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_parseschema.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_parseschema.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_parseschema.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -129,6 +129,25 @@
                           False)
 
 
+    def test_sequenceDefaultWithParens(self):
+        """
+        SQLite requires 'default' expression to be in parentheses, and that
+        should be equivalent on other databases; we should be able to parse
+        that too.
+        """
+        s = self.schemaFromString(
+            """
+            create sequence alpha;
+            create table foo (
+                bar integer default (nextval('alpha')) not null,
+                qux integer not null
+            );
+            """
+        )
+        self.assertEquals(s.tableNamed("foo").columnNamed("bar").needsValue(),
+                          False)
+
+
     def test_defaultConstantColumns(self):
         """
         Parsing a 'default' column with an appropriate type in it will return

Copied: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_record.py (from rev 9756, CalendarServer/trunk/twext/enterprise/dal/test/test_record.py)
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_record.py	                        (rev 0)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_record.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -0,0 +1,291 @@
+##
+# Copyright (c) 2012 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Test cases for L{twext.enterprise.dal.record}.
+"""
+
+from twisted.internet.defer import inlineCallbacks
+
+from twisted.trial.unittest import TestCase
+
+from twext.enterprise.dal.record import (
+    Record, fromTable, ReadOnly, NoSuchRecord
+)
+from twext.enterprise.dal.test.test_parseschema import SchemaTestHelper
+from twext.enterprise.dal.syntax import SchemaSyntax
+from twisted.internet.defer import gatherResults
+from twext.enterprise.fixtures import buildConnectionPool
+
+# from twext.enterprise.dal.syntax import
+
+
+sth = SchemaTestHelper()
+sth.id = lambda : __name__
+schemaString = """
+create table ALPHA (BETA integer primary key, GAMMA text);
+create table DELTA (PHI integer primary key default (nextval('myseq')),
+                    EPSILON text not null);
+"""
+
+# sqlite can be made to support nextval() as a function, but 'create sequence'
+# is syntax, and cannot be emulated the same way.
+parseableSchemaString = """
+create sequence myseq;
+""" + schemaString
+
+testSchema = SchemaSyntax(sth.schemaFromString(parseableSchemaString))
+
+
+
+class TestRecord(Record, fromTable(testSchema.ALPHA)):
+    """
+    A sample test record.
+    """
+
+
+
+class TestAutoRecord(Record, fromTable(testSchema.DELTA)):
+    """
+    A sample test record with default values specified.
+    """
+
+
+
+class TestCRUD(TestCase):
+    """
+    Tests for creation, mutation, and deletion operations.
+    """
+
+    def setUp(self):
+        self.pool = buildConnectionPool(self, schemaString)
+
+
+    @inlineCallbacks
+    def test_simpleLoad(self):
+        """
+        Loading an existing row from the database by its primary key will
+        populate its attributes from columns of the corresponding row in the
+        database.
+        """
+        txn = self.pool.connection()
+        yield txn.execSQL("insert into ALPHA values (:1, :2)", [234, "one"])
+        yield txn.execSQL("insert into ALPHA values (:1, :2)", [456, "two"])
+        rec = yield TestRecord.load(txn, 456)
+        self.assertIsInstance(rec, TestRecord)
+        self.assertEquals(rec.beta, 456)
+        self.assertEquals(rec.gamma, "two")
+        rec2 = yield TestRecord.load(txn, 234)
+        self.assertIsInstance(rec2, TestRecord)
+        self.assertEqual(rec2.beta, 234)
+        self.assertEqual(rec2.gamma, "one")
+
+
+    @inlineCallbacks
+    def test_simpleCreate(self):
+        """
+        When a record object is created, a row with matching column values will
+        be created in the database.
+        """
+        txn = self.pool.connection()
+        rec = yield TestRecord.create(txn, beta=3, gamma=u'epsilon')
+        self.assertEquals(rec.beta, 3)
+        self.assertEqual(rec.gamma, u'epsilon')
+        rows = yield txn.execSQL("select BETA, GAMMA from ALPHA")
+        self.assertEqual(rows, [tuple([3, u'epsilon'])])
+
+
+    @inlineCallbacks
+    def test_simpleDelete(self):
+        """
+        When a record object is deleted, the row with a matching primary key
+        will be removed from the database.
+        """
+        txn = self.pool.connection()
+        def mkrow(beta, gamma):
+            return txn.execSQL("insert into ALPHA values (:1, :2)",
+                               [beta, gamma])
+        yield gatherResults([mkrow(123, u"one"), mkrow(234, u"two"),
+                             mkrow(345, u"three")])
+        tr = yield TestRecord.load(txn, 234)
+        yield tr.delete()
+        rows = yield txn.execSQL("select BETA, GAMMA from ALPHA order by BETA")
+        self.assertEqual(rows, [(123, u"one"), (345, u"three")])
+
+
+    @inlineCallbacks
+    def test_cantCreateWithoutRequiredValues(self):
+        """
+        When a L{Record} object is created without required values, it raises a
+        L{TypeError}.
+        """
+        txn = self.pool.connection()
+        te = yield self.failUnlessFailure(TestAutoRecord.create(txn),
+                                          TypeError)
+        self.assertIn("required attribute 'epsilon' not passed", str(te))
+
+
+    @inlineCallbacks
+    def test_tooManyAttributes(self):
+        """
+        When a L{Record} object is created with unknown attributes (those which
+        don't map to any column), it raises a L{TypeError}.
+        """
+        txn = self.pool.connection()
+        te = yield self.failUnlessFailure(TestRecord.create(
+                                        txn, beta=3, gamma=u'three',
+                                        extraBonusAttribute=u'nope',
+                                        otherBonusAttribute=4321,
+                                    ), TypeError)
+        self.assertIn("extraBonusAttribute, otherBonusAttribute", str(te))
+
+
+    @inlineCallbacks
+    def test_createFillsInPKey(self):
+        """
+        If L{Record.create} is called without an auto-generated primary key
+        value for its row, that value will be generated and set on the returned
+        object.
+        """
+        txn = self.pool.connection()
+        tr = yield TestAutoRecord.create(txn, epsilon=u'specified')
+        tr2 = yield TestAutoRecord.create(txn, epsilon=u'also specified')
+        self.assertEquals(tr.phi, 1)
+        self.assertEquals(tr2.phi, 2)
+
+
+    @inlineCallbacks
+    def test_attributesArentMutableYet(self):
+        """
+        Changing attributes on a database object is not supported yet, because
+        it's not entirely clear when to flush the SQL to the database.
+        Instead, for the time being, use C{.update}.  Attempting to set an
+        attribute directly raises an error that makes this explicit.
+        """
+        txn = self.pool.connection()
+        rec = yield TestRecord.create(txn, beta=7, gamma=u'what')
+        def setit():
+            rec.beta = 12
+        ro = self.assertRaises(ReadOnly, setit)
+        self.assertEqual(rec.beta, 7)
+        self.assertIn("SQL-backed attribute 'TestRecord.beta' is read-only. "
+                      "Use '.update(...)' to modify attributes.", str(ro))
+
+
+    @inlineCallbacks
+    def test_simpleUpdate(self):
+        """
+        L{Record.update} will change the values on the record and in the
+        database.
+        """
+        txn = self.pool.connection()
+        rec = yield TestRecord.create(txn, beta=3, gamma=u'epsilon')
+        yield rec.update(gamma=u'otherwise')
+        self.assertEqual(rec.gamma, u'otherwise')
+        yield txn.commit()
+        # Make sure that it persists.
+        txn = self.pool.connection()
+        rec = yield TestRecord.load(txn, 3)
+        self.assertEqual(rec.gamma, u'otherwise')
+
+
+    @inlineCallbacks
+    def test_simpleQuery(self):
+        """
+        L{Record.query} will allow you to query for a record by its class
+        attributes as columns.
+        """
+        txn = self.pool.connection()
+        for beta, gamma in [(123, u"one"), (234, u"two"), (345, u"three"),
+                            (356, u"three"), (456, u"four")]:
+            yield txn.execSQL("insert into ALPHA values (:1, :2)",
+                              [beta, gamma])
+        records = yield TestRecord.query(txn, TestRecord.gamma == u"three")
+        self.assertEqual(len(records), 2)
+        records.sort(key=lambda x: x.beta)
+        self.assertEqual(records[0].beta, 345)
+        self.assertEqual(records[1].beta, 356)
+
+
+    @inlineCallbacks
+    def test_all(self):
+        """
+        L{Record.all} will return all instances of the record, sorted by
+        primary key.
+        """
+        txn = self.pool.connection()
+        data = [(123, u"one"), (456, u"four"), (345, u"three"),
+                (234, u"two"), (356, u"three")]
+        for beta, gamma in data:
+            yield txn.execSQL("insert into ALPHA values (:1, :2)",
+                              [beta, gamma])
+        self.assertEqual(
+            [(x.beta, x.gamma) for x in (yield TestRecord.all(txn))],
+            sorted(data)
+        )
+
+
+    @inlineCallbacks
+    def test_orderedQuery(self):
+        """
+        L{Record.query} takes an 'order' argument which will allow the objects
+        returned to be ordered.
+        """
+        txn = self.pool.connection()
+        for beta, gamma in [(123, u"one"), (234, u"two"), (345, u"three"),
+                            (356, u"three"), (456, u"four")]:
+            yield txn.execSQL("insert into ALPHA values (:1, :2)",
+                              [beta, gamma])
+
+        records = yield TestRecord.query(txn, TestRecord.gamma == u"three",
+                                         TestRecord.beta)
+        self.assertEqual([record.beta for record in records], [345, 356])
+        records = yield TestRecord.query(txn, TestRecord.gamma == u"three",
+                                         TestRecord.beta, ascending=False)
+        self.assertEqual([record.beta for record in records], [356, 345])
+
+
+    @inlineCallbacks
+    def test_pop(self):
+        """
+        A L{Record} may be loaded and deleted atomically, with L{Record.pop}.
+        """
+        txn = self.pool.connection()
+        for beta, gamma in [(123, u"one"), (234, u"two"), (345, u"three"),
+                            (356, u"three"), (456, u"four")]:
+            yield txn.execSQL("insert into ALPHA values (:1, :2)",
+                              [beta, gamma])
+        rec = yield TestRecord.pop(txn, 234)
+        self.assertEqual(rec.gamma, u'two')
+        self.assertEqual((yield txn.execSQL("select count(*) from ALPHA "
+                                            "where BETA = :1", [234])),
+                         [tuple([0])])
+        yield self.failUnlessFailure(TestRecord.pop(txn, 234), NoSuchRecord)
+
+
+    def test_columnNamingConvention(self):
+        """
+        The naming convention maps columns C{LIKE_THIS} to be attributes
+        C{likeThis}.
+        """
+        self.assertEqual(Record.namingConvention(u"like_this"), "likeThis")
+        self.assertEqual(Record.namingConvention(u"LIKE_THIS"), "likeThis")
+        self.assertEqual(Record.namingConvention(u"LIKE_THIS_ID"), "likeThisID")
+
+
+
+

Modified: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_sqlsyntax.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_sqlsyntax.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/dal/test/test_sqlsyntax.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -25,19 +25,21 @@
     TableMismatch, Parameter, Max, Len, NotEnoughValues,
     Savepoint, RollbackToSavepoint, ReleaseSavepoint, SavepointAction,
     Union, Intersect, Except, SetExpression, DALError,
-    ResultAliasSyntax, Count, QueryGenerator)
+    ResultAliasSyntax, Count, QueryGenerator, ALL_COLUMNS)
 from twext.enterprise.dal.syntax import FixedPlaceholder, NumericPlaceholder
 from twext.enterprise.dal.syntax import Function
 from twext.enterprise.dal.syntax import SchemaSyntax
 from twext.enterprise.dal.test.test_parseschema import SchemaTestHelper
-from twext.enterprise.ienterprise import POSTGRES_DIALECT, ORACLE_DIALECT
+from twext.enterprise.ienterprise import (POSTGRES_DIALECT, ORACLE_DIALECT,
+                                          SQLITE_DIALECT)
 from twext.enterprise.test.test_adbapi2 import ConnectionPoolHelper
 from twext.enterprise.test.test_adbapi2 import NetworkedPoolHelper
-from twext.enterprise.test.test_adbapi2 import resultOf
+from twext.enterprise.test.test_adbapi2 import resultOf, AssertResultHelper
 from twisted.internet.defer import succeed
 from twisted.trial.unittest import TestCase
 
 
+
 class _FakeTransaction(object):
     """
     An L{IAsyncTransaction} that provides the relevant metadata for SQL
@@ -56,7 +58,42 @@
     TIMESTAMP = 'for timestamps!'
 
 
+class CatchSQL(object):
+    """
+    L{IAsyncTransaction} emulator that records the SQL executed on it.
+    """
+    counter = 0
 
+    def __init__(self, dialect=SQLITE_DIALECT, paramstyle='numeric'):
+        self.execed = []
+        self.pendingResults = []
+        self.dialect = dialect
+        self.paramstyle = paramstyle
+
+
+    def nextResult(self, result):
+        """
+        Make it so that the next result from L{execSQL} will be the argument.
+        """
+        self.pendingResults.append(result)
+
+
+    def execSQL(self, sql, args, rozrc):
+        """
+        Implement L{IAsyncTransaction} by recording C{sql} and C{args} in
+        C{self.execed}, and return a L{Deferred} firing either an integer or a
+        value pre-supplied by L{CatchSQL.nextResult}.
+        """
+        self.execed.append([sql, args])
+        self.counter += 1
+        if self.pendingResults:
+            result = self.pendingResults.pop(0)
+        else:
+            result = self.counter
+        return succeed(result)
+
+
+
 class NullTestingOracleTxn(object):
     """
     Fake transaction for testing oracle NULL behavior.
@@ -92,7 +129,7 @@
 
 
 
-class GenerationTests(ExampleSchemaHelper, TestCase):
+class GenerationTests(ExampleSchemaHelper, TestCase, AssertResultHelper):
     """
     Tests for syntactic helpers to generate SQL queries.
     """
@@ -396,6 +433,24 @@
         )
 
 
+    def test_tableIteration(self):
+        """
+        Iterating a L{TableSyntax} iterates its columns, in the order that they
+        are defined.
+        """
+        self.assertEquals(list(self.schema.FOO),
+                          [self.schema.FOO.BAR, self.schema.FOO.BAZ])
+
+
+    def test_noColumn(self):
+        """
+        Accessing an attribute that is not a defined column on a L{TableSyntax}
+        raises an L{AttributeError}.
+        """
+        self.assertRaises(AttributeError,
+                          lambda : self.schema.FOO.NOT_A_COLUMN)
+
+
     def test_columnAliases(self):
         """
         When attributes are set on a L{TableSyntax}, they will be remembered as
@@ -623,7 +678,6 @@
         """
         L{SetExpression} in a From sub-select.
         """
-        
         # Simple UNION
         self.assertEquals(
             Select(
@@ -683,8 +737,8 @@
 
     def test_inSubSelect(self):
         """
-        L{ColumnSyntax.In} returns a sub-expression using the SQL 'in' syntax with
-        a sub-select.
+        L{ColumnSyntax.In} returns a sub-expression using the SQL 'in' syntax
+        with a sub-select.
         """
         wherein = (self.schema.FOO.BAR.In(
                     Select([self.schema.BOZ.QUX], From=self.schema.BOZ)))
@@ -696,10 +750,9 @@
 
     def test_inParameter(self):
         """
-        L{ColumnSyntax.In} returns a sub-expression using the SQL 'in' syntax with
-        parameter list.
+        L{ColumnSyntax.In} returns a sub-expression using the SQL 'in' syntax
+        with parameter list.
         """
-        
         # One item with IN only
         items = set(('A',))
         self.assertEquals(
@@ -778,6 +831,17 @@
                 "select max(QUX) from BOZ"))
 
 
+    def test_countAllColumns(self):
+        """
+        L{Count}C{(ALL_COLUMNS)} produces an object in the 'columns' clause
+        that renders as C{count(*)} in SQL.
+        """
+        self.assertEquals(
+            Select([Count(ALL_COLUMNS)], From=self.schema.BOZ).toSQL(),
+            SQLFragment(
+                "select count(*) from BOZ"))
+
+
     def test_aggregateComparison(self):
         """
         L{Max}C{(column) > constant} produces an object in the 'columns' clause
@@ -943,6 +1007,137 @@
         )
 
 
+    def test_insertMultiReturnSQLite(self):
+        """
+        In SQLite's SQL dialect, there is no 'returning' clause, but given that
+        SQLite serializes all SQL transactions, you can rely upon 'select'
+        after a write operation to reliably give you exactly what was just
+        modified.  Therefore, although 'toSQL' won't include any indication of
+        the return value, the 'on' method will execute a 'select' statement
+        following the insert to retrieve the value.
+        """
+        insertStatement = Insert({self.schema.FOO.BAR: 39,
+                    self.schema.FOO.BAZ: 82},
+                   Return=(self.schema.FOO.BAR, self.schema.FOO.BAZ)
+        )
+        qg = lambda : QueryGenerator(SQLITE_DIALECT, NumericPlaceholder())
+        self.assertEquals(insertStatement.toSQL(qg()),
+            SQLFragment("insert into FOO (BAR, BAZ) values (:1, :2)",
+                        [39, 82])
+        )
+        result = []
+        csql = CatchSQL()
+        insertStatement.on(csql).addCallback(result.append)
+        self.assertEqual(result, [2])
+        self.assertEqual(
+            csql.execed,
+            [["insert into FOO (BAR, BAZ) values (:1, :2)", [39, 82]],
+             ["select BAR, BAZ from FOO where rowid = last_insert_rowid()", []]]
+        )
+
+
+    def test_insertNoReturnSQLite(self):
+        """
+        Inserting a row I{without} a C{Return=} parameter should also work as
+        normal in sqlite.
+        """
+        statement = Insert({self.schema.FOO.BAR: 12,
+                            self.schema.FOO.BAZ: 48})
+        csql = CatchSQL()
+        statement.on(csql)
+        self.assertEqual(
+            csql.execed,
+            [["insert into FOO (BAR, BAZ) values (:1, :2)", [12, 48]]]
+        )
+
+
+    def test_updateReturningSQLite(self):
+        """
+        Since SQLite does not support the SQL 'returning' syntax extension, in
+        order to preserve the rows that will be modified during an UPDATE
+        statement, we must first find the rows that will be affected, then
+        update them, then return the rows that were affected.  Since we might
+        be changing even part of the primary key, we use the internal 'rowid'
+        column to uniquely and reliably identify rows in the sqlite database
+        that have been modified.
+        """
+        csql = CatchSQL()
+        stmt = Update({self.schema.FOO.BAR: 4321},
+                      Where=self.schema.FOO.BAZ == 1234,
+                      Return=self.schema.FOO.BAR)
+        csql.nextResult([["sample row id"]])
+        result = resultOf(stmt.on(csql))
+        # Three statements were executed; make sure that the result returned was
+        # the result of executing the 3rd (and final) one.
+        self.assertResultList(result, 3)
+        # Check that they were the right statements.
+        self.assertEqual(len(csql.execed), 3)
+        self.assertEqual(
+            csql.execed[0],
+            ["select rowid from FOO where BAZ = :1", [1234]]
+        )
+        self.assertEqual(
+            csql.execed[1],
+            ["update FOO set BAR = :1 where BAZ = :2", [4321, 1234]]
+        )
+        self.assertEqual(
+            csql.execed[2],
+            ["select BAR from FOO where rowid = :1", ["sample row id"]]
+        )
+
+
+    def test_updateReturningMultipleValuesSQLite(self):
+        """
+        When SQLite updates multiple values, it must embed the row ID of each
+        subsequent value into its second 'where' clause, as there is no way to
+        pass a list of values to a single statement.
+        """
+        csql = CatchSQL()
+        stmt = Update({self.schema.FOO.BAR: 4321},
+                      Where=self.schema.FOO.BAZ == 1234,
+                      Return=self.schema.FOO.BAR)
+        csql.nextResult([["one row id"], ["and another"], ["and one more"]])
+        result = resultOf(stmt.on(csql))
+        # Three statements were executed; make sure that the result returned was
+        # the result of executing the 3rd (and final) one.
+        self.assertResultList(result, 3)
+        # Check that they were the right statements.
+        self.assertEqual(len(csql.execed), 3)
+        self.assertEqual(
+            csql.execed[0],
+            ["select rowid from FOO where BAZ = :1", [1234]]
+        )
+        self.assertEqual(
+            csql.execed[1],
+            ["update FOO set BAR = :1 where BAZ = :2", [4321, 1234]]
+        )
+        self.assertEqual(
+            csql.execed[2],
+            ["select BAR from FOO where rowid = :1 or rowid = :2 or rowid = :3",
+             ["one row id", "and another", "and one more"]]
+        )
+
+
+    def test_deleteReturningSQLite(self):
+        """
+        When SQLite deletes rows with a C{Return=} parameter, it must first
+        select the requested values from the rows to be deleted, then perform
+        the deletion, since there is no 'returning' syntax to do both at once.
+        """
+        csql = CatchSQL()
+        stmt = Delete(From=self.schema.FOO, Where=self.schema.FOO.BAZ == 1234,
+                      Return=self.schema.FOO.BAR)
+        result = resultOf(stmt.on(csql))
+        self.assertResultList(result, 1)
+        self.assertEqual(len(csql.execed), 2)
+        self.assertEqual(
+            csql.execed[0],
+            ["select BAR from FOO where BAZ = :1", [1234]]
+        )
+        self.assertEqual(
+            csql.execed[1],
+            ["delete from FOO where BAZ = :1", [1234]]
+        )
+
+
     def test_insertMismatch(self):
         """
         L{Insert} raises L{TableMismatch} if the columns specified aren't all

Copied: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/fixtures.py (from rev 9756, CalendarServer/trunk/twext/enterprise/fixtures.py)
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/fixtures.py	                        (rev 0)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/fixtures.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -0,0 +1,55 @@
+# -*- test-case-name: twext.enterprise.test.test_fixtures -*-
+##
+# Copyright (c) 2012 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Fixtures for testing code that uses ADBAPI2.
+"""
+
+import sqlite3
+
+from twext.enterprise.adbapi2 import ConnectionPool
+from twext.enterprise.ienterprise import SQLITE_DIALECT
+
+def buildConnectionPool(testCase, schemaText="", dialect=SQLITE_DIALECT):
+    """
+    Build a L{ConnectionPool} for testing purposes, with the given C{testCase}.
+
+    @param testCase: the test case to attach the resulting L{ConnectionPool}
+        to.
+    @type testCase: L{twisted.trial.unittest.TestCase}
+
+    @param schemaText: The text of the schema with which to initialize the
+        database.
+    @type schemaText: L{str}
+
+    @param dialect: The SQL dialect to use; defaults to L{SQLITE_DIALECT}.
+    """
+    sqlitename = testCase.mktemp()
+    seqs = {}
+    def connectionFactory(label=testCase.id()):
+        conn = sqlite3.connect(sqlitename)
+        def nextval(seq):
+            result = seqs[seq] = seqs.get(seq, 0) + 1
+            return result
+        conn.create_function("nextval", 1, nextval)
+        return conn
+    con = connectionFactory()
+    con.executescript(schemaText)
+    con.commit()
+    pool = ConnectionPool(connectionFactory, paramstyle='numeric',
+                          dialect=dialect)
+    pool.startService()
+    testCase.addCleanup(pool.stopService)
+    return pool
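
A minimal sketch of how a trial test might use this fixture (the test class
and the schema text here are hypothetical, not part of this changeset):

    from twisted.trial.unittest import TestCase
    from twext.enterprise.fixtures import buildConnectionPool

    class PoolUserTests(TestCase):
        def setUp(self):
            # The fixture registers stopService() via addCleanup(), so no
            # tearDown() is needed to shut the pool down.
            self.pool = buildConnectionPool(
                self, "create table EXAMPLE (ID integer primary key);"
            )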

Modified: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/ienterprise.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/ienterprise.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/ienterprise.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -20,6 +20,16 @@
 
 __all__ = [
     "IAsyncTransaction",
+    "ISQLExecutor",
+    "ICommandBlock",
+    "IQueuer",
+    "IDerivedParameter",
+    "AlreadyFinishedError",
+    "ConnectionError",
+    "POSTGRES_DIALECT",
+    "SQLITE_DIALECT",
+    "ORACLE_DIALECT",
+    "ORACLE_TABLE_NAME_MAX",
 ]
 
 from zope.interface import Interface, Attribute
@@ -42,6 +52,7 @@
 
 POSTGRES_DIALECT = 'postgres-dialect'
 ORACLE_DIALECT = 'oracle-dialect'
+SQLITE_DIALECT = 'sqlite-dialect'
 ORACLE_TABLE_NAME_MAX = 30
 
 
@@ -102,6 +113,18 @@
         """
 
 
+    def postCommit(operation):
+        """
+        Perform the given operation only after this L{IAsyncTransaction}
+        commits.  These will be invoked before the L{Deferred} returned by
+        L{IAsyncTransaction.commit} fires.
+
+        @param operation: a 0-argument callable that may return a L{Deferred}.
+            If it does, then the subsequent operations added by L{postCommit}
+            will not fire until that L{Deferred} fires.
+        """
+
+
     def abort():
         """
         Roll back changes caused by this transaction.
@@ -111,6 +134,17 @@
         """
 
 
+    def postAbort(operation):
+        """
+        Invoke a callback after abort.
+
+        @see: L{IAsyncTransaction.postCommit}
+
+        @param operation: 0-argument callable, potentially returning a
+            L{Deferred}.
+        """
+
+
     def commandBlock():
         """
         Create an object which will cause the commands executed on it to be
@@ -218,3 +252,32 @@
 
         @return: C{None}
         """
+
+
+
+class IQueuer(Interface):
+    """
+    An L{IQueuer} can enqueue work for later execution.
+    """
+
+    def enqueueWork(transaction, workItemType, **kw):
+        """
+        Perform some work, eventually.
+
+        @param transaction: an L{IAsyncTransaction} within which to I{commit}
+            to doing the work.  Note that this work will likely be done later
+            (but depending on various factors, may actually be done within this
+            transaction as well).
+
+        @param workItemType: the type of work item to create.
+        @type workItemType: L{type}, specifically, a subtype of L{WorkItem
+            <twext.enterprise.queue.WorkItem>}
+
+        @param kw: The keyword parameters are relayed to C{workItemType.create}
+            to create an appropriately initialized item.
+
+        @return: a work proposal that allows tracking of the various phases of
+            completion of the work item.
+        @rtype: L{twext.enterprise.queue.WorkItem}
+        """
+

Copied: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/queue.py (from rev 9756, CalendarServer/trunk/twext/enterprise/queue.py)
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/queue.py	                        (rev 0)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/queue.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -0,0 +1,1230 @@
+# -*- test-case-name: twext.enterprise.test.test_queue -*-
+##
+# Copyright (c) 2012 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+L{twext.enterprise.queue} is a task-queueing system for use by applications
+with multiple front-end servers talking to a single database instance that
+want to defer and parallelize work that involves storing the results of
+computation.
+
+By enqueuing with L{twext.enterprise.queue}, you may guarantee that the work
+will I{eventually} be done, and reliably commit to doing it in the future, but
+defer it if it does not need to be done I{now}.
+
+To pick a hypothetical example, let's say that you have a store which wants to
+issue a promotional coupon based on a customer loyalty program, in response to
+an administrator clicking on a button.  Determining the list of customers to
+send the coupon to is quick: a simple query will get you all their names.
+However, analyzing each user's historical purchase data is (A) time consuming
+and (B) relatively isolated, so it would be good to do that in parallel, and it
+would also be acceptable to have that happen at a later time, outside the
+critical path.
+
+Such an application might be implemented with this queueing system like so::
+
+    from twext.enterprise.queue import WorkItem, queueFromTransaction
+    from twext.enterprise.dal.parseschema import addSQLToSchema
+    from twext.enterprise.dal.syntax import SchemaSyntax
+
+    schemaModel = Schema()
+    addSQLToSchema('''
+        create table CUSTOMER (NAME varchar(255), ID integer primary key);
+        create table PRODUCT (NAME varchar(255), ID integer primary key);
+        create table PURCHASE (NAME varchar(255), WHEN timestamp,
+                               CUSTOMER_ID integer references CUSTOMER,
+                               PRODUCT_ID integer references PRODUCT);
+        create table COUPON_WORK (WORK_ID integer primary key,
+                                  CUSTOMER_ID integer references CUSTOMER);
+        create table COUPON (ID integer primary key,
+                            CUSTOMER_ID integer references customer,
+                            AMOUNT integer);
+    ''')
+    schema = SchemaSyntax(schemaModel)
+
+    class Coupon(Record, fromTable(schema.COUPON)):
+        pass
+
+    class CouponWork(WorkItem, fromTable(schema.COUPON_WORK)):
+        @inlineCallbacks
+        def doWork(self):
+            purchases = yield Select(schema.PURCHASE,
+                                     Where=schema.PURCHASE.CUSTOMER_ID
+                                     == self.customerID).on(self.transaction)
+            couponAmount = yield doSomeMathThatTakesAWhile(purchases)
+            yield Coupon.create(self.transaction,
+                                customerID=self.customerID,
+                                amount=couponAmount)
+
+    @inlineCallbacks
+    def makeSomeCoupons(txn):
+        # Note, txn was started before, will be committed later...
+        for customerID in (yield Select([schema.CUSTOMER.ID],
+                                        From=schema.CUSTOMER).on(txn)):
+            # queuer is a provider of IQueuer, of which there are several
+            # implementations in this module.
+            queuer.enqueueWork(txn, CouponWork, customerID=customerID)
+"""
+
+from socket import getfqdn
+from functools import wraps
+from os import getpid
+from datetime import datetime, timedelta
+
+from zope.interface import implements
+
+from twisted.application.service import Service
+from twisted.internet.protocol import Factory
+from twisted.internet.defer import (
+    inlineCallbacks, returnValue, Deferred, succeed
+)
+from twisted.internet.endpoints import TCP4ClientEndpoint
+from twisted.protocols.amp import AMP, Command, Integer, Argument, String
+from twisted.python.reflect import qual
+from twisted.python import log
+
+from twext.enterprise.dal.syntax import TableSyntax, SchemaSyntax
+from twext.enterprise.dal.model import ProcedureCall
+from twext.enterprise.dal.syntax import NamedValue
+from twext.enterprise.dal.record import Record, fromTable
+from twisted.python.failure import Failure
+from twisted.internet.defer import passthru
+from twext.enterprise.dal.model import Table, Schema, SQLType, Constraint
+from twisted.internet.endpoints import TCP4ServerEndpoint
+from twext.enterprise.dal.syntax import Lock
+from twext.enterprise.ienterprise import IQueuer
+
+def makeNodeSchema(inSchema):
+    """
+    Create a self-contained schema for L{NodeInfo} to use.
+
+    @return: a schema with just the one table.
+    """
+    # Initializing this duplicate schema avoids a circular dependency, but this
+    # should really be accomplished with independent schema objects that the
+    # transaction is made aware of somehow.
+    NodeTable = Table(inSchema, 'NODE_INFO')
+    NodeTable.addColumn("HOSTNAME", SQLType("varchar", 255))
+    NodeTable.addColumn("PID", SQLType("integer", None))
+    NodeTable.addColumn("PORT", SQLType("integer", None))
+    NodeTable.addColumn("TIME", SQLType("timestamp", None)).setDefaultValue(
+        # Note: in the real data structure, this is actually a not-cleaned-up
+        # sqlparse internal data structure, but it *should* look closer to this.
+        ProcedureCall("timezone", ["UTC", NamedValue('CURRENT_TIMESTAMP')])
+    )
+    for column in NodeTable.columns:
+        NodeTable.tableConstraint(Constraint.NOT_NULL, [column.name])
+    NodeTable.primaryKey = [NodeTable.columnNamed("HOSTNAME"),
+                            NodeTable.columnNamed("PORT")]
+    return inSchema
+
+NodeInfoSchema = SchemaSyntax(makeNodeSchema(Schema(__file__)))
+
+
+@inlineCallbacks
+def inTransaction(transactionCreator, operation):
+    """
+    Perform the given operation in a transaction, committing or aborting as
+    required.
+
+    @param transactionCreator: a 0-arg callable that returns an
+        L{IAsyncTransaction}
+
+    @param operation: a 1-arg callable that takes an L{IAsyncTransaction} and
+        returns a value.
+
+    @return: a L{Deferred} that fires with C{operation}'s result or fails with
+        its error, unless there is an error creating, aborting or committing
+        the transaction.
+    """
+    txn = transactionCreator()
+    try:
+        result = yield operation(txn)
+    except:
+        f = Failure()
+        yield txn.abort()
+        returnValue(f)
+    else:
+        yield txn.commit()
+        returnValue(result)
+
+
+
+class TableSyntaxByName(Argument):
+    """
+    Serialize and deserialize L{TableSyntax} objects for an AMP protocol with
+    an attached schema.
+    """
+
+    def fromStringProto(self, inString, proto):
+        """
+        Convert the name of the table into a table, given a C{proto} with an
+        attached C{schema}.
+
+        @param inString: the name of a table, as utf-8 encoded bytes
+        @type inString: L{bytes}
+
+        @param proto: an L{SchemaAMP}
+        """
+        return TableSyntax(proto.schema.tableNamed(inString.decode("UTF-8")))
+
+
+    def toString(self, inObject):
+        """
+        Convert a L{TableSyntax} object into just its name for wire transport.
+
+        @param inObject: a table.
+        @type inObject: L{TableSyntax}
+
+        @return: the name of that table
+        @rtype: L{bytes}
+        """
+        return inObject.model.name.encode("UTF-8")
+
+
+
+class NodeInfo(Record, fromTable(NodeInfoSchema.NODE_INFO)):
+    """
+    A L{NodeInfo} is information about a currently-active Node process.
+    """
+
+    def endpoint(self, reactor):
+        """
+        Create an L{IStreamClientEndpoint} that will connect to the node
+        process that is described by this L{NodeInfo}.
+
+        @return: an endpoint that will connect to this host.
+        @rtype: L{IStreamClientEndpoint}
+        """
+        return TCP4ClientEndpoint(reactor, self.hostname, self.port)
+
+
+
+def abstract(thunk):
+    """
+    The decorated function is abstract.
+
+    @note: only methods are currently supported.
+    """
+    @classmethod
+    @wraps(thunk)
+    def inner(cls, *a, **k):
+        raise NotImplementedError(qual(cls) + " does not implement " +
+                                  thunk.func_name)
+    return inner
+
+
+
+class WorkItem(Record):
+    """
+    An item of work.
+
+    @ivar workID: the unique identifier (primary key) for items of this type.
+        There must be a corresponding column in the database.
+    @type workID: L{int}
+
+    @cvar created: on an instance, the timestamp at which the item was
+        created; on the class, the column describing its creation time.
+    @type created: L{datetime.datetime}
+    """
+
+    @abstract
+    def doWork(self):
+        """
+        Subclasses must implement this to actually perform the queued work.
+
+        This method will be invoked in a worker process.
+
+        This method does I{not} need to delete the row referencing it; that
+        will be taken care of by the job queueing machinery.
+        """
+
+
+    @classmethod
+    def forTable(cls, table):
+        """
+        Look up a work-item class given a particular L{TableSyntax}.  Factoring
+        this correctly may place it into L{twext.enterprise.record.Record}
+        instead; it is probably generally useful to be able to look up a mapped
+        class from a table.
+
+        @param table: the table to look up
+        @type table: L{twext.enterprise.dal.model.Table}
+
+        @return: the relevant subclass
+        @rtype: L{type}
+        """
+        for subcls in cls.__subclasses__():
+            if table == getattr(subcls, "table", None):
+                return subcls
+        raise KeyError("No mapped {0} class for {1}.".format(
+            cls, table
+        ))
+
+
+
+class PerformWork(Command):
+    """
+    Notify another process that it must do some work that has been persisted to
+    the database, by informing it of the table and the ID where said work has
+    been persisted.
+    """
+
+    arguments = [
+        ("table", TableSyntaxByName()),
+        ("workID", Integer()),
+    ]
+    response = []
+
+
+
+class ReportLoad(Command):
+    """
+    Notify another node of the total, current load for this whole node (all of
+    its workers).
+    """
+    arguments = [
+        ("load", Integer())
+    ]
+    response = []
+
+
+class IdentifyNode(Command):
+    """
+    Identify this node to its peer.  The connector knows which hostname it's
+    looking for, and which hostname it considers itself to be; only the
+    initiator (not the listener) issues this command.  This command is
+    necessary because if reverse DNS isn't set up perfectly, the listener may
+    not be able to identify its peer.
+    """
+
+    arguments = [
+        ("host", String()),
+        ("port", Integer()),
+    ]
+
+
+
+class SchemaAMP(AMP):
+    """
+    An AMP instance which also has a L{Schema} attached to it.
+
+    @ivar schema: The schema to look up L{TableSyntaxByName} arguments in.
+    @type schema: L{Schema}
+    """
+
+    def __init__(self, schema, boxReceiver=None, locator=None):
+        self.schema = schema
+        super(SchemaAMP, self).__init__(boxReceiver, locator)
+
+
+
+class ConnectionFromPeerNode(SchemaAMP):
+    """
+    A connection to a peer node.  Symmetric; since the 'client' and the
+    'server' both serve the same role, the logic is the same in every node.
+    """
+
+    def __init__(self, peerPool, boxReceiver=None, locator=None):
+        """
+        Initialize this L{ConnectionFromPeerNode} with a reference to the
+        pool of peers it belongs to, from which the pool of local workers is
+        obtained.
+
+        @param peerPool: the peer-connection pool that created this
+            connection; its worker pool processes local queue work.
+        @type peerPool: L{PeerConnectionPool}
+
+        @see: L{AMP.__init__}
+        """
+        self.peerPool = peerPool
+        self.localWorkerPool = peerPool.workerPool
+        self._bonusLoad = 0
+        self._reportedLoad = 0
+        super(ConnectionFromPeerNode, self).__init__(peerPool.schema,
+                                                     boxReceiver, locator)
+
+
+    def reportCurrentLoad(self):
+        """
+        Report the current load for the local worker pool to this peer.
+        """
+        return self.callRemote(ReportLoad,
+                               load=self.localWorkerPool.totalLoad())
+
+
+    @ReportLoad.responder
+    def reportedLoad(self, load):
+        """
+        The peer reports its load.
+        """
+        self._reportedLoad = (load - self._bonusLoad)
+        return {}
+
+
+    def startReceivingBoxes(self, sender):
+        """
+        Connection is up and running; add this to the list of active peers.
+        """
+        r = super(ConnectionFromPeerNode, self).startReceivingBoxes(sender)
+        self.peerPool.addPeerConnection(self)
+        return r
+
+
+    def stopReceivingBoxes(self, reason):
+        """
+        The connection has shut down; remove this from the list of active
+        peers.
+        """
+        self.peerPool.removePeerConnection(self)
+        r = super(ConnectionFromPeerNode, self).stopReceivingBoxes(reason)
+        return r
+
+
+    def currentLoadEstimate(self):
+        """
+        What is the current load estimate for this peer?
+
+        @return: The number of full "slots", i.e. currently-being-processed
+            queue items (and other items which may contribute to this process's
+            load, such as currently-being-processed client requests).
+        @rtype: L{int}
+        """
+        return self._reportedLoad + self._bonusLoad
+
+
+    def performWork(self, table, workID):
+        """
+        A L{local worker connection <ConnectionFromWorker>} is asking this
+        specific peer node-controller process to perform some work, having
+        already determined that it's appropriate.
+
+        @param table: The table where work is waiting.
+        @type table: L{TableSyntax}
+
+        @param workID: The primary key identifier of the given work.
+        @type workID: L{int}
+
+        @return: a L{Deferred} firing with an empty dictionary when the work is
+            complete.
+        @rtype: L{Deferred} firing L{dict}
+        """
+        d = self.callRemote(PerformWork, table=table, workID=workID)
+        self._bonusLoad += 1
+        @d.addBoth
+        def performed(result):
+            self._bonusLoad -= 1
+            return result
+        return d
+
+
+    @PerformWork.responder
+    def dispatchToWorker(self, table, workID):
+        """
+        A remote peer node has asked this node to do some work; dispatch it to
+        a local worker on this node.
+
+        @param table: the table to work on.
+        @type table: L{TableSyntax}
+
+        @param workID: the identifier within the table.
+        @type workID: L{int}
+
+        @return: a L{Deferred} that fires when the work has been completed.
+        """
+        return self.localWorkerPool.performWork(table, workID)
+
+
+    @IdentifyNode.responder
+    def identifyPeer(self, host, port):
+        """
+        A peer has identified itself; record the host/port mapping.
+        """
+        self.peerPool.mapPeer(host, port, self)
+        return {}
+
+
+
+class WorkerConnectionPool(object):
+    """
+    A pool of L{ConnectionFromWorker}s.
+
+    L{WorkerConnectionPool} also implements the same implicit protocol as a
+    L{ConnectionFromPeerNode}, but one that dispenses work to the local worker
+    processes rather than to a remote connection pool.
+    """
+
+    def __init__(self, maximumLoadPerWorker=0):
+        self.workers = []
+        self.maximumLoadPerWorker = maximumLoadPerWorker
+
+
+    def addWorker(self, worker):
+        """
+        Add a L{ConnectionFromWorker} to this L{WorkerConnectionPool} so that
+        it can be selected.
+        """
+        self.workers.append(worker)
+
+
+    def removeWorker(self, worker):
+        """
+        Remove a L{ConnectionFromWorker} from this L{WorkerConnectionPool} that
+        was previously added.
+        """
+        self.workers.remove(worker)
+
+
+    def hasAvailableCapacity(self):
+        """
+        Does this worker connection pool have any local workers with spare
+        capacity to process another queue item?
+        """
+        for worker in self.workers:
+            if worker.currentLoad() < self.maximumLoadPerWorker:
+                return True
+        return False
+
+
+    def totalLoad(self):
+        """
+        The total load of all currently connected workers.
+        """
+        return sum(worker.currentLoad() for worker in self.workers)
+
+
+    def _selectLowestLoadWorker(self):
+        """
+        Select the local connection with the lowest current load.
+
+        @return: a worker connection with the lowest current load.
+        @rtype: L{ConnectionFromWorker}
+        """
+        return sorted(self.workers, key=lambda w: w.currentLoad())[0]
+
+
+    def performWork(self, table, workID):
+        """
+        Select a local worker that is idle enough to perform the given work,
+        then ask it to perform that work.
+
+        @param table: The table where work is waiting.
+        @type table: L{TableSyntax}
+
+        @param workID: The primary key identifier of the given work.
+        @type workID: L{int}
+
+        @return: a L{Deferred} firing with an empty dictionary when the work is
+            complete.
+        @rtype: L{Deferred} firing L{dict}
+        """
+        return self._selectLowestLoadWorker().performWork(table, workID)
+
+
+
+class ConnectionFromWorker(SchemaAMP):
+    """
+    An individual connection from a worker, as seen from the master's
+    perspective.  L{ConnectionFromWorker}s go into a L{WorkerConnectionPool}.
+
+    @ivar workerPool: The connection pool that this individual connection is
+        participating in.
+    @type workerPool: L{WorkerConnectionPool}
+    """
+
+    def __init__(self, schema, workerPool, boxReceiver=None, locator=None):
+        self.workerPool = workerPool
+        super(ConnectionFromWorker, self).__init__(schema, boxReceiver, locator)
+        self._load = 0
+
+
+    def currentLoad(self):
+        """
+        What is the current load of this worker?
+        """
+        return self._load
+
+
+    def startReceivingBoxes(self, sender):
+        """
+        Start receiving AMP boxes from the peer.  Initialize all necessary
+        state.
+        """
+        result = super(ConnectionFromWorker, self).startReceivingBoxes(sender)
+        self.workerPool.addWorker(self)
+        return result
+
+
+    def stopReceivingBoxes(self, reason):
+        """
+        AMP boxes will no longer be received.
+        """
+        result = super(ConnectionFromWorker, self).stopReceivingBoxes(reason)
+        self.workerPool.removeWorker(self)
+        return result
+
+
+    def performWork(self, table, workID):
+        """
+        Dispatch work to this worker.
+
+        @see: The responder for this should always be
+            L{ConnectionFromController.actuallyReallyExecuteWorkHere}.
+        """
+        d = self.callRemote(PerformWork, table=table, workID=workID)
+        self._load += 1
+        @d.addBoth
+        def f(result):
+            self._load -= 1
+            return result
+        return d
+
+
+
+class ConnectionFromController(SchemaAMP):
+    """
+    A L{ConnectionFromController} is the connection to a node-controller
+    process, in a worker process.  It processes requests from its own
+    controller to do work.  It is the opposite end of the connection from
+    L{ConnectionFromWorker}.
+    """
+    implements(IQueuer)
+
+    def __init__(self, transactionFactory, schema, whenConnected,
+                 boxReceiver=None, locator=None):
+        super(ConnectionFromController, self).__init__(schema,
+                                                       boxReceiver, locator)
+        self.transactionFactory = transactionFactory
+        self.whenConnected = whenConnected
+
+
+    def startReceivingBoxes(self, sender):
+        result = super(ConnectionFromController, self).startReceivingBoxes(
+            sender)
+        self.whenConnected(self)
+        return result
+
+
+    def choosePerformer(self):
+        """
+        To conform with L{WorkProposal}'s expectations, which may run in either
+        a controller (against a L{PeerConnectionPool}) or in a worker (against
+        a L{ConnectionFromController}), this is implemented to return a
+        L{Deferred} that has already fired with C{self}, since C{self} is
+        also an object that has a C{performWork} method.
+        """
+        return succeed(self)
+
+
+    def performWork(self, table, workID):
+        """
+        Ask the controller to perform some work on our behalf.
+        """
+        return self.callRemote(PerformWork, table=table, workID=workID)
+
+
+    def enqueueWork(self, txn, workItemType, **kw):
+        """
+        There is some work to do.  Do it, someplace else, ideally in parallel.
+        Later, let the caller know that the work has been completed by firing a
+        L{Deferred}.
+
+        @param workItemType: The type of work item to be enqueued.
+        @type workItemType: A subtype of L{WorkItem}
+
+        @param kw: The parameters to construct a work item.
+        @type kw: keyword parameters to C{workItemType.create}, i.e.
+            C{workItemType.__init__}
+
+        @return: an object that can track the enqueuing and remote execution of
+            this work.
+        @rtype: L{WorkProposal}
+        """
+        wp = WorkProposal(self, txn, workItemType, kw)
+        wp._start()
+        return wp
+
+
+    @PerformWork.responder
+    def actuallyReallyExecuteWorkHere(self, table, workID):
+        """
+        This is where it's time to actually do the work.  The controller
+        process has instructed this worker to do it; so, look up the data in
+        the row, and do it.
+        """
+        @inlineCallbacks
+        def work(txn):
+            workItemClass = WorkItem.forTable(table)
+            workItem = yield workItemClass.load(txn, workID)
+            # TODO: what if we fail?  error-handling should be recorded
+            # someplace, the row should probably be marked, re-tries should be
+            # triggerable administratively.
+            yield workItem.delete()
+            # TODO: verify that workID is the primary key someplace.
+            yield workItem.doWork()
+            returnValue({})
+        return inTransaction(self.transactionFactory, work)
+
+
+
+class WorkerFactory(Factory, object):
+    """
+    Factory, to be used as the client to connect from the worker to the
+    controller.
+    """
+
+    def __init__(self, transactionFactory, schema, whenConnected):
+        """
+        Create a L{WorkerFactory} with a transaction factory and a schema.
+        """
+        self.transactionFactory = transactionFactory
+        self.schema = schema
+        self.whenConnected = whenConnected
+
+
+    def buildProtocol(self, addr):
+        """
+        Create a L{ConnectionFromController} connected to the
+        transactionFactory and store.
+        """
+        return ConnectionFromController(self.transactionFactory, self.schema,
+                                        self.whenConnected)
+
+
+
+class TransactionFailed(Exception):
+    """
+    A transaction failed.
+    """
+
+
+
+def _cloneDeferred(d):
+    """
+    Make a new Deferred, adding callbacks to C{d}.
+
+    @return: another L{Deferred} that fires with C{d's} result when C{d} fires.
+    @rtype: L{Deferred}
+    """
+    d2 = Deferred()
+    d.chainDeferred(d2)
+    return d2
+
+
+
+class WorkProposal(object):
+    """
+    A L{WorkProposal} is a proposal for work that will be executed, perhaps on
+    another node, perhaps in the future.
+
+    @ivar pool: the connection pool which this L{WorkProposal} will use to
+        submit its work.
+    @type pool: L{PeerConnectionPool}
+
+    @ivar txn: The transaction where the work will be enqueued.
+    @type txn: L{IAsyncTransaction}
+
+    @ivar workItemType: The type of work to be enqueued by this L{WorkProposal}
+    @type workItemType: L{WorkItem} subclass
+
+    @ivar kw: The keyword arguments to pass to C{self.workItemType.create} to
+        construct it.
+    @type kw: L{dict}
+    """
+
+    def __init__(self, pool, txn, workItemType, kw):
+        self.pool = pool
+        self.txn = txn
+        self.workItemType = workItemType
+        self.kw = kw
+        self._whenProposed = Deferred()
+        self._whenExecuted = Deferred()
+        self._whenCommitted = Deferred()
+
+
+    def _start(self):
+        """
+        Execute this L{WorkProposal} by creating the work item in the database,
+        waiting for the transaction where that addition was completed to
+        commit, and asking the local node controller process to do the work.
+        """
+        @passthru(self.workItemType.create(self.txn, **self.kw).addCallback)
+        def created(item):
+            self._whenProposed.callback(None)
+            @self.txn.postCommit
+            def whenDone():
+                self._whenCommitted.callback(None)
+                @passthru(self.pool.choosePerformer().addCallback)
+                def performerChosen(performer):
+                    @passthru(performer.performWork(item.table, item.workID))
+                    def performed(result):
+                        self._whenExecuted.callback(None)
+                    @performed.addErrback
+                    def notPerformed(why):
+                        self._whenExecuted.errback(why)
+                @performerChosen.addErrback
+                def notChosen(whyNot):
+                    self._whenExecuted.errback(whyNot)
+            @self.txn.postAbort
+            def whenFailed():
+                self._whenCommitted.errback(TransactionFailed)
+
+
+    def whenExecuted(self):
+        """
+        Let the caller know when the proposed work has been fully executed.
+
+        @note: The L{Deferred} returned by C{whenExecuted} should be used with
+            extreme caution.  If an application decides to do any
+            database-persistent work as a result of this L{Deferred} firing,
+            that work I{may be lost} as a result of a service being normally
+            shut down between the time that the work is scheduled and the time
+            that it is executed.  So, the only things that should be added as
+            callbacks to this L{Deferred} are those which are ephemeral, in
+            memory, and reflect only presentation state associated with the
+            user's perception of the completion of work, not logical chains of
+            work which need to be completed in sequence; those should all be
+            completed within the transaction of the L{WorkItem.doWork} that
+            gets executed.
+
+        @return: a L{Deferred} that fires with C{None} when the work has been
+            completed remotely.
+        """
+        return _cloneDeferred(self._whenExecuted)
+
+
+    def whenProposed(self):
+        """
+        Let the caller know when the work has been proposed; i.e. when the work
+        is first transmitted to the database.
+
+        @return: a L{Deferred} that fires with C{None} when the relevant
+            commands have been sent to the database to create the L{WorkItem},
+            and fails if those commands do not succeed for some reason.
+        """
+        return _cloneDeferred(self._whenProposed)
+
+
+    def whenCommitted(self):
+        """
+        Let the caller know when the work has been committed to; i.e. when the
+        transaction where the work was proposed has been committed to the
+        database.
+
+        @return: a L{Deferred} that fires with C{None} when the relevant
+            transaction has been committed, or fails if the transaction is not
+            committed for any reason.
+        """
+        return _cloneDeferred(self._whenCommitted)
+
+
+
+class PeerConnectionPool(Service, object):
+    """
+    Each node has a L{PeerConnectionPool} connecting it to all the other nodes
+    currently active on the same database.
+
+    @ivar hostname: The hostname where this node process is running, as
+        reported by the local host's configuration.  Possibly this should be
+        obtained via C{config.ServerHostName} instead of C{socket.getfqdn()},
+        although hosts within a cluster may be configured with the same
+        C{ServerHostName} (TODO: need to confirm).
+    @type hostname: L{bytes}
+
+    @ivar thisProcess: a L{NodeInfo} representing this process, which is
+        initialized when this L{PeerConnectionPool} service is started via
+        C{startService}.  May be C{None} if this service is not fully started
+        up or if it is shutting down.
+    @type thisProcess: L{NodeInfo}
+
+    @ivar queueProcessTimeout: The maximum amount of time allowed for a queue
+        item to be processed.  By default, 10 minutes.
+    @type queueProcessTimeout: L{float} (in seconds)
+
+    @ivar queueDelayedProcessInterval: The amount of time between database
+        pings, i.e. checks for over-due queue items that might have been
+        orphaned by a controller process that died mid-transaction.  This is
+        how often the shared database should be pinged by I{all} nodes (i.e.,
+        all controller processes, or each instance of L{PeerConnectionPool});
+        each individual node will ping commensurately less often as more nodes
+        join the database.
+    @type queueDelayedProcessInterval: L{float} (in seconds)
+
+    @ivar reactor: The reactor used for scheduling timed events.
+    @type reactor: L{IReactorTime} provider.
+
+    @ivar peers: The list of currently connected peers.
+    @type peers: L{list} of L{ConnectionFromPeerNode}
+    """
+    implements(IQueuer)
+
+    getfqdn = staticmethod(getfqdn)
+    getpid = staticmethod(getpid)
+
+    queueProcessTimeout = (10.0 * 60.0)
+    queueDelayedProcessInterval = (60.0)
+
+    def __init__(self, reactor, transactionFactory, ampPort, schema):
+        """
+        Initialize a L{PeerConnectionPool}.
+
+        @param ampPort: The AMP TCP port number to listen on for inter-host
+            communication.  This must be an integer (and not, say, an endpoint,
+            or an endpoint description) because we need to communicate it to
+            the other peers in the cluster in a way that will be meaningful to
+            them as clients.
+        @type ampPort: L{int}
+
+        @param transactionFactory: a 0- or 1-argument callable that produces an
+            L{IAsyncTransaction}
+
+        @param schema: The schema which contains all the tables associated with
+            the L{WorkItem}s that this L{PeerConnectionPool} will process.
+        @type schema: L{Schema}
+        """
+        self.reactor = reactor
+        self.transactionFactory = transactionFactory
+        self.hostname = self.getfqdn()
+        self.pid = self.getpid()
+        self.ampPort = ampPort
+        self.thisProcess = None
+        self.workerPool = WorkerConnectionPool()
+        self.peers = []
+        self.mappedPeers = {}
+        self.schema = schema
+        self._startingUp = None
+        self._listeningPortObject = None
+        self._lastSeenTotalNodes = 1
+        self._lastSeenNodeIndex = 1
+
+
+    def addPeerConnection(self, peer):
+        """
+        Add a L{ConnectionFromPeerNode} to the active list of peers.
+        """
+        self.peers.append(peer)
+
+
+    def workerListenerFactory(self):
+        """
+        Factory that listens for connections from workers.
+        """
+        f = Factory()
+        f.buildProtocol = lambda addr: ConnectionFromWorker(self.schema,
+                                                            self.workerPool)
+        return f
+
+
+    def removePeerConnection(self, peer):
+        """
+        Remove a L{ConnectionFromPeerNode} from the active list of peers.
+        """
+        self.peers.remove(peer)
+
+
+    def choosePerformer(self):
+        """
+        Choose a peer to distribute work to based on the current known slot
+        occupancy of the other nodes.  Note that this will prefer distributing
+        work to local workers until the current node is full, because that
+        should be lower-latency.  Also, if no peers are available, work will be
+        submitted locally even if the worker pool is already over-subscribed.
+
+        @return: a L{Deferred <twisted.internet.defer.Deferred>} which fires
+            with the chosen 'peer', i.e. object with a C{performWork} method,
+            as soon as one is available.  Normally this will be synchronous,
+            but we need to account for the possibility that we may need to
+            connect to other hosts.
+        @rtype: L{Deferred <twisted.internet.defer.Deferred>} firing
+            L{ConnectionFromPeerNode} or L{WorkerConnectionPool}
+        """
+        if not self.workerPool.hasAvailableCapacity() and self.peers:
+            return succeed(
+                sorted(self.peers, key=lambda p: p.currentLoadEstimate())[0])
+        else:
+            return succeed(self.workerPool)
+
+
+    def enqueueWork(self, txn, workItemType, **kw):
+        """
+        There is some work to do.  Do it, someplace else, ideally in parallel.
+        Later, let the caller know that the work has been completed by firing a
+        L{Deferred}.
+
+        @param workItemType: The type of work item to be enqueued.
+        @type workItemType: A subtype of L{WorkItem}
+
+        @param kw: The parameters to construct a work item.
+        @type kw: keyword parameters to C{workItemType.create}, i.e.
+            C{workItemType.__init__}
+
+        @return: an object that can track the enqueuing and remote execution of
+            this work.
+        @rtype: L{WorkProposal}
+        """
+        wp = WorkProposal(self, txn, workItemType, kw)
+        wp._start()
+        return wp
+
+
+    def allWorkItemTypes(self):
+        """
+        Load all the L{WorkItem} types that this node can process and return
+        them.
+
+        @return: L{list} of L{type}
+        """
+        # TODO: For completeness, this may need to involve a plugin query to
+        # make sure that all WorkItem subclasses are imported first.
+        return WorkItem.__subclasses__()
+
+
+    def totalNumberOfNodes(self):
+        """
+        How many nodes are there, total?
+
+        @return: the maximum number of other L{PeerConnectionPool} instances
+            that may be connected to the database described by
+            C{self.transactionFactory}.  Note that this is not the current count
+            by connectivity, but the count according to the database.
+        @rtype: L{int}
+        """
+        # TODO
+        return self._lastSeenTotalNodes
+
+
+    def nodeIndex(self):
+        """
+        What ordinal does this node, i.e. this instance of
+        L{PeerConnectionPool}, occupy within the ordered set of all nodes
+        connected to the database described by C{self.transactionFactory}?
+
+        @return: the index of this node within the total collection.  For
+            example, if this L{PeerConnectionPool} is 6 out of 30, this method
+            will return C{6}.
+        @rtype: L{int}
+        """
+        # TODO
+        return self._lastSeenNodeIndex
+
+
+    def _periodicLostWorkCheck(self):
+        """
+        Periodically, every node controller has to check to make sure that work
+        hasn't been dropped on the floor by someone.  In order to do that it
+        queries each work-item table.
+        """
+        @inlineCallbacks
+        def workCheck(txn):
+
+            nodes = [(node.hostname, node.port) for node in
+                     (yield self.activeNodes(txn))]
+            nodes.sort()
+            self._lastSeenTotalNodes = len(nodes)
+            self._lastSeenNodeIndex = nodes.index((self.thisProcess.hostname,
+                                                   self.thisProcess.port))
+            for itemType in self.allWorkItemTypes():
+                tooOld = (datetime.now() -
+                          timedelta(seconds=self.queueProcessTimeout))
+                for overdueItem in (
+                        yield itemType.query(
+                            txn, itemType.created < tooOld
+                    )):
+                    peer = yield self.choosePerformer()
+                    yield peer.performWork(overdueItem.table,
+                                           overdueItem.workID)
+        return inTransaction(self.transactionFactory, workCheck)
+
+
+    _currentWorkDeferred = None
+    _lostWorkCheckCall = None
+
+    def _lostWorkCheckLoop(self):
+        """
+        While the service is running, keep checking for any overdue / lost work
+        items and re-submit them to the cluster for processing.  Space out
+        those checks in time based on the size of the cluster.
+        """
+        self._lostWorkCheckCall = None
+        @passthru(self._periodicLostWorkCheck().addErrback(log.err)
+                  .addCallback)
+        def scheduleNext(result):
+            self._currentWorkDeferred = None
+            if not self.running:
+                return
+            index = self.nodeIndex()
+            now = self.reactor.seconds()
+
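+            # Stagger the wakeups: each node schedules its next check at a
+            # slot determined by its index, so checks are spread across the
+            # cluster and each node checks less often as more nodes join.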
+            interval = self.queueDelayedProcessInterval
+            count = self.totalNumberOfNodes()
+            when = (now - (now % interval)) + (interval * (count + index))
+            delay = when - now
+            self._lostWorkCheckCall = self.reactor.callLater(
+                delay, self._lostWorkCheckLoop
+            )
+        self._currentWorkDeferred = scheduleNext
+
+
+    def startService(self):
+        """
+        Register ourselves with the database and establish all outgoing
+        connections to other servers in the cluster.
+        """
+        @inlineCallbacks
+        def startup(txn):
+            endpoint = TCP4ServerEndpoint(self.reactor, self.ampPort)
+            f = Factory()
+            f.buildProtocol = self.createPeerConnection
+            # If this fails, the failure mode is going to be ugly, just like all
+            # conflicted-port failures.  But, at least it won't proceed.
+            yield endpoint.listen(f)
+            yield Lock.exclusive(NodeInfo.table).on(txn)
+            nodes = yield self.activeNodes(txn)
+            selves = [node for node in nodes
+                      if ((node.hostname == self.hostname) and
+                          (node.port == self.ampPort))]
+            if selves:
+                self.thisProcess = selves[0]
+                nodes.remove(self.thisProcess)
+                yield self.thisProcess.update(pid=self.pid,
+                                              time=datetime.now())
+            else:
+                self.thisProcess = yield NodeInfo.create(
+                    txn, hostname=self.hostname, port=self.ampPort,
+                    pid=self.pid, time=datetime.now()
+                )
+            for node in nodes:
+                self._startConnectingTo(node)
+
+        self._startingUp = inTransaction(self.transactionFactory, startup)
+        @self._startingUp.addBoth
+        def done(result):
+            self._startingUp = None
+            return result
+
+
+    @inlineCallbacks
+    def stopService(self):
+        """
+        Stop this service, terminating any incoming or outgoing connections.
+        """
+        yield super(PeerConnectionPool, self).stopService()
+        if self._startingUp is not None:
+            yield self._startingUp
+        if self._listeningPortObject is not None:
+            yield self._listeningPortObject.stopListening()
+        if self._lostWorkCheckCall is not None:
+            self._lostWorkCheckCall.cancel()
+        if self._currentWorkDeferred is not None:
+            yield self._currentWorkDeferred
+        for peer in self.peers:
+            peer.transport.loseConnection()
+
+
+    def activeNodes(self, txn):
+        """
+        Load information about all other nodes.
+        """
+        return NodeInfo.all(txn)
+
+
+    def mapPeer(self, host, port, peer):
+        """
+        A peer has been identified as belonging to the given host/port
+        combination.  Disconnect any other peer that claims to be connected for
+        the same peer.
+        """
+        # if (host, port) in self.mappedPeers:
+            # TODO: think about this for race conditions
+            # self.mappedPeers.pop((host, port)).transport.loseConnection()
+        self.mappedPeers[(host, port)] = peer
+
+
+    def _startConnectingTo(self, node):
+        """
+        Start an outgoing connection to another master process.
+
+        @param node: a description of the master to connect to.
+        @type node: L{NodeInfo}
+        """
+        f = Factory()
+        f.buildProtocol = self.createPeerConnection
+        @passthru(node.endpoint(self.reactor).connect(f).addCallback)
+        def connected(proto):
+            self.mapPeer(node.hostname, node.port, proto)
+            proto.callRemote(IdentifyNode,
+                             host=self.thisProcess.hostname,
+                             port=self.thisProcess.port)
+
+
+    def createPeerConnection(self, addr):
+        return ConnectionFromPeerNode(self)
+
+
+
+class ImmediateWorkProposal(object):
+    """
+    Like L{WorkProposal}, but for items that must be executed immediately
+    because no real queue is set up yet.
+
+    @see: L{WorkProposal}, L{NullQueuer.enqueueWork}
+    """
+    def __init__(self, proposed, done):
+        self.proposed = proposed
+        self.done = done
+
+
+    def whenExecuted(self):
+        return _cloneDeferred(self.done)
+
+
+    def whenProposed(self):
+        return _cloneDeferred(self.proposed)
+
+
+    def whenCommitted(self):
+        return _cloneDeferred(self.done)
+
+
+
+class NullQueuer(object):
+    """
+    When work is enqueued with this queuer, it is just executed immediately,
+    within the same transaction.  While this is technically correct, it is not
+    very efficient.
+    """
+    implements(IQueuer)
+
+    def enqueueWork(self, txn, workItemType, **kw):
+        """
+        Do this work immediately.
+
+        @see: L{PeerConnectionPool.enqueueWork}
+
+        @return: a pseudo work proposal, since everything completes at the same
+            time.
+        @rtype: L{ImmediateWorkProposal}
+        """
+        proposed = Deferred()
+        done = Deferred()
+        @inlineCallbacks
+        def doit():
+            item = yield workItemType.create(txn, **kw)
+            proposed.callback(True)
+            yield item.delete()
+            yield item.doWork()
+        doit()
+        @txn.postCommit
+        def committed():
+            done.callback(True)
+        @txn.postAbort
+        def aborted():
+            tf = TransactionFailed()
+            done.errback(tf)
+            if not proposed.called:
+                proposed.errback(tf)
+        return ImmediateWorkProposal(proposed, done)
+
+
+
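
Roughly, a node-controller process would wire the new queue machinery up like
so (a sketch; C{transactionFactory}, C{schema}, and the AMP port number are
application-specific assumptions, not part of this changeset):

    from twisted.internet import reactor
    from twext.enterprise.queue import PeerConnectionPool

    queuePool = PeerConnectionPool(reactor, transactionFactory, 7654, schema)
    queuePool.startService()  # or attach it to a parent service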

Modified: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/test/test_adbapi2.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/test/test_adbapi2.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/test/test_adbapi2.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -19,19 +19,19 @@
 """
 
 from itertools import count
+from Queue import Empty
 
 from zope.interface.verify import verifyClass, verifyObject
 from zope.interface.declarations import implements
 
 from twisted.python.threadpool import ThreadPool
+from twisted.python.failure import Failure
 
 from twisted.trial.unittest import TestCase
 
-from twisted.internet.defer import execute
 from twisted.internet.task import Clock
 
 from twisted.internet.interfaces import IReactorThreads
-from twisted.internet.defer import Deferred
 
 from twisted.test.proto_helpers import StringTransport
 
@@ -45,6 +45,7 @@
 from twext.enterprise.adbapi2 import FailsafeException
 from twext.enterprise.adbapi2 import DEFAULT_PARAM_STYLE
 from twext.enterprise.adbapi2 import ConnectionPool
+from twext.internet.threadutils import ThreadHolder
 
 
 def resultOf(deferred, propagate=False):
@@ -65,6 +66,22 @@
 
 
 
+class AssertResultHelper(object):
+    """
+    Mixin for asserting about synchronous Deferred results.
+    """
+
+    def assertResultList(self, resultList, expected):
+        """
+        Assert that C{resultList} contains exactly one result equal to
+        C{expected}, re-raising it first if it is a L{Failure}.
+        """
+        if not resultList:
+            self.fail("No result; Deferred didn't fire yet.")
+        else:
+            if isinstance(resultList[0], Failure):
+                resultList[0].raiseException()
+            else:
+                self.assertEqual(resultList, [expected])
+
+
+
 class Child(object):
     """
     An object with a L{Parent}, in its list of C{children}.
@@ -326,59 +343,75 @@
 
 
 
-class FakeThreadHolder(object):
+class FakeThreadHolder(ThreadHolder):
     """
-    Run things submitted to this ThreadHolder on the main thread, so that
+    Run things submitted to this ThreadHolder on the main thread, so that
     execution is easier to control.
     """
 
     def __init__(self, test):
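+        # Pass ourselves as the ThreadHolder's 'reactor': the
+        # callFromThread/callInThread overrides below make submitted work
+        # run synchronously on the main thread.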
+        super(FakeThreadHolder, self).__init__(self)
+        self.test = test
         self.started = False
         self.stopped = False
-        self.test = test
-        self.queue = []
+        self._workerIsRunning = False
 
 
     def start(self):
-        """
-        Mark this L{FakeThreadHolder} as not started.
-        """
         self.started = True
+        return super(FakeThreadHolder, self).start()
 
 
     def stop(self):
-        """
-        Mark this L{FakeThreadHolder} as stopped.
-        """
-        def stopped(nothing):
-            self.stopped = True
-        return self.submit(lambda : None).addCallback(stopped)
+        result = super(FakeThreadHolder, self).stop()
+        self.stopped = True
+        return result
 
 
-    def submit(self, work):
+    @property
+    def _q(self):
+        return self._q_
+
+
+    @_q.setter
+    def _q(self, newq):
+        if newq is not None:
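+            # Wrap the queue: get() becomes non-blocking (timeout=0), and
+            # put() flushes queued work immediately unless the test has
+            # paused this holder.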
+            oget = newq.get
+            newq.get = lambda: oget(timeout=0)
+            oput = newq.put
+            def putit(x):
+                p = oput(x)
+                if not self.test.paused:
+                    self.flush()
+                return p
+            newq.put = putit
+        self._q_ = newq
+
+
+    def callFromThread(self, f, *a, **k):
+        result = f(*a, **k)
+        return result
+
+
+    def callInThread(self, f, *a, **k):
         """
-        Call the function (or queue it)
+        This should be called only once, to start the worker function that
+        dedicates a thread to this L{ThreadHolder}.
         """
-        if self.test.paused:
-            d = Deferred()
-            self.queue.append((d, work))
-            return d
-        else:
-            return execute(work)
+        self._workerIsRunning = True
 
 
     def flush(self):
         """
         Fire all deferreds previously returned from submit.
         """
-        self.queue, queue = [], self.queue
-        for (d, work) in queue:
-            try:
-                result = work()
-            except:
-                d.errback()
+        try:
+            while self._workerIsRunning and self._qpull():
+                pass
             else:
-                d.callback(result)
+                self._workerIsRunning = False
+        except Empty:
+            pass
 
 
 
@@ -456,7 +489,19 @@
         self.assertEquals(threadpool.max, defaultMax)
 
 
+    def test_isRunning(self):
+        """
+        L{ConnectionPool.startService} should set its C{running} attribute to
+        true.
+        """
+        pool = ConnectionPool(None)
+        pool.reactor = ClockWithThreads()
+        self.assertEquals(pool.running, False)
+        pool.startService()
+        self.assertEquals(pool.running, True)
 
+
+
 class ConnectionPoolHelper(object):
     """
     Connection pool setting-up facilities for tests that need a
@@ -525,7 +570,7 @@
 
 
 
-class ConnectionPoolTests(ConnectionPoolHelper, TestCase):
+class ConnectionPoolTests(ConnectionPoolHelper, TestCase, AssertResultHelper):
     """
     Tests for L{ConnectionPool}.
     """
@@ -587,6 +632,7 @@
         self.assertEquals(holder.started, True)
         self.assertEquals(holder.stopped, False)
         self.pool.stopService()
+        self.assertEquals(self.pool.running, False)
         self.assertEquals(len(self.holders), 1)
         self.assertEquals(holder.started, True)
         self.assertEquals(holder.stopped, True)
@@ -634,7 +680,7 @@
         self.assertEquals(len(errors), 1)
         stopd = []
         self.pool.stopService().addBoth(stopd.append)
-        self.assertEquals([None], stopd)
+        self.assertResultList(stopd, None)
         self.assertEquals(self.clock.calls, [])
         [holder] = self.holders
         self.assertEquals(holder.started, True)
@@ -643,8 +689,8 @@
 
     def test_shutdownDuringAttemptSuccess(self):
         """
-        If L{ConnectionPool.stopService} is called while a connection attempt is
-        outstanding, the resulting L{Deferred} won't be fired until the
+        If L{ConnectionPool.stopService} is called while a connection attempt
+        is outstanding, the resulting L{Deferred} won't be fired until the
         connection attempt has finished; in this case, succeeded.
         """
         self.pauseHolders()
@@ -653,7 +699,7 @@
         self.pool.stopService().addBoth(stopd.append)
         self.assertEquals(stopd, [])
         self.flushHolders()
-        self.assertEquals(stopd, [None])
+        self.assertResultList(stopd, None)
         [holder] = self.holders
         self.assertEquals(holder.started, True)
         self.assertEquals(holder.stopped, True)
@@ -661,8 +707,8 @@
 
     def test_shutdownDuringAttemptFailed(self):
         """
-        If L{ConnectionPool.stopService} is called while a connection attempt is
-        outstanding, the resulting L{Deferred} won't be fired until the
+        If L{ConnectionPool.stopService} is called while a connection attempt
+        is outstanding, the resulting L{Deferred} won't be fired until the
         connection attempt has finished; in this case, failed.
         """
         self.factory.defaultFail()
@@ -674,7 +720,7 @@
         self.flushHolders()
         errors = self.flushLoggedErrors(FakeConnectionError)
         self.assertEquals(len(errors), 1)
-        self.assertEquals(stopd, [None])
+        self.assertResultList(stopd, None)
         [holder] = self.holders
         self.assertEquals(holder.started, True)
         self.assertEquals(holder.stopped, True)
@@ -699,7 +745,7 @@
         self.assertEquals(stopResult, [])
         self.flushHolders()
         #self.assertEquals(abortResult, [None])
-        self.assertEquals(stopResult, [None])
+        self.assertResultList(stopResult, None)
 
 
     def test_stopServiceWithSpooled(self):
@@ -845,10 +891,10 @@
         abortResult = self.resultOf(it.abort())
 
         # steal it from the queue so we can do it out of order
-        d, work = self.holders[0].queue.pop()
+        d, work = self.holders[0]._q.get()
         # that should be the only work unit so don't continue if something else
         # got in there
-        self.assertEquals(self.holders[0].queue, [])
+        self.assertEquals(list(self.holders[0]._q.queue), [])
         self.assertEquals(len(self.holders), 1)
         self.flushHolders()
         stopResult = self.resultOf(self.pool.stopService())
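The AssertResultHelper mixin introduced above exists so that when stopService() fails, the captured Failure is re-raised with its traceback instead of producing an opaque list mismatch from assertEquals([None], stopd). Note also that FakeThreadHolder now subclasses the real ThreadHolder and passes itself as the "reactor", so callInThread/callFromThread execute synchronously on the main thread. A minimal sketch of the Failure-surfacing pattern, using only Twisted's public Deferred API:

    from twisted.internet.defer import fail, succeed
    from twisted.python.failure import Failure

    def resultOf(deferred):
        # Synchronously collect whatever the Deferred has fired with.
        results = []
        deferred.addBoth(results.append)
        return results

    stopd = resultOf(succeed(None))
    assert stopd == [None]                  # the old-style equality check

    failed = resultOf(fail(RuntimeError("boom")))
    try:
        if isinstance(failed[0], Failure):
            failed[0].raiseException()      # re-raises with the real traceback
    except RuntimeError, e:
        print "original error surfaced:", e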

Copied: CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/test/test_fixtures.py (from rev 9756, CalendarServer/trunk/twext/enterprise/test/test_fixtures.py)
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/test/test_fixtures.py	                        (rev 0)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/enterprise/test/test_fixtures.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -0,0 +1,51 @@
+##
+# Copyright (c) 2012 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Tests for L{twext.enterprise.fixtures}.
+
+Quis custodiet ipsos custodes?  This module, that's who.
+"""
+
+from twext.enterprise.fixtures import buildConnectionPool
+
+from twisted.trial.unittest import TestCase
+from twisted.trial.reporter import TestResult
+from twext.enterprise.adbapi2 import ConnectionPool
+
+class PoolTests(TestCase):
+    """
+    Tests for fixtures that create a connection pool.
+    """
+
+    def test_buildConnectionPool(self):
+        """
+        L{buildConnectionPool} returns a L{ConnectionPool} which will be
+        running only for the duration of the test.
+        """
+        collect = []
+        class SampleTest(TestCase):
+            def setUp(self):
+                self.pool = buildConnectionPool(self)
+            def test_sample(self):
+                collect.append(self.pool.running)
+            def tearDown(self):
+                collect.append(self.pool.running)
+        r = TestResult()
+        t = SampleTest("test_sample")
+        t.run(r)
+        self.assertIsInstance(t.pool, ConnectionPool)
+        self.assertEqual([True, False], collect)
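As this test demonstrates, buildConnectionPool ties the pool's startService/stopService to the requesting test's lifecycle. A sketch of typical usage, assuming only the buildConnectionPool(testCase) signature exercised above; the connection() call is assumed to be ConnectionPool's standard entry point for obtaining a transaction:

    from twisted.trial.unittest import TestCase
    from twext.enterprise.fixtures import buildConnectionPool

    class MyPoolTests(TestCase):
        def setUp(self):
            # Started here; stopped automatically when the test finishes.
            self.pool = buildConnectionPool(self)

        def test_poolIsUsable(self):
            self.assertTrue(self.pool.running)
            txn = self.pool.connection()    # obtain a transaction
            self.assertNotIdentical(txn, None)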

Modified: CalendarServer/branches/users/gaya/inviteclean/twext/internet/threadutils.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/internet/threadutils.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/internet/threadutils.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -45,22 +45,32 @@
         """
         Worker function which runs in a non-reactor thread.
         """
-        while True:
-            work = self._q.get()
-            if work is _DONE:
-                def finishStopping():
-                    self._state = _STATE_STOPPED
-                    self._q = None
-                    s = self._stopper
-                    self._stopper = None
-                    s.callback(None)
-                self._reactor.callFromThread(finishStopping)
-                return
-            self._oneWorkUnit(*work)
+        while self._qpull():
+            pass
 
 
+    def _qpull(self):
+        """
+        Pull one item off the queue and react appropriately.
+
+        Return whether or not to keep going.
+        """
+        work = self._q.get()
+        if work is _DONE:
+            def finishStopping():
+                self._state = _STATE_STOPPED
+                self._q = None
+                s = self._stopper
+                self._stopper = None
+                s.callback(None)
+            self._reactor.callFromThread(finishStopping)
+            return False
+        self._oneWorkUnit(*work)
+        return True
+
+
     def _oneWorkUnit(self, deferred, instruction):
-        try: 
+        try:
             result = instruction()
         except:
             etype, evalue, etb = sys.exc_info()
@@ -80,6 +90,8 @@
 
         @return: L{Deferred} that fires with the result of L{work}
         """
+        if self._state != _STATE_RUNNING:
+            raise RuntimeError("not running")
         d = Deferred()
         self._q.put((d, work))
         return d
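With the new guard, submit() fails fast with RuntimeError when the holder has not been started (or has already stopped), rather than silently queueing work that no thread will ever service. A sketch of the calling convention this implies, assuming the ThreadHolder(reactor) constructor used elsewhere in this change:

    from twisted.internet import reactor
    from twext.internet.threadutils import ThreadHolder

    holder = ThreadHolder(reactor)
    holder.start()                        # required before any submit()
    d = holder.submit(lambda: 2 + 2)      # fires with 4, computed in the worker
    d.addCallback(lambda result: holder.stop())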

Modified: CalendarServer/branches/users/gaya/inviteclean/twext/python/sendmsg.c
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/python/sendmsg.c	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/python/sendmsg.c	2012-08-30 22:48:54 UTC (rev 9757)
@@ -113,7 +113,7 @@
 
     int fd;
     int flags = 0;
-    Py_ssize_t sendmsg_result;
+    Py_ssize_t sendmsg_result, iovec_length;
     struct msghdr message_header;
     struct iovec iov[1];
     PyObject *ancillary = NULL;
@@ -123,12 +123,14 @@
             args, keywds, "it#|iO:sendmsg", kwlist,
             &fd,
             &iov[0].iov_base,
-            &iov[0].iov_len,
+            &iovec_length,
             &flags,
             &ancillary)) {
         return NULL;
     }
 
+    iov[0].iov_len = iovec_length;
+
     message_header.msg_name = NULL;
     message_header.msg_namelen = 0;
 
@@ -220,7 +222,8 @@
         /* Unpack the tuples into the control message. */
         struct cmsghdr *control_message = CMSG_FIRSTHDR(&message_header);
         while ( (item = PyIter_Next(iterator)) ) {
-            int data_len, type, level;
+            int type, level;
+            Py_ssize_t data_len;
             size_t data_size;
             unsigned char *data, *cmsg_data;
 
@@ -250,7 +253,7 @@
                 free(message_header.msg_control);
 
                 PyErr_Format(PyExc_OverflowError,
-                             "CMSG_LEN(%d) > SOCKLEN_MAX", data_len);
+                             "CMSG_LEN(%zd) > SOCKLEN_MAX", data_len);
 
                 return NULL;
             }
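Background on the change: the t# format unit stores its length as a Py_ssize_t, which is not necessarily layout-compatible with struct iovec's iov_len field, so the fix parses into a Py_ssize_t local and assigns it; data_len is widened for the same reason, matching the %zd format specifier. The Python-level signature is unchanged. A sketch of a call, where the AF_UNIX socket pair is illustrative:

    import socket
    from twext.python.sendmsg import sendmsg

    left, right = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
    sendmsg(left.fileno(), "hello", 0)    # fd, data, flags; ancillary optional
    print right.recv(1024)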

Modified: CalendarServer/branches/users/gaya/inviteclean/twext/web2/dav/resource.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twext/web2/dav/resource.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twext/web2/dav/resource.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -76,6 +76,7 @@
 from twext.web2.dav.noneprops import NonePropertyStore
 from twext.web2.dav.util import unimplemented, parentForURL, joinURL
 from twext.web2.dav.auth import PrincipalCredentials
+from twistedcaldav import customxml
 
 
 log = Logger()
@@ -887,6 +888,7 @@
         result.append(element.Report(element.PrincipalMatch(),))
         result.append(element.Report(element.PrincipalPropertySearch(),))
         result.append(element.Report(element.ExpandProperty(),))
+        result.append(element.Report(customxml.CalendarServerPrincipalSearch(),))
         return result
 
     ##
@@ -1736,10 +1738,11 @@
         d.addCallback(cache)
         return d
 
+    @inlineCallbacks
     def principalIsGroupMember(self, principal1, principal2, request):
         """
         Check whether one principal is a group member of another.
-        
+
         @param principal1: C{str} principalURL for principal to test.
         @param principal2: C{str} principalURL for possible group
             principal to test against.
@@ -1747,25 +1750,15 @@
         @return: L{Deferred} with result C{True} if principal1 is a
             member of principal2, C{False} otherwise
         """
-        def gotGroup(group):
-            # Get principal resource for principal2
-            if group and isinstance(group, DAVPrincipalResource):
-                def gotMembers(members):
-                    for member in members:
-                        if member.principalURL() == principal1:
-                            return True
-                    return False
+        resource1 = yield request.locateResource(principal1)
+        resource2 = yield request.locateResource(principal2)
 
-                d = group.expandedGroupMembers()
-                d.addCallback(gotMembers)
-                return d
+        if resource2 and isinstance(resource2, DAVPrincipalResource):
+            isContained = yield resource2.containsPrincipal(resource1)
+            returnValue(isContained)
+        returnValue(False)
 
-            return False
         
-        d = request.locateResource(principal2)
-        d.addCallback(gotGroup)
-        return d
-        
     def validPrincipal(self, ace_principal, request):
         """
         Check whether the supplied principal is valid for this resource.
@@ -2460,7 +2453,20 @@
             )
             return d
 
+    @inlineCallbacks
+    def containsPrincipal(self, principal):
+        """
+        Is the given principal contained within our expanded group membership?
 
+        @param principal: The principal to check
+        @type principal: L{DirectoryCalendarPrincipalResource}
+        @return: True if principal is a member, False otherwise
+        @rtype: C{boolean}
+        """
+        members = yield self.expandedGroupMembers()
+        returnValue(principal in members)
+
+
 class DAVPrincipalCollectionResource (DAVResource):
     """
     WebDAV principal collection resource.  (RFC 3744, section 5.8)
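The rewrite of principalIsGroupMember replaces a nested-callback chain with inlineCallbacks and delegates the actual membership test to the new containsPrincipal() on the group's principal resource. A sketch of the resulting control flow, with illustrative function and URL names:

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def isMemberOf(request, memberURL, groupURL):
        # Resolve both principals, then ask the group directly.
        member = yield request.locateResource(memberURL)
        group = yield request.locateResource(groupURL)
        if group is not None and hasattr(group, "containsPrincipal"):
            result = yield group.containsPrincipal(member)
            returnValue(result)
        returnValue(False)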

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/customxml.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/customxml.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/customxml.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -54,6 +54,11 @@
     "calendarserver-principal-property-search",
 )
 
+calendarserver_principal_search_compliance = (
+    "calendarserver-principal-search",
+)
+
+
 calendarserver_sharing_compliance = (
     "calendarserver-sharing",
 )
@@ -1349,6 +1354,32 @@
     namespace = "http://apple.com/ns/ical/"
     name = "calendar-color"
 
+#
+# calendarserver-principal-search REPORT
+#
+
+ at registerElement
+class CalendarServerPrincipalSearchToken (WebDAVTextElement):
+    """
+    Contains a search token.
+    """
+    namespace = calendarserver_namespace
+    name = "search-token"
+
+ at registerElement
+class CalendarServerPrincipalSearch (WebDAVElement):
+
+    namespace = calendarserver_namespace
+    name = "calendarserver-principal-search"
+
+    allowed_children = {
+        (calendarserver_namespace, "search-token"          ): (0, None),
+        (calendarserver_namespace, "limit"                 ): (0, 1),
+        (dav_namespace, "prop"                             ): (0, 1),
+        (dav_namespace, "apply-to-principal-collection-set"): (0, 1),
+    }
+    allowed_attributes = { "context" : False }
+
 ##
 # Extensions to ResourceType
 ##
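For reference, a calendarserver-principal-search REPORT body built from the new elements might look like the sketch below; the PropertyContainer/DisplayName children and keyword-style attribute are assumptions based on the usual txdav.xml element API, not part of this change:

    from txdav.xml import element
    from twistedcaldav import customxml

    report = customxml.CalendarServerPrincipalSearch(
        customxml.CalendarServerPrincipalSearchToken("mor"),
        element.PropertyContainer(element.DisplayName()),
        context="attendee",
    )
    print report.toxml()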

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/database.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/database.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/database.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -107,31 +107,37 @@
             #
             # Create CALDAV table if needed
 
-            test = (yield self._test_schema_table())
-            if test:
-                version = (yield self._db_value_for_sql("select VALUE from CALDAV where KEY = 'SCHEMA_VERSION'"))
-                dbtype = (yield self._db_value_for_sql("select VALUE from CALDAV where KEY = 'TYPE'"))
+            try:
+                test = (yield self._test_schema_table())
+                if test:
+                    version = (yield self._db_value_for_sql("select VALUE from CALDAV where KEY = 'SCHEMA_VERSION'"))
+                    dbtype = (yield self._db_value_for_sql("select VALUE from CALDAV where KEY = 'TYPE'"))
 
-                if (version != self._db_version()) or (dbtype != self._db_type()):
+                    if (version != self._db_version()) or (dbtype != self._db_type()):
 
-                    if dbtype != self._db_type():
-                        log.err("Database %s has different type (%s vs. %s)"
-                                % (self.dbID, dbtype, self._db_type()))
+                        if dbtype != self._db_type():
+                            log.err("Database %s has different type (%s vs. %s)"
+                                    % (self.dbID, dbtype, self._db_type()))
 
-                        # Delete this index and start over
-                        yield self._db_remove()
-                        yield self._db_init()
+                            # Delete this index and start over
+                            yield self._db_remove()
+                            yield self._db_init()
 
-                    elif version != self._db_version():
-                        log.err("Database %s has different schema (v.%s vs. v.%s)"
-                                % (self.dbID, version, self._db_version()))
-                        
-                        # Upgrade the DB
-                        yield self._db_upgrade(version)
+                        elif version != self._db_version():
+                            log.err("Database %s has different schema (v.%s vs. v.%s)"
+                                    % (self.dbID, version, self._db_version()))
+                            
+                            # Upgrade the DB
+                            yield self._db_upgrade(version)
 
-            else:
-                yield self._db_init()
-            self.initialized = True
+                else:
+                    yield self._db_init()
+                self.initialized = True
+            except:
+                # Clean up upon error so we don't end up leaking threads
+                self.pool.close()
+                self.pool = None
+                raise
 
     def close(self):
         
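The try/except wrapper above applies a close-before-reraise pattern so that a failed schema check cannot leak the pool's worker threads. Reduced to a standalone sketch (make_pool and initialize are illustrative callables, not names from this change):

    def open_database(make_pool, initialize):
        pool = make_pool()
        try:
            initialize(pool)
        except Exception:
            pool.close()    # release worker threads before propagating
            raise
        return pool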

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/datafilters/peruserdata.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/datafilters/peruserdata.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/datafilters/peruserdata.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -223,7 +223,6 @@
             peruser = Component(PerUserDataFilter.PERUSER_COMPONENT)
             peruser.addProperty(Property("UID", ical.resourceUID()))
             peruser.addProperty(Property(PerUserDataFilter.PERUSER_UID, self.uid))
-            ical.addComponent(peruser)
             return peruser
         
         components = tuple(ical.subcomponents())
@@ -233,21 +232,18 @@
         for component in components:
             if component.name() == "VTIMEZONE":
                 continue
+            rid = component.getRecurrenceIDUTC()
 
-            def init_perinstance_component():
-                peruser = Component(PerUserDataFilter.PERINSTANCE_COMPONENT)
-                rid = component.getRecurrenceIDUTC()
-                perinstance_components[rid] = peruser
-                return peruser
+            perinstance_component = Component(PerUserDataFilter.PERINSTANCE_COMPONENT) if self.uid else None
+            perinstance_id_different = False
 
-            perinstance_component = init_perinstance_component() if self.uid else None
-            
             # Transfer per-user properties from main component to per-instance component
             for property in tuple(component.properties()):
                 if property.name() in PerUserDataFilter.PERUSER_PROPERTIES or property.name().startswith("X-"):
                     if self.uid:
                         perinstance_component.addProperty(property)
                     component.removeProperty(property)
+                    perinstance_id_different = True
             
             # Transfer per-user components from main component to per-instance component
             for subcomponent in tuple(component.subcomponents()):
@@ -255,19 +251,29 @@
                     if self.uid:
                         perinstance_component.addComponent(subcomponent)
                     component.removeComponent(subcomponent)
+                    perinstance_id_different = True
             
+            if perinstance_id_different and perinstance_component:
+                perinstance_components[rid] = perinstance_component
+            
         if self.uid:
             # Add unique per-instance components into the per-user component
+            peruser_component_different = False
             master_perinstance = perinstance_components.get(None)
             if master_perinstance:
                 peruser_component.addComponent(master_perinstance)
+                peruser_component_different = True
             for rid, perinstance in perinstance_components.iteritems():
                 if rid is None:
                     continue
                 if master_perinstance is None or perinstance != master_perinstance:
                     perinstance.addProperty(Property("RECURRENCE-ID", rid))
                     peruser_component.addComponent(perinstance)
-    
+                    peruser_component_different = True
+            
+            if peruser_component_different:
+                ical.addComponent(peruser_component)
+
             self._compactInstances(ical)
 
     def _compactInstances(self, ical):

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/datafilters/test/test_peruserdata.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/datafilters/test/test_peruserdata.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/datafilters/test/test_peruserdata.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -959,12 +959,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         
@@ -1123,12 +1117,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         
@@ -1420,8 +1408,6 @@
 UID:12345-67890
 X-CALENDARSERVER-PERUSER-UID:user01
 BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-BEGIN:X-CALENDARSERVER-PERINSTANCE
 RECURRENCE-ID:20080602T120000Z
 TRANSP:TRANSPARENT
 BEGIN:VALARM
@@ -1716,12 +1702,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         olddata = """BEGIN:VCALENDAR
@@ -1993,12 +1973,6 @@
 END:VEVENT
 BEGIN:X-CALENDARSERVER-PERUSER
 UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
 X-CALENDARSERVER-PERUSER-UID:user02
 BEGIN:X-CALENDARSERVER-PERINSTANCE
 TRANSP:OPAQUE
@@ -2048,12 +2022,6 @@
 ORGANIZER;CN=User 01:mailto:user1@example.com
 RRULE:FREQ=DAILY
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         olddata = """BEGIN:VCALENDAR
@@ -2125,12 +2093,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         olddata = """BEGIN:VCALENDAR
@@ -2182,13 +2144,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-RECURRENCE-ID:20080602T120000Z
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         olddata = """BEGIN:VCALENDAR
@@ -2900,12 +2855,6 @@
 END:VEVENT
 BEGIN:X-CALENDARSERVER-PERUSER
 UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
 X-CALENDARSERVER-PERUSER-UID:user02
 BEGIN:X-CALENDARSERVER-PERINSTANCE
 TRANSP:OPAQUE
@@ -3013,12 +2962,6 @@
 END:VEVENT
 BEGIN:X-CALENDARSERVER-PERUSER
 UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
 X-CALENDARSERVER-PERUSER-UID:user02
 BEGIN:X-CALENDARSERVER-PERINSTANCE
 TRANSP:OPAQUE
@@ -3105,13 +3048,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-RECURRENCE-ID:20080602T120000Z
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         
@@ -3151,12 +3087,6 @@
 ORGANIZER;CN=User 01:mailto:user1@example.com
 RRULE:FREQ=DAILY
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         olddata = """BEGIN:VCALENDAR
@@ -3209,12 +3139,6 @@
 ORGANIZER;CN=User 01:mailto:user1@example.com
 RRULE:FREQ=DAILY
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         olddata = """BEGIN:VCALENDAR
@@ -3275,12 +3199,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         olddata = """BEGIN:VCALENDAR
@@ -3954,12 +3872,6 @@
 END:VEVENT
 BEGIN:X-CALENDARSERVER-PERUSER
 UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
 X-CALENDARSERVER-PERUSER-UID:user02
 BEGIN:X-CALENDARSERVER-PERINSTANCE
 TRANSP:OPAQUE
@@ -4067,12 +3979,6 @@
 END:VEVENT
 BEGIN:X-CALENDARSERVER-PERUSER
 UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
 X-CALENDARSERVER-PERUSER-UID:user02
 BEGIN:X-CALENDARSERVER-PERINSTANCE
 TRANSP:OPAQUE
@@ -4160,12 +4066,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         
@@ -4205,12 +4105,6 @@
 ORGANIZER;CN=User 01:mailto:user1@example.com
 RRULE:FREQ=DAILY
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         olddata = """BEGIN:VCALENDAR
@@ -4492,12 +4386,6 @@
 END:VEVENT
 BEGIN:X-CALENDARSERVER-PERUSER
 UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
 X-CALENDARSERVER-PERUSER-UID:user02
 BEGIN:X-CALENDARSERVER-PERINSTANCE
 TRANSP:OPAQUE
@@ -4595,12 +4483,6 @@
 END:VEVENT
 BEGIN:X-CALENDARSERVER-PERUSER
 UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
 X-CALENDARSERVER-PERUSER-UID:user02
 BEGIN:X-CALENDARSERVER-PERINSTANCE
 TRANSP:OPAQUE
@@ -4670,12 +4552,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         olddata = """BEGIN:VCALENDAR
@@ -5132,12 +5008,6 @@
 END:VEVENT
 BEGIN:X-CALENDARSERVER-PERUSER
 UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
 X-CALENDARSERVER-PERUSER-UID:user02
 BEGIN:X-CALENDARSERVER-PERINSTANCE
 TRANSP:OPAQUE
@@ -5187,13 +5057,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-RECURRENCE-ID:20080602T120000Z
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         olddata = """BEGIN:VCALENDAR
@@ -5468,13 +5331,6 @@
 DTSTAMP:20080601T120000Z
 ORGANIZER;CN=User 01:mailto:user1@example.com
 END:VEVENT
-BEGIN:X-CALENDARSERVER-PERUSER
-UID:12345-67890
-X-CALENDARSERVER-PERUSER-UID:user01
-BEGIN:X-CALENDARSERVER-PERINSTANCE
-RECURRENCE-ID:20080602T120000Z
-END:X-CALENDARSERVER-PERINSTANCE
-END:X-CALENDARSERVER-PERUSER
 END:VCALENDAR
 """.replace("\n", "\r\n")
         

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/aggregate.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/aggregate.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/aggregate.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -148,6 +148,18 @@
     def recordWithCalendarUserAddress(self, address):
         return self._queryAll("recordWithCalendarUserAddress", address)
 
+    def recordWithCachedGroupsAlias(self, recordType, alias):
+        """
+        @param recordType: the type of the record to look up.
+        @param alias: the cached-groups alias of the record to look up.
+        @type alias: C{str}
+
+        @return: a deferred L{IDirectoryRecord} with the given cached-groups
+            alias, or C{None} if no such record is found.
+        """
+        service = self.serviceForRecordType(recordType)
+        return service.recordWithCachedGroupsAlias(recordType, alias)
+
     @inlineCallbacks
     def recordsMatchingFields(self, fields, operand="or", recordType=None):
 
@@ -164,6 +176,41 @@
 
         returnValue(itertools.chain(*generators))
 
+
+    @inlineCallbacks
+    def recordsMatchingTokens(self, tokens, context=None):
+        """
+        @param tokens: The tokens to search on
+        @type tokens: C{list} of C{str} (utf-8 bytes)
+        @param context: An indication of what the end user is searching
+            for; "attendee", "location", or None
+        @type context: C{str}
+        @return: a deferred sequence of L{IDirectoryRecord}s which
+            match the given tokens and optional context.
+
+        Each token is searched for within each record's full name and
+        email address; if each token is found within a record that
+        record is returned in the results.
+
+        If context is None, all record types are considered.  If
+        context is "location", only locations are considered.  If
+        context is "attendee", only users, groups, and resources
+        are considered.
+
+        Combine the results from the sub-services.
+        """
+
+        services = set(self._recordTypes.values())
+
+        generators = []
+        for service in services:
+            generator = (yield service.recordsMatchingTokens(tokens,
+                context=context))
+            generators.append(generator)
+
+        returnValue(itertools.chain(*generators))
+
+
     def getGroups(self, guids):
         """
         Returns a set of group records for the list of guids passed in.  For

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/appleopendirectory.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/appleopendirectory.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/appleopendirectory.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -381,7 +381,7 @@
 
         guids = set()
 
-        self.log_info("Looking up which groups %s is a member of" % (guid,))
+        self.log_debug("Looking up which groups %s is a member of" % (guid,))
         try:
             self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
                 self.directory,
@@ -442,7 +442,7 @@
             if recordGUID:
                 guids.add(recordGUID)
 
-        self.log_info("%s is a member of %d groups" % (guid, len(guids)))
+        self.log_debug("%s is a member of %d groups" % (guid, len(guids)))
 
         return guids
 
@@ -530,6 +530,194 @@
         else:
             return ()
 
+    def recordsMatchingTokens(self, tokens, context=None, lookupMethod=None):
+        """
+        @param tokens: The tokens to search on
+        @type tokens: C{list} of C{str} (utf-8 bytes)
+        @param context: An indication of what the end user is searching
+            for; "attendee", "location", or None
+        @type context: C{str}
+        @return: a deferred sequence of L{IDirectoryRecord}s which
+            match the given tokens and optional context.
+
+        Each token is searched for within each record's full name and
+        email address; if each token is found within a record that
+        record is returned in the results.
+
+        If context is None, all record types are considered.  If
+        context is "location", only locations are considered.  If
+        context is "attendee", only users, groups, and resources
+        are considered.
+        """
+
+        if lookupMethod is None:
+            lookupMethod=self.odModule.queryRecordsWithAttributes_list
+
+        def collectResults(results):
+            self.log_debug("Got back %d records from OD" % (len(results),))
+            for key, value in results:
+                self.log_debug("OD result: %s %s" % (key, value))
+                try:
+                    recordNodeName = value.get(
+                        dsattributes.kDSNAttrMetaNodeLocation)
+                    recordShortNames = self._uniqueTupleFromAttribute(
+                        value.get(dsattributes.kDSNAttrRecordName))
+
+                    recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
+
+                    recordType = value.get(dsattributes.kDSNAttrRecordType)
+                    if isinstance(recordType, list):
+                        recordType = recordType[0]
+                    if not recordType:
+                        continue
+                    recordType = self._fromODRecordTypes[recordType]
+
+                    # Skip if group restriction is in place and guid is not
+                    # a member (but don't skip any groups)
+                    if (recordType != self.recordType_groups and
+                        self.restrictedGUIDs is not None):
+                        if str(recordGUID) not in self.restrictedGUIDs:
+                            continue
+
+                    recordAuthIDs = self._setFromAttribute(
+                        value.get(dsattributes.kDSNAttrAltSecurityIdentities))
+                    recordFullName = value.get(
+                        dsattributes.kDS1AttrDistinguishedName)
+                    recordFirstName = value.get(dsattributes.kDS1AttrFirstName)
+                    recordLastName = value.get(dsattributes.kDS1AttrLastName)
+                    recordEmailAddresses = self._setFromAttribute(
+                        value.get(dsattributes.kDSNAttrEMailAddress),
+                        lower=True)
+
+                    # Special case for groups, which have members.
+                    if recordType == self.recordType_groups:
+                        memberGUIDs = value.get(dsattributes.kDSNAttrGroupMembers)
+                        if memberGUIDs is None:
+                            memberGUIDs = ()
+                        elif type(memberGUIDs) is str:
+                            memberGUIDs = (memberGUIDs,)
+                        nestedGUIDs = value.get(dsattributes.kDSNAttrNestedGroups)
+                        if nestedGUIDs:
+                            if type(nestedGUIDs) is str:
+                                nestedGUIDs = (nestedGUIDs,)
+                            memberGUIDs += tuple(nestedGUIDs)
+                        else:
+                            nestedGUIDs = ()
+                    else:
+                        nestedGUIDs = ()
+                        memberGUIDs = ()
+
+                    # Create records but don't store them in our index or
+                    # send them to memcached, because these are transient,
+                    # existing only so we can create principal resource
+                    # objects that are used to generate the REPORT result.
+
+                    record = OpenDirectoryRecord(
+                        service               = self,
+                        recordType            = recordType,
+                        guid                  = recordGUID,
+                        nodeName              = recordNodeName,
+                        shortNames            = recordShortNames,
+                        authIDs               = recordAuthIDs,
+                        fullName              = recordFullName,
+                        firstName             = recordFirstName,
+                        lastName              = recordLastName,
+                        emailAddresses        = recordEmailAddresses,
+                        memberGUIDs           = memberGUIDs,
+                        nestedGUIDs           = nestedGUIDs,
+                        extProxies            = (),
+                        extReadOnlyProxies    = (),
+                    )
+
+                    # (Copied from below)
+                    # Look up augment information
+                    # TODO: this needs to be deferred but for now we hard code
+                    # the deferred result because we know it is completing
+                    # immediately.
+                    if self.augmentService is not None:
+                        d = self.augmentService.getAugmentRecord(record.guid,
+                            recordType)
+                        d.addCallback(lambda x:record.addAugmentInformation(x))
+
+                    yield record
+
+                except KeyError:
+                    pass
+
+        def multiQuery(directory, queries, recordTypes, attrs):
+            byGUID = { }
+            sets = []
+
+            caseInsensitive = True
+            for compound in queries:
+                compound = compound.generate()
+
+                self.log_debug("Calling OD: Types %s, Query %s" %
+                    (recordTypes, compound))
+
+                try:
+                    queryResults = lookupMethod(
+                        directory,
+                        compound,
+                        caseInsensitive,
+                        recordTypes,
+                        attrs,
+                    )
+
+                    newSet = set()
+                    for recordName, data in queryResults:
+                        guid = data.get(dsattributes.kDS1AttrGeneratedUID, None)
+                        if guid:
+                            byGUID[guid] = (recordName, data)
+                            newSet.add(guid)
+
+                    sets.append(newSet)
+
+                except self.odModule.ODError, e:
+                    self.log_error("Ignoring OD Error: %d %s" %
+                        (e.message[1], e.message[0]))
+                    continue
+
+            results = []
+            for guid in set.intersection(*sets):
+                recordName, data = byGUID.get(guid, None)
+                if data is not None:
+                    results.append((data[dsattributes.kDSNAttrRecordName], data))
+            return results
+
+        queries = buildQueriesFromTokens(tokens, self._ODFields)
+
+        # Starting with the record types corresponding to the context...
+        recordTypes = self.recordTypesForSearchContext(context)
+        # ...limit to the types this service supports...
+        recordTypes = [r for r in recordTypes if r in self.recordTypes()]
+        # ...and map those to OD representations...
+        recordTypes = [self._toODRecordTypes[r] for r in recordTypes]
+
+        if recordTypes:
+            results = multiQuery(
+                self.directory,
+                queries,
+                recordTypes,
+                [
+                    dsattributes.kDS1AttrGeneratedUID,
+                    dsattributes.kDSNAttrRecordName,
+                    dsattributes.kDSNAttrAltSecurityIdentities,
+                    dsattributes.kDSNAttrRecordType,
+                    dsattributes.kDS1AttrDistinguishedName,
+                    dsattributes.kDS1AttrFirstName,
+                    dsattributes.kDS1AttrLastName,
+                    dsattributes.kDSNAttrEMailAddress,
+                    dsattributes.kDSNAttrMetaNodeLocation,
+                    dsattributes.kDSNAttrGroupMembers,
+                    dsattributes.kDSNAttrNestedGroups,
+                ]
+            )
+            return succeed(collectResults(results))
+        else:
+            return succeed([])
+
+
     def recordsMatchingFields(self, fields, operand="or", recordType=None,
         lookupMethod=None):
 
@@ -1047,7 +1235,7 @@
 
         loop = 1
         while valuesToFetch:
-            self.log_info("getGroups loop %d" % (loop,))
+            self.log_debug("getGroups loop %d" % (loop,))
 
             results = []
 
@@ -1055,12 +1243,12 @@
                 fields = []
                 for value in batch:
                     fields.append(["guid", value, False, "equals"])
-                self.log_info("getGroups fetching batch of %d" %
+                self.log_debug("getGroups fetching batch of %d" %
                     (len(fields),))
                 result = list((yield self.recordsMatchingFields(fields,
                     recordType=self.recordType_groups)))
                 results.extend(result)
-                self.log_info("getGroups got back batch of %d for subtotal of %d" %
+                self.log_debug("getGroups got back batch of %d for subtotal of %d" %
                     (len(result), len(results)))
 
             # Reset values for next iteration
@@ -1074,7 +1262,7 @@
                 # record.nestedGUIDs() contains the sub groups of this group
                 for memberGUID in record.nestedGUIDs():
                     if memberGUID not in recordsByGUID:
-                        self.log_info("getGroups group %s contains group %s" %
+                        self.log_debug("getGroups group %s contains group %s" %
                             (record.guid, memberGUID))
                         valuesToFetch.add(memberGUID)
 
@@ -1101,7 +1289,36 @@
     return queries
 
 
+def buildQueriesFromTokens(tokens, mapping):
+    """
+    OD /Local doesn't support nested complex queries, so create a list of
+    complex queries that will be ANDed together in recordsMatchingTokens().
 
+    @param tokens: The tokens to search on
+    @type tokens: C{list} of C{str}
+    @param mapping: The mapping of DirectoryRecord attributes to OD attributes
+    @type mapping: C{dict}
+    @return: A list of expression objects
+    @rtype: C{list}
+    """
+
+    if len(tokens) == 0:
+        return None
+
+    fields = ["fullName", "emailAddresses"]
+
+    results = []
+    for token in tokens:
+        queries = []
+        for field in fields:
+            ODField = mapping[field]['odField']
+            query = dsquery.match(ODField, token, "contains")
+            queries.append(query)
+        results.append(dsquery.expression(dsquery.expression.OR, queries))
+    return results
+
+
+
 class OpenDirectoryRecord(CachingDirectoryRecord):
     """
     OpenDirectory implementation of L{IDirectoryRecord}.

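buildQueriesFromTokens() produces one OR expression per token, and multiQuery() intersects the per-token GUID sets, which yields AND-across-tokens semantics without nesting complex queries. A standalone analogue of that shaping, with plain strings standing in for the dsquery.match/dsquery.expression objects:

    def build_queries(tokens, fields):
        # One OR over all searchable fields, per token.
        queries = []
        for token in tokens:
            ors = ["(%s=*%s*)" % (field, token) for field in fields]
            queries.append("(|%s)" % "".join(ors))
        return queries

    print build_queries(
        ["mor", "gan"],
        ["dsAttrTypeStandard:RealName", "dsAttrTypeStandard:EMailAddress"],
    )
    # Each query runs separately against OD; intersecting the result sets
    # leaves only records that match every token.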
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/calendaruserproxy.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/calendaruserproxy.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/calendaruserproxy.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -404,6 +404,24 @@
         ])
         return d
 
+    @inlineCallbacks
+    def containsPrincipal(self, principal):
+        """
+        Uses proxyFor information to turn the "contains principal" question around;
+        rather than expanding this principal's groups to see if the other principal
+        is a member, ask the other principal if they are a proxy for this principal's
+        parent resource, since this principal is a proxy principal.
+
+        @param principal: The principal to check
+        @type principal: L{DirectoryCalendarPrincipalResource}
+        @return: True if principal is a proxy (of the correct type) of our parent
+        @rtype: C{boolean}
+        """
+        readWrite = self.isProxyType(True) # is read-write
+        if principal and self.parent in (yield principal.proxyFor(readWrite)):
+            returnValue(True)
+        returnValue(False)
+
 class ProxyDB(AbstractADBAPIDatabase, LoggingMixIn):
     """
     A database to maintain calendar user proxy group memberships.

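The proxy principal's containsPrincipal() answers the membership question by inverting it: rather than expanding its own (potentially large) membership, it asks the candidate principal for its proxyFor set and checks whether the parent resource is in it. Sketched with the method names from this change:

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def proxyContains(proxyPrincipal, candidate):
        # isProxyType(True) distinguishes the read-write proxy subgroup.
        readWrite = proxyPrincipal.isProxyType(True)
        proxiedResources = yield candidate.proxyFor(readWrite)
        returnValue(proxyPrincipal.parent in proxiedResources)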
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/directory.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/directory.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -77,6 +77,9 @@
     recordType_groups = "groups"
     recordType_locations = "locations"
     recordType_resources = "resources"
+
+    searchContext_location = "location"
+    searchContext_attendee = "attendee"
     
     def _generatedGUID(self):
         if not hasattr(self, "_guid"):
@@ -210,6 +213,18 @@
 
         return record if record and record.enabledForCalendaring else None
 
+    def recordWithCachedGroupsAlias(self, recordType, alias):
+        """
+        @param recordType: the type of the record to look up.
+        @param alias: the cached-groups alias of the record to look up.
+        @type alias: C{str}
+
+        @return: a deferred L{IDirectoryRecord} with the given cached-groups
+            alias, or C{None} if no such record is found.
+        """
+        # The default implementation uses guid
+        return succeed(self.recordWithGUID(alias))
+
     def allRecords(self):
         for recordType in self.recordTypes():
             for record in self.listRecords(recordType):
@@ -225,7 +240,96 @@
         return self.recordsMatchingFields(fields, operand=operand,
             recordType=recordType)
 
+    def recordTypesForSearchContext(self, context):
+        """
+        Map calendarserver-principal-search REPORT context value to applicable record types
 
+        @param context: The context value to map (either "location" or "attendee")
+        @type context: C{str}
+        @returns: The list of record types the context maps to
+        @rtype: C{list} of C{str}
+        """
+        if context == self.searchContext_location:
+            recordTypes = [self.recordType_locations]
+        elif context == self.searchContext_attendee:
+            recordTypes = [self.recordType_users, self.recordType_groups,
+                self.recordType_resources]
+        else:
+            recordTypes = list(self.recordTypes())
+        return recordTypes
+
+
+    def recordsMatchingTokens(self, tokens, context=None):
+        """
+        @param tokens: The tokens to search on
+        @type tokens: C{list} of C{str} (utf-8 bytes)
+        @param context: An indication of what the end user is searching
+            for; "attendee", "location", or None
+        @type context: C{str}
+        @return: a deferred sequence of L{IDirectoryRecord}s which
+            match the given tokens and optional context.
+
+        Each token is searched for within each record's full name and
+        email address; if each token is found within a record that
+        record is returned in the results.
+
+        If context is None, all record types are considered.  If
+        context is "location", only locations are considered.  If
+        context is "attendee", only users, groups, and resources
+        are considered.
+        """
+
+        # Default, bruteforce method; override with one optimized for each
+        # service
+
+        def fieldMatches(fieldValue, value):
+            if fieldValue is None:
+                return False
+            elif type(fieldValue) in types.StringTypes:
+                fieldValue = (fieldValue,)
+
+            for testValue in fieldValue:
+                testValue = testValue.lower()
+                value = value.lower()
+
+                try:
+                    testValue.index(value)
+                    return True
+                except ValueError:
+                    pass
+
+            return False
+
+        def recordMatches(record):
+            for token in tokens:
+                for fieldName in ["fullName", "emailAddresses"]:
+                    try:
+                        fieldValue = getattr(record, fieldName)
+                        if fieldMatches(fieldValue, token):
+                            break
+                    except AttributeError:
+                        # No value
+                        pass
+                else:
+                    return False
+            return True
+
+
+        def yieldMatches(recordTypes):
+            try:
+                for recordType in [r for r in recordTypes if r in self.recordTypes()]:
+                    for record in self.listRecords(recordType):
+                        if recordMatches(record):
+                            yield record
+
+            except UnknownRecordTypeError:
+                # Skip this service since it doesn't understand this record type
+                pass
+
+        recordTypes = self.recordTypesForSearchContext(context)
+        return succeed(yieldMatches(recordTypes))
+
+
     def recordsMatchingFields(self, fields, operand="or", recordType=None):
         # Default, bruteforce method; override with one optimized for each
         # service
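The default implementation's rule: a record matches only when every token appears, case-insensitively, in its full name or one of its email addresses. A self-contained sketch of that rule (FakeRecord is an illustrative stand-in for IDirectoryRecord):

    class FakeRecord(object):
        def __init__(self, fullName, emailAddresses):
            self.fullName = fullName
            self.emailAddresses = emailAddresses

    def matches(record, tokens):
        def fieldMatches(value, token):
            values = (value,) if isinstance(value, basestring) else value
            return any(token.lower() in v.lower() for v in values)
        return all(fieldMatches(record.fullName, t) or
                   fieldMatches(record.emailAddresses, t)
                   for t in tokens)

    records = [
        FakeRecord("Morgen Sagen", set(["morgen@example.com"])),
        FakeRecord("Morgan Freeman", set(["mf@example.com"])),
    ]
    print [r.fullName for r in records if matches(r, ["mor", "sag"])]
    # -> ['Morgen Sagen']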
@@ -623,10 +727,12 @@
             self.log_info("Group membership snapshot file does not yet exist")
             fast = False
             previousMembers = {}
+            callGroupsChanged = False
         else:
             self.log_info("Group membership snapshot file exists: %s" %
                 (snapshotFile.path,))
             previousMembers = pickle.loads(snapshotFile.getContent())
+            callGroupsChanged = True
 
         if useLock:
             self.log_info("Attempting to acquire group membership cache lock")
@@ -644,7 +750,13 @@
             # populate proxy DB from external resource info
             self.log_info("Applying proxy assignment changes")
             assignmentCount = 0
+            totalNumAssignments = len(assignments)
+            currentAssignmentNum = 0
             for principalUID, members in assignments:
+                currentAssignmentNum += 1
+                if currentAssignmentNum % 1000 == 0:
+                    self.log_info("...proxy assignment %d of %d" % (currentAssignmentNum,
+                        totalNumAssignments))
                 try:
                     current = (yield self.proxyDB.getMembers(principalUID))
                     if members != current:
@@ -695,13 +807,6 @@
                 for member in groupMembers:
                     memberships = members.setdefault(member, set())
                     memberships.add(groupGUID)
-                    if member in previousMembers:
-                        # Remove from previousMembers; anything still left in
-                        # previousMembers when this loop is done will be
-                        # deleted from cache (since only members that were
-                        # previously in delegated-to groups but are no longer
-                        # would still be in previousMembers)
-                        del previousMembers[member]
 
             self.log_info("There are %d users delegated-to via groups" %
                 (len(members),))
@@ -721,15 +826,49 @@
 
         self.log_info("Storing %d group memberships in memcached" %
                        (len(members),))
+        changedMembers = set()
+        totalNumMembers = len(members)
+        currentMemberNum = 0
         for member, groups in members.iteritems():
+            currentMemberNum += 1
+            if currentMemberNum % 1000 == 0:
+                self.log_info("...membership %d of %d" % (currentMemberNum,
+                    totalNumMembers))
             # self.log_debug("%s is in %s" % (member, groups))
             yield self.cache.setGroupsFor(member, groups)
+            if groups != previousMembers.get(member, None):
+                # This principal has had a change in group membership
+                # so invalidate the PROPFIND response cache
+                changedMembers.add(member)
+            try:
+                # Remove from previousMembers; anything still left in
+                # previousMembers when this loop is done will be
+                # deleted from cache (since only members that were
+                # previously in delegated-to groups but are no longer
+                # would still be in previousMembers)
+                del previousMembers[member]
+            except KeyError:
+                pass
 
         # Remove entries for principals that no longer are in delegated-to
         # groups
         for member, groups in previousMembers.iteritems():
             yield self.cache.deleteGroupsFor(member)
+            changedMembers.add(member)
 
+        # For principals whose group membership has changed, call groupsChanged()
+        if callGroupsChanged and not fast and hasattr(self.directory, "principalCollection"):
+            for member in changedMembers:
+                record = yield self.directory.recordWithCachedGroupsAlias(
+                    self.directory.recordType_users, member)
+                if record is not None:
+                    principal = self.directory.principalCollection.principalForRecord(record)
+                    if principal is not None:
+                        self.log_debug("Group membership changed for %s (%s)" %
+                            (record.shortNames[0], record.guid,))
+                        if hasattr(principal, "groupsChanged"):
+                            yield principal.groupsChanged()
+
         yield self.cache.setPopulatedMarker()
 
         if useLock:
@@ -738,7 +877,7 @@
 
         self.log_info("Group memberships cache updated")
 
-        returnValue((fast, len(members)))
+        returnValue((fast, len(members), len(changedMembers)))
 
 
 
@@ -941,6 +1080,14 @@
         from calendarserver.tap.util import directoryFromConfig
         directory = directoryFromConfig(config)
 
+        # We have to set cacheNotifierFactory otherwise group cacher can't
+        # invalidate the cache tokens for principals whose membership has
+        # changed
+        if config.EnableResponseCache and config.Memcached.Pools.Default.ClientEnabled:
+            from twistedcaldav.directory.principal import DirectoryPrincipalResource
+            from twistedcaldav.cache import MemcacheChangeNotifier
+            DirectoryPrincipalResource.cacheNotifierFactory = MemcacheChangeNotifier
+
         # Setup the ProxyDB Service
         proxydbClass = namedClass(config.ProxyDBService.type)
 

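(Illustration: the delta computation that drives changedMembers above can be
modeled standalone as below. previousMembers/currentMembers are plain dicts
mapping a member alias to its set of group GUIDs, mirroring the pickled
snapshot the cacher keeps; this is a sketch, not the shipped code.)

    def computeChangedMembers(previousMembers, currentMembers):
        """
        Return the aliases whose group memberships differ between two
        cacher runs, including members no longer in any delegated-to
        group (those get their cache entries deleted).
        """
        changed = set()
        for member, groups in currentMembers.iteritems():
            if groups != previousMembers.get(member):
                changed.add(member)   # new or modified membership
        for member in previousMembers:
            if member not in currentMembers:
                changed.add(member)   # dropped out of all groups
        return changed
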
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/idirectory.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/idirectory.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/idirectory.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -81,12 +81,44 @@
             directory service may not be aware of these addresses.
         """
 
+    def recordWithCachedGroupsAlias(recordType, alias):
+        """
+        @param recordType: the type of the record to look up.
+        @param alias: the cached-groups alias of the record to look up.
+        @type alias: C{str}
+
+        @return: a deferred L{IDirectoryRecord} with the given cached-groups
+            alias, or C{None} if no such record is found.
+        """
+
+
     def recordsMatchingFields(fields):
         """
         @return: a deferred sequence of L{IDirectoryRecord}s which
             match the given fields.
         """
 
+    def recordsMatchingTokens(tokens, context=None):
+        """
+        @param tokens: The tokens to search on
+        @type tokens: C{list} of C{str} (utf-8 bytes)
+        @param context: An indication of what the end user is searching
+            for; "attendee", "location", or None
+        @type context: C{str}
+        @return: a deferred sequence of L{IDirectoryRecord}s which
+            match the given tokens and optional context.
+
+        Each token is searched for within each record's full name and
+        email address; if each token is found within a record that
+        record is returned in the results.
+
+        If context is None, all record types are considered.  If
+        context is "location", only locations are considered.  If
+        context is "attendee", only users, groups, and resources
+        are considered.
+        """
+
+
     def setRealm(realmName):
         """
         Set a new realm name for this (and nested services if any)

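(Illustration: how a caller might drive the two interface methods added above.
"directory" stands for any IDirectoryService implementation; the function
names here are hypothetical. A sketch under those assumptions.)

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def searchAttendees(directory, tokens):
        # Every token must match within a record's full name or email
        # address; context="attendee" restricts the search to users,
        # groups, and resources.
        records = yield directory.recordsMatchingTokens(tokens,
            context="attendee")
        for record in records:
            print record.fullName

    @inlineCallbacks
    def lookupByAlias(directory, alias):
        # Resolve a cached-groups alias back to a record; None means no
        # such record exists.
        record = yield directory.recordWithCachedGroupsAlias(
            directory.recordType_users, alias)
        returnValue(record)
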
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/ldapdirectory.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/ldapdirectory.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/ldapdirectory.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -246,6 +246,9 @@
             # Also put the guidAttr attribute into the mappings for each type
             # so recordsMatchingFields can query on guid
             self.rdnSchema[recordType]["mapping"]["guid"] = self.rdnSchema["guidAttr"]
+            # Also put the memberIdAttr attribute into the mappings for each type
+            # so recordsMatchingFields can query on memberIdAttr
+            self.rdnSchema[recordType]["mapping"]["memberIdAttr"] = self.groupSchema["memberIdAttr"]
         if self.groupSchema["membersAttr"]:
             attrSet.add(self.groupSchema["membersAttr"])
         if self.groupSchema["nestedGroupsAttr"]:
@@ -319,12 +322,7 @@
         for dn, attrs in results:
             dn = normalizeDNstr(dn)
 
-            unrestricted = True
-            if self.restrictedGUIDs is not None:
-                if guidAttr:
-                    guid = self._getUniqueLdapAttribute(attrs, guidAttr)
-                    if guid not in self.restrictedGUIDs:
-                        unrestricted = False
+            unrestricted = self.isAllowedByRestrictToGroup(dn, attrs)
 
             try:
                 record = self._ldapResultToRecord(dn, attrs, recordType)
@@ -334,7 +332,7 @@
                 continue
 
             if not unrestricted:
-                self.log_debug("%s is not enabled because it's not a member of group: %s" % (guid, self.restrictToGroup))
+                self.log_debug("%s is not enabled because it's not a member of group: %s" % (dn, self.restrictToGroup))
                 record.enabledForCalendaring = False
                 record.enabledForAddressBooks = False
 
@@ -346,6 +344,26 @@
 
         return records
 
+    @inlineCallbacks
+    def recordWithCachedGroupsAlias(self, recordType, alias):
+        """
+        @param recordType: the type of the record to look up.
+        @param alias: the cached-groups alias of the record to look up.
+        @type alias: C{str}
+
+        @return: a deferred L{IDirectoryRecord} with the given cached-groups
+            alias, or C{None} if no such record is found.
+        """
+        memberIdAttr = self.groupSchema["memberIdAttr"]
+        # "memberIdAttr" here is the mapping key added to rdnSchema above so
+        # that recordsMatchingFields can query on the member-id LDAP attribute
+        attributeToSearch = "memberIdAttr" if memberIdAttr else "dn"
+
+        fields = [[attributeToSearch, alias, False, "equals"]]
+        results = (yield self.recordsMatchingFields(fields, recordType=recordType))
+        if results:
+            returnValue(results[0])
+        else:
+            returnValue(None)
+
     def getExternalProxyAssignments(self):
         """
         Retrieve proxy assignments for locations and resources from the
@@ -531,106 +549,166 @@
         raise HTTPError(StatusResponse(responsecode.SERVICE_UNAVAILABLE, "LDAP server unavailable"))
 
 
+    def isAllowedByRestrictToGroup(self, dn, attrs):
+        """
+        Check to see if the principal with the given DN and LDAP attributes is
+        a member of the restrictToGroup.
+
+        @param dn: an LDAP dn
+        @type dn: C{str}
+        @param attrs: LDAP attributes
+        @type attrs: C{dict}
+        @return: True if the principal is in the group, or True when restrictEnabledRecords is off.
+        @rtype: C{boolean}
+        """
+        if not self.restrictEnabledRecords:
+            return True
+        if self.groupSchema["memberIdAttr"]:
+            value = self._getUniqueLdapAttribute(attrs, self.groupSchema["memberIdAttr"])
+        else: # No memberIdAttr implies DN
+            value = dn
+        return value in self.restrictedPrincipals
+
+
     @property
-    def restrictedGUIDs(self):
+    def restrictedPrincipals(self):
         """
         Look up (and cache) the set of guids that are members of the
         restrictToGroup.  If restrictToGroup is not set, return None to
         indicate there are no group restrictions.
-
-        guidAttr must also be specified in config for restrictToGroups to work.
         """
-        if self.restrictEnabledRecords and self.rdnSchema["guidAttr"]:
+        if self.restrictEnabledRecords:
 
             if time.time() - self.restrictedTimestamp > self.cacheTimeout:
                 # fault in the members of group of name self.restrictToGroup
-
                 recordType = self.recordType_groups
                 base = self.typeDNs[recordType]
+                # TODO: This shouldn't be hardcoded to cn
                 filterstr = "(cn=%s)" % (self.restrictToGroup,)
                 self.log_debug("Retrieving ldap record with base %s and filter %s." %
                     (ldap.dn.dn2str(base), filterstr))
                 result = self.timedSearch(ldap.dn.dn2str(base),
                     ldap.SCOPE_SUBTREE, filterstr=filterstr, attrlist=self.attrlist)
 
+                members = []
+                nestedGroups = []
+
                 if len(result) == 1:
                     dn, attrs = result[0]
                     dn = normalizeDNstr(dn)
                     if self.groupSchema["membersAttr"]:
-                        members = set(self._getMultipleLdapAttributes(attrs,
-                            self.groupSchema["membersAttr"]))
+                        members = self._getMultipleLdapAttributes(attrs,
+                            self.groupSchema["membersAttr"])
+                        if not self.groupSchema["memberIdAttr"]: # these are DNs
+                            members = [normalizeDNstr(m) for m in members]
+                        members = set(members)
+
                     if self.groupSchema["nestedGroupsAttr"]:
-                        nestedGroups = set(self._getMultipleLdapAttributes(attrs,
-                            self.groupSchema["nestedGroupsAttr"]))
+                        nestedGroups = self._getMultipleLdapAttributes(attrs,
+                            self.groupSchema["nestedGroupsAttr"])
+                        if not self.groupSchema["memberIdAttr"]: # these are DNs
+                            nestedGroups = [normalizeDNstr(g) for g in nestedGroups]
+                        nestedGroups = set(nestedGroups)
+                    else:
+                        # Since all members are lumped into the same attribute,
+                        # treat them all as nestedGroups instead
+                        nestedGroups = members
+                        members = set()
 
-                else:
-                    members = []
-                    nestedGroups = []
-
-                self._cachedRestrictedGUIDs = set(self._expandGroupMembership(members, nestedGroups, returnGroups=True))
-                self.log_info("Got %d restricted group members" % (len(self._cachedRestrictedGUIDs),))
+                self._cachedRestrictedPrincipals = set(self._expandGroupMembership(members,
+                    nestedGroups))
+                self.log_info("Got %d restricted group members" % (
+                    len(self._cachedRestrictedPrincipals),))
                 self.restrictedTimestamp = time.time()
-            return self._cachedRestrictedGUIDs
+            return self._cachedRestrictedPrincipals
         else:
             # No restrictions
             return None
 
 
-    def _expandGroupMembership(self, members, nestedGroups,
-        processedGUIDs=None, returnGroups=False):
+    def _expandGroupMembership(self, members, nestedGroups, processedItems=None):
+        """
+        A generator which recursively yields the principals contained within nestedGroups.
 
-        if processedGUIDs is None:
-            processedGUIDs = set()
+        @param members: If the LDAP service is configured to use different attributes to
+            indicate member users and member nested groups, members will include the
+            non-group members.  Otherwise, members will be empty and only nestedGroups
+            will be used.
+        @type members: C{set}
+        @param nestedGroups: If the LDAP service is configured to use different attributes to
+            indicate member users and member nested groups, nestedGroups will include only
+            the groups; otherwise nestedGroups will include all members.
+        @type nestedGroups: C{set}
+        @param processedItems: The set of members that have already been looked up in LDAP
+            so the code doesn't have to look up the same member twice or get stuck in a
+            membership loop.
+        @type processedItems: C{set}
+        @return: All members of the group; the values correspond to memberIdAttr
+            if memberIdAttr is set in the group schema, or to DNs otherwise.
+        @rtype: generator of C{str}
+        """
 
+        if processedItems is None:
+            processedItems = set()
+
         if isinstance(members, str):
             members = [members]
 
         if isinstance(nestedGroups, str):
             nestedGroups = [nestedGroups]
 
-        for memberGUID in members:
-            if memberGUID not in processedGUIDs:
-                processedGUIDs.add(memberGUID)
-                yield memberGUID
+        for member in members:
+            if member not in processedItems:
+                processedItems.add(member)
+                yield member
 
-        for groupGUID in nestedGroups:
-            if groupGUID in processedGUIDs:
+        for group in nestedGroups:
+            if group in processedItems:
                 continue
 
             recordType = self.recordType_groups
             base = self.typeDNs[recordType]
-            filterstr = "(%s=%s)" % (self.rdnSchema["guidAttr"], groupGUID)
+            if self.groupSchema["memberIdAttr"]:
+                scope = ldap.SCOPE_SUBTREE
+                base = self.typeDNs[recordType]
+                filterstr = "(%s=%s)" % (self.groupSchema["memberIdAttr"], group)
+            else: # Use DN
+                scope = ldap.SCOPE_BASE
+                base = ldap.dn.str2dn(group)
+                filterstr = "(objectClass=*)"
 
             self.log_debug("Retrieving ldap record with base %s and filter %s." %
                 (ldap.dn.dn2str(base), filterstr))
             result = self.timedSearch(ldap.dn.dn2str(base),
-                ldap.SCOPE_SUBTREE, filterstr=filterstr, attrlist=self.attrlist)
+                scope, filterstr=filterstr, attrlist=self.attrlist)
 
             if len(result) == 0:
                 continue
 
+            subMembers = set()
+            subNestedGroups = set()
             if len(result) == 1:
                 dn, attrs = result[0]
                 dn = normalizeDNstr(dn)
                 if self.groupSchema["membersAttr"]:
-                    subMembers = set(self._getMultipleLdapAttributes(attrs,
-                        self.groupSchema["membersAttr"]))
-                else:
-                    subMembers = []
+                    subMembers = self._getMultipleLdapAttributes(attrs,
+                        self.groupSchema["membersAttr"])
+                    if not self.groupSchema["memberIdAttr"]: # these are DNs
+                        subMembers = [normalizeDNstr(m) for m in subMembers]
+                    subMembers = set(subMembers)
 
                 if self.groupSchema["nestedGroupsAttr"]:
-                    subNestedGroups = set(self._getMultipleLdapAttributes(attrs,
-                        self.groupSchema["nestedGroupsAttr"]))
-                else:
-                    subNestedGroups = []
+                    subNestedGroups = self._getMultipleLdapAttributes(attrs,
+                        self.groupSchema["nestedGroupsAttr"])
+                    if not self.groupSchema["memberIdAttr"]: # these are DNs
+                        subNestedGroups = [normalizeDNstr(g) for g in subNestedGroups]
+                    subNestedGroups = set(subNestedGroups)
 
-            processedGUIDs.add(groupGUID)
-            if returnGroups:
-                yield groupGUID
+            processedItems.add(group)
+            yield group
 
-            for GUID in self._expandGroupMembership(subMembers,
-                subNestedGroups, processedGUIDs, returnGroups):
-                yield GUID
+            for item in self._expandGroupMembership(subMembers, subNestedGroups,
+                processedItems):
+                yield item
 
 
     def _getUniqueLdapAttribute(self, attrs, *keys):
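(Illustration: the traversal performed by _expandGroupMembership(), with an
in-memory dict standing in for the LDAP search. membersOf/nestedOf are
hypothetical fixtures mapping a group alias to its direct members and nested
groups; the processed set is what keeps mutually nested groups, like the
recursive1_coasts/recursive2_coasts fixtures in the tests below, from looping.)

    def expandMembership(members, nestedGroups, membersOf, nestedOf,
                         processed=None):
        # Yield direct members first, then each nested group and its
        # expansion, marking every item processed exactly once.
        if processed is None:
            processed = set()
        for member in members:
            if member not in processed:
                processed.add(member)
                yield member
        for group in nestedGroups:
            if group in processed:
                continue
            processed.add(group)
            yield group
            for item in expandMembership(membersOf.get(group, ()),
                                         nestedOf.get(group, ()),
                                         membersOf, nestedOf, processed):
                yield item
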
@@ -940,19 +1018,14 @@
                 dn, attrs = result.pop()
                 dn = normalizeDNstr(dn)
 
-                unrestricted = True
-                if self.restrictedGUIDs is not None:
-                    if guidAttr:
-                        guid = self._getUniqueLdapAttribute(attrs, guidAttr)
-                        if guid not in self.restrictedGUIDs:
-                            unrestricted = False
+                unrestricted = self.isAllowedByRestrictToGroup(dn, attrs)
 
                 try:
                     record = self._ldapResultToRecord(dn, attrs, recordType)
                     self.log_debug("Got LDAP record %s" % (record,))
 
                     if not unrestricted:
-                        self.log_debug("%s is not enabled because it's not a member of group: %s" % (guid, self.restrictToGroup))
+                        self.log_debug("%s is not enabled because it's not a member of group: %s" % (dn, self.restrictToGroup))
                         record.enabledForCalendaring = False
                         record.enabledForAddressBooks = False
 
@@ -974,6 +1047,86 @@
                         % (recordTypes, indexType, indexKey))
 
 
+    def recordsMatchingTokens(self, tokens, context=None):
+        """
+        @param tokens: The tokens to search on
+        @type tokens: C{list} of C{str} (utf-8 bytes)
+        @param context: An indication of what the end user is searching
+            for; "attendee", "location", or None
+        @type context: C{str}
+        @return: a deferred sequence of L{IDirectoryRecord}s which
+            match the given tokens and optional context.
+
+        Each token is searched for within each record's full name and
+        email address; if each token is found within a record that
+        record is returned in the results.
+
+        If context is None, all record types are considered.  If
+        context is "location", only locations are considered.  If
+        context is "attendee", only users, groups, and resources
+        are considered.
+        """
+        self.log_debug("Peforming calendar user search for %s (%s)" % (tokens, context))
+
+        records = []
+        recordTypes = self.recordTypesForSearchContext(context)
+        recordTypes = [r for r in recordTypes if r in self.recordTypes()]
+        guidAttr = self.rdnSchema["guidAttr"]
+
+        for recordType in recordTypes:
+            base = self.typeDNs[recordType]
+            scope = ldap.SCOPE_SUBTREE
+            filterstr = buildFilterFromTokens(self.rdnSchema[recordType]["mapping"],
+                tokens)
+
+            if filterstr is not None:
+                # Query the LDAP server
+                self.log_debug("LDAP search %s %s %s" %
+                    (ldap.dn.dn2str(base), scope, filterstr))
+                results = self.timedSearch(ldap.dn.dn2str(base), scope,
+                    filterstr=filterstr, attrlist=self.attrlist,
+                    timeoutSeconds=self.requestTimeoutSeconds,
+                    resultLimit=self.requestResultsLimit)
+                self.log_debug("LDAP search returned %d results" % (len(results),))
+                numMissingGuids = 0
+                numMissingRecordNames = 0
+                for dn, attrs in results:
+                    dn = normalizeDNstr(dn)
+                    # Skip if group restriction is in place and guid is not
+                    # a member
+                    if (recordType != self.recordType_groups and
+                        not self.isAllowedByRestrictToGroup(dn, attrs)):
+                        continue
+
+                    try:
+                        record = self._ldapResultToRecord(dn, attrs, recordType)
+
+                        # For non-group records, if not enabled for calendaring do
+                        # not include in principal property search results
+                        if (recordType != self.recordType_groups):
+                            if not record.enabledForCalendaring:
+                                continue
+
+                        records.append(record)
+
+                    except MissingGuidException:
+                        numMissingGuids += 1
+
+                    except MissingRecordNameException:
+                        numMissingRecordNames += 1
+
+                if numMissingGuids:
+                    self.log_warn("%d %s records are missing %s" %
+                        (numMissingGuids, recordType, guidAttr))
+
+                if numMissingRecordNames:
+                    self.log_warn("%d %s records are missing record name" %
+                        (numMissingRecordNames, recordType))
+
+        self.log_debug("Calendar user search matched %d records" % (len(records),))
+        return succeed(records)
+
+
     def recordsMatchingFields(self, fields, operand="or", recordType=None):
         """
         Carries out the work of a principal-property-search against LDAP
@@ -1032,11 +1185,8 @@
                     # Skip if group restriction is in place and guid is not
                     # a member
                     if (recordType != self.recordType_groups and
-                        self.restrictedGUIDs is not None):
-                        if guidAttr:
-                            guid = self._getUniqueLdapAttribute(attrs, guidAttr)
-                            if guid not in self.restrictedGUIDs:
-                                continue
+                        not self.isAllowedByRestrictToGroup(dn, attrs)):
+                        continue
 
                     try:
                         record = self._ldapResultToRecord(dn, attrs, recordType)
@@ -1087,7 +1237,6 @@
         attributeToSearch = "guid"
         valuesToFetch = guids
 
-
         while valuesToFetch:
             results = []
 
@@ -1133,7 +1282,7 @@
             # Switch to the LDAP attribute used for identifying members
             # for subsequent iterations.  If memberIdAttr is not specified
             # in the config, we'll search using dn.
-            attributeToSearch = memberIdAttr if memberIdAttr else "dn"
+            attributeToSearch = "memberIdAttr" if memberIdAttr else "dn"
 
         returnValue(recordsByAlias.values())
 
@@ -1248,6 +1397,58 @@
     return filterstr
 
 
+def buildFilterFromTokens(mapping, tokens):
+    """
+    Create an LDAP filter string from a list of query tokens.  Each token is
+    searched for in each LDAP attribute corresponding to "fullName" and
+    "emailAddresses" (could be multiple LDAP fields for either).
+
+    @param mapping: A dict mapping internal directory attribute names to LDAP attribute names.
+    @type mapping: C{dict}
+    @param tokens: The list of tokens to search for
+    @type tokens: C{list}
+    @return: An LDAP filterstr
+    @rtype: C{str}
+    """
+
+    filterStr = None
+    tokens = [ldapEsc(t) for t in tokens if len(t) > 2] # skip tokens shorter than 3 characters
+    if len(tokens) == 0:
+        return None
+
+    attributes = ["fullName", "emailAddresses"]
+
+    ldapFields = []
+    for attribute in attributes:
+        ldapField = mapping.get(attribute, None)
+        if ldapField:
+            if isinstance(ldapField, str):
+                ldapFields.append(ldapField)
+            else:
+                ldapFields.extend(ldapField)
+
+    if len(ldapFields) == 0:
+        return None
+
+    tokenFragments = []
+    for token in tokens:
+        fragments = []
+        for ldapField in ldapFields:
+            fragments.append("(%s=*%s*)" % (ldapField, token))
+        if len(fragments) == 1:
+            tokenFragment = fragments[0]
+        else:
+            tokenFragment = "(|%s)" % ("".join(fragments),)
+        tokenFragments.append(tokenFragment)
+
+    if len(tokenFragments) == 1:
+        filterStr = tokenFragments[0]
+    else:
+        filterStr = "(&%s)" % ("".join(tokenFragments),)
+
+    return filterStr
+
+
 class LdapDirectoryRecord(CachingDirectoryRecord):
     """
     LDAP implementation of L{IDirectoryRecord}.

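(Worked example for buildFilterFromTokens(): for multiple tokens it ANDs a
per-token OR across all mapped LDAP fields, and tokens of fewer than three
characters are discarded. The mapping below resembles the users mapping in
the tests; the output is what the function as written produces.)

    >>> mapping = {"fullName": "cn", "emailAddresses": ["mail", "mailAliases"]}
    >>> buildFilterFromTokens(mapping, ["amanda", "test", "ex"])   # "ex" dropped
    '(&(|(cn=*amanda*)(mail=*amanda*)(mailAliases=*amanda*))(|(cn=*test*)(mail=*test*)(mailAliases=*test*)))'
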
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/principal.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/principal.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/principal.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -948,6 +948,14 @@
     def expandedGroupMemberships(self):
         return self.groupMemberships(infinity=True)
 
+    def groupsChanged(self):
+        """
+        A callback indicating the directory group membership for this principal
+        has changed.  Update the cache token for this principal so the PROPFIND
+        response cache is invalidated.
+        """
+        return self.cacheNotifier.changed()
+
     def principalCollections(self):
         return self.parent.principalCollections()
 

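(Illustration: for groupsChanged() to do anything, the principal needs a
cacheNotifier, which is why the tools change earlier in this changeset assigns
DirectoryPrincipalResource.cacheNotifierFactory = MemcacheChangeNotifier when
the response cache and memcached are enabled. A minimal hypothetical model of
that wiring, with stubs standing in for the real classes:)

    class StubNotifier(object):
        """Hypothetical notifier standing in for MemcacheChangeNotifier."""
        def __init__(self):
            self.changes = 0
        def changed(self):
            self.changes += 1   # in production: bump the memcached cache token

    class StubPrincipal(object):
        """Hypothetical principal wired the way this changeset expects."""
        cacheNotifierFactory = StubNotifier
        def __init__(self):
            self.cacheNotifier = self.cacheNotifierFactory()
        def groupsChanged(self):
            # Same effect as the method added above: invalidate any cached
            # PROPFIND responses for this principal via the cache token.
            return self.cacheNotifier.changed()
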
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_buildquery.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_buildquery.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_buildquery.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -15,7 +15,7 @@
 ##
 
 from twistedcaldav.test.util import TestCase
-from twistedcaldav.directory.appleopendirectory import buildQueries, OpenDirectoryService
+from twistedcaldav.directory.appleopendirectory import buildQueries, buildQueriesFromTokens, OpenDirectoryService
 from calendarserver.platform.darwin.od import dsattributes
 
 class BuildQueryTests(TestCase):
@@ -103,3 +103,23 @@
             {
             }
         )
+
+    def test_buildQueryFromTokens(self):
+        results = buildQueriesFromTokens([], OpenDirectoryService._ODFields)
+        self.assertEquals(results, None)
+
+        results = buildQueriesFromTokens(["foo"], OpenDirectoryService._ODFields)
+        self.assertEquals(
+            results[0].generate(),
+            "(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=*foo*))"
+        )
+
+        results = buildQueriesFromTokens(["foo", "bar"], OpenDirectoryService._ODFields)
+        self.assertEquals(
+            results[0].generate(),
+            "(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=*foo*))"
+        )
+        self.assertEquals(
+            results[1].generate(),
+            "(|(dsAttrTypeStandard:RealName=*bar*)(dsAttrTypeStandard:EMailAddress=*bar*))"
+        )

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_directory.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_directory.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_directory.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -281,7 +281,7 @@
 
         # Allow an update by unlocking the cache
         yield cache.releaseLock()
-        self.assertEquals((False, 8), (yield updater.updateCache()))
+        self.assertEquals((False, 8, 8), (yield updater.updateCache()))
 
         # Verify cache is populated:
         self.assertTrue((yield cache.isPopulated()))
@@ -364,13 +364,20 @@
                 groups,
             )
 
+        # Verify CalendarUserProxyPrincipalResource.containsPrincipal( ) works
+        delegator = self._getPrincipalByShortName(DirectoryService.recordType_locations, "mercury")
+        proxyPrincipal = delegator.getChild("calendar-proxy-write")
+        for expected, name in [(True, "wsanchez"), (False, "cdaboo")]:
+            delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
+            self.assertEquals(expected, (yield proxyPrincipal.containsPrincipal(delegate)))
+
         # Verify that principals who were previously members of delegated-to groups but
         # are no longer members have their proxyFor info cleaned out of the cache:
         # Remove wsanchez from all groups in the directory, run the updater, then check
         # that wsanchez is only a proxy for gemini (since that assignment does not involve groups)
         self.directoryService.xmlFile = dirTest.child("accounts-modified.xml")
         self.directoryService._alwaysStat = True
-        self.assertEquals((False, 7), (yield updater.updateCache()))
+        self.assertEquals((False, 7, 1), (yield updater.updateCache()))
         delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, "wsanchez")
         proxyFor = (yield delegate.proxyFor(True))
         self.assertEquals(
@@ -510,9 +517,10 @@
         yield cache.acquireLock()
 
         self.assertFalse((yield cache.isPopulated()))
-        fast, numMembers = (yield updater.updateCache(fast=True))
+        fast, numMembers, numChanged = (yield updater.updateCache(fast=True))
         self.assertEquals(fast, False)
         self.assertEquals(numMembers, 8)
+        self.assertEquals(numChanged, 8)
         self.assertTrue(snapshotFile.exists())
         self.assertTrue((yield cache.isPopulated()))
 
@@ -528,9 +536,10 @@
         self.assertEquals(numMembers, 0)
 
         # Try an update which faults in from the directory (fast=False)
-        fast, numMembers = (yield updater.updateCache(fast=False))
+        fast, numMembers, numChanged = (yield updater.updateCache(fast=False))
         self.assertEquals(fast, False)
         self.assertEquals(numMembers, 8)
+        self.assertEquals(numChanged, 0)
 
         # Verify the snapshot contains the pickled dictionary we expect
         members = pickle.loads(snapshotFile.getContent())
@@ -583,7 +592,62 @@
             }
         )
 
+class RecordsMatchingTokensTests(TestCase):
 
+    @inlineCallbacks
+    def setUp(self):
+        super(RecordsMatchingTokensTests, self).setUp()
+
+        self.directoryService = XMLDirectoryService(
+            {
+                'xmlFile' : xmlFile,
+                'augmentService' :
+                    augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
+            }
+        )
+        calendaruserproxy.ProxyDBService = calendaruserproxy.ProxySqliteDB("proxies.sqlite")
+
+        # Set up a principals hierarchy for each service we're testing with
+        self.principalRootResources = {}
+        name = self.directoryService.__class__.__name__
+        url = "/" + name + "/"
+
+        provisioningResource = DirectoryPrincipalProvisioningResource(url, self.directoryService)
+
+        self.site.resource.putChild(name, provisioningResource)
+
+        self.principalRootResources[self.directoryService.__class__.__name__] = provisioningResource
+
+        yield XMLCalendarUserProxyLoader(proxiesFile.path).updateProxyDB()
+
+    def tearDown(self):
+        """ Empty the proxy db between tests """
+        return calendaruserproxy.ProxyDBService.clean()
+
+    @inlineCallbacks
+    def test_recordsMatchingTokens(self):
+        """
+        Exercise the default recordsMatchingTokens implementation
+        """
+        records = list((yield self.directoryService.recordsMatchingTokens(["Use", "01"])))
+        self.assertEquals(len(records), 1)
+        self.assertEquals(records[0].shortNames[0], "user01")
+
+        records = list((yield self.directoryService.recordsMatchingTokens(['"quotey"'],
+            context=self.directoryService.searchContext_attendee)))
+        self.assertEquals(len(records), 1)
+        self.assertEquals(records[0].shortNames[0], "doublequotes")
+
+        records = list((yield self.directoryService.recordsMatchingTokens(["coast"])))
+        self.assertEquals(len(records), 5)
+
+        records = list((yield self.directoryService.recordsMatchingTokens(["poll"],
+            context=self.directoryService.searchContext_location)))
+        self.assertEquals(len(records), 1)
+        self.assertEquals(records[0].shortNames[0], "apollo")
+
+
+
 class GUIDTests(TestCase):
 
     def setUp(self):

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_ldapdirectory.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_ldapdirectory.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_ldapdirectory.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -16,7 +16,7 @@
 
 try:
     from twistedcaldav.directory.ldapdirectory import (
-        buildFilter, LdapDirectoryService,
+        buildFilter, buildFilterFromTokens, LdapDirectoryService,
         MissingGuidException, MissingRecordNameException,
         normalizeDNstr, dnContainedIn
     )
@@ -43,6 +43,7 @@
                 "firstName" : "givenName",
                 "lastName" : "sn",
                 "guid" : "generateduid",
+                "memberIDAttr" : "generateduid",
             }
 
             entries = [
@@ -193,6 +194,79 @@
                     entry["expected"]
                 )
 
+
+    class BuildFilterFromTokensTestCase(TestCase):
+
+        def test_buildFilterFromTokens(self):
+
+            entries = [
+                {
+                    "tokens" : ["foo"],
+                    "mapping" : {
+                        "fullName" : "cn",
+                        "emailAddresses" : "mail",
+                    },
+                    "expected" : "(|(cn=*foo*)(mail=*foo*))",
+                },
+                {
+                    "tokens" : ["foo"],
+                    "mapping" : {
+                        "fullName" : "cn",
+                        "emailAddresses" : ["mail", "mailAliases"],
+                    },
+                    "expected" : "(|(cn=*foo*)(mail=*foo*)(mailAliases=*foo*))",
+                },
+                {
+                    "tokens" : [],
+                    "mapping" : {
+                        "fullName" : "cn",
+                        "emailAddresses" : "mail",
+                    },
+                    "expected" : None,
+                },
+                {
+                    "tokens" : ["foo", "bar"],
+                    "mapping" : { },
+                    "expected" : None,
+                },
+                {
+                    "tokens" : ["foo", "bar"],
+                    "mapping" : {
+                        "emailAddresses" : "mail",
+                    },
+                    "expected" : "(&(mail=*foo*)(mail=*bar*))",
+                },
+                {
+                    "tokens" : ["foo", "bar"],
+                    "mapping" : {
+                        "fullName" : "cn",
+                        "emailAddresses" : "mail",
+                    },
+                    "expected" : "(&(|(cn=*foo*)(mail=*foo*))(|(cn=*bar*)(mail=*bar*)))",
+                },
+                {
+                    "tokens" : ["foo", "bar"],
+                    "mapping" : {
+                        "fullName" : "cn",
+                        "emailAddresses" : ["mail", "mailAliases"],
+                    },
+                    "expected" : "(&(|(cn=*foo*)(mail=*foo*)(mailAliases=*foo*))(|(cn=*bar*)(mail=*bar*)(mailAliases=*bar*)))",
+                },
+                {
+                    "tokens" : ["foo", "bar", "baz("],
+                    "mapping" : {
+                        "fullName" : "cn",
+                        "emailAddresses" : "mail",
+                    },
+                    "expected" : "(&(|(cn=*foo*)(mail=*foo*))(|(cn=*bar*)(mail=*bar*))(|(cn=*baz\\28*)(mail=*baz\\28*)))",
+                },
+            ]
+            for entry in entries:
+                self.assertEquals(
+                    buildFilterFromTokens(entry["mapping"], entry["tokens"]),
+                    entry["expected"]
+                )
+
     class StubList(object):
         def __init__(self, wrapper):
             self.ldap = wrapper
@@ -222,23 +296,58 @@
         """
 
 
-        def __init__(self, actual):
+        def __init__(self, actual, records):
             self.actual = actual
             self.async = StubAsync()
 
             # Test data returned from search_s.
             # Note that some DNs have various extra whitespace added and mixed
             # up case since LDAP is pretty loose about these.
-            self.records = (
+            self.records = records
+
+
+        def search_s(self, base, scope, filterstr="(objectClass=*)",
+            attrlist=None):
+            """ A simple implementation of LDAP search filter processing """
+
+            base = normalizeDNstr(base)
+            results = []
+            for dn, attrs in self.records:
+                dn = normalizeDNstr(dn)
+                if dn == base:
+                    results.append(("ignored", (dn, attrs)))
+                elif dnContainedIn(ldap.dn.str2dn(dn), ldap.dn.str2dn(base)):
+                    if filterstr in ("(objectClass=*)", "(!(objectClass=organizationalUnit))"):
+                        results.append(("ignored", (dn, attrs)))
+                    else:
+                        trans = maketrans("&(|)", "   |")
+                        fragments = filterstr.encode("utf-8").translate(trans).split("|")
+                        for fragment in fragments:
+                            if not fragment:
+                                continue
+                            fragment = fragment.strip()
+                            key, value = fragment.split("=")
+                            if value in attrs.get(key, []):
+                                results.append(("ignored", (dn, attrs)))
+
+            return results
+
+
+    class LdapDirectoryServiceTestCase(TestCase):
+
+        nestedUsingDifferentAttributeUsingDN = (
+            (
                 (
                     "cn=Recursive1_coasts, cn=gROUps,dc=example, dc=com",
                     {
                         'cn': ['recursive1_coasts'],
                         'apple-generateduid': ['recursive1_coasts'],
                         'uniqueMember': [
-                            'cn=recursive2_coasts,cn=groups,dc=example,dc=com',
                             'uid=wsanchez ,cn=users, dc=eXAMple,dc=com',
                         ],
+                        'nestedGroups': [
+                            'cn=recursive2_coasts,cn=groups,dc=example,dc=com',
+                        ],
                     }
                 ),
                 (
@@ -247,12 +356,229 @@
                         'cn': ['recursive2_coasts'],
                         'apple-generateduid': ['recursive2_coasts'],
                         'uniqueMember': [
+                            'uid=cdaboo,cn=users,dc=example,dc=com',
+                        ],
+                        'nestedGroups': [
                             'cn=recursive1_coasts,cn=groups,dc=example,dc=com',
+                        ],
+                    }
+                ),
+                (
+                    'cn=both_coasts,cn=groups,dc=example,dc=com',
+                    {
+                        'cn': ['both_coasts'],
+                        'apple-generateduid': ['both_coasts'],
+                        'nestedGroups': [
+                            'cn=right_coast,cn=groups,dc=example,dc=com',
+                            'cn=left_coast,cn=groups,dc=example,dc=com',
+                        ],
+                    }
+                ),
+                (
+                    'cn=right_coast,cn=groups,dc=example,dc=com',
+                    {
+                        'cn': ['right_coast'],
+                        'apple-generateduid': ['right_coast'],
+                        'uniqueMember': [
                             'uid=cdaboo,cn=users,dc=example,dc=com',
                         ],
                     }
                 ),
                 (
+                    'cn=left_coast,cn=groups,dc=example,dc=com',
+                    {
+                        'cn': ['left_coast'],
+                        'apple-generateduid': ['left_coast'],
+                        'uniqueMember': [
+                            'uid=wsanchez, cn=users,dc=example,dc=com',
+                            'uid=lecroy,cn=users,dc=example,dc=com',
+                            'uid=dreid,cn=users,dc=example,dc=com',
+                        ],
+                    }
+                ),
+                (
+                    "uid=odtestamanda,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestamanda'],
+                        'apple-generateduid': ['9DC04A70-E6DD-11DF-9492-0800200C9A66'],
+                        'sn': ['Test'],
+                        'mail': ['odtestamanda at example.com', 'alternate at example.com'],
+                        'givenName': ['Amanda'],
+                        'cn': ['Amanda Test']
+                    }
+                ),
+                (
+                    "uid=odtestbetty,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestbetty'],
+                        'apple-generateduid': ['93A8F5C5-49D8-4641-840F-CD1903B0394C'],
+                        'sn': ['Test'],
+                        'mail': ['odtestbetty at example.com'],
+                        'givenName': ['Betty'],
+                        'cn': ['Betty Test']
+                    }
+                ),
+                (
+                    "uid=odtestcarlene,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestcarlene'],
+                        # Note: no guid here, to test this record is skipped
+                        'sn': ['Test'],
+                        'mail': ['odtestcarlene at example.com'],
+                        'givenName': ['Carlene'],
+                        'cn': ['Carlene Test']
+                    }
+                ),
+                (
+                    "uid=cdaboo,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['cdaboo'],
+                        'apple-generateduid': ['5A985493-EE2C-4665-94CF-4DFEA3A89500'],
+                        'sn': ['Daboo'],
+                        'mail': ['daboo at example.com'],
+                        'givenName': ['Cyrus'],
+                        'cn': ['Cyrus Daboo']
+                    }
+                ),
+                (
+                    "uid=wsanchez  ,  cn=users  , dc=example,dc=com",
+                    {
+                        'uid': ['wsanchez'],
+                        'apple-generateduid': ['6423F94A-6B76-4A3A-815B-D52CFD77935D'],
+                        'sn': ['Sanchez'],
+                        'mail': ['wsanchez at example.com'],
+                        'givenName': ['Wilfredo'],
+                        'cn': ['Wilfredo Sanchez']
+                    }
+                ),
+            ),
+            {
+                "augmentService" : None,
+                "groupMembershipCache" : None,
+                "cacheTimeout": 1, # Minutes
+                "negativeCaching": False,
+                "warningThresholdSeconds": 3,
+                "batchSize": 500,
+                "queryLocationsImplicitly": True,
+                "restrictEnabledRecords": True,
+                "restrictToGroup": "both_coasts",
+                "recordTypes": ("users", "groups", "locations", "resources"),
+                "uri": "ldap://localhost/",
+                "tls": False,
+                "tlsCACertFile": None,
+                "tlsCACertDir": None,
+                "tlsRequireCert": None, # never, allow, try, demand, hard
+                "credentials": {
+                    "dn": None,
+                    "password": None,
+                },
+                "authMethod": "LDAP",
+                "rdnSchema": {
+                    "base": "dc=example,dc=com",
+                    "guidAttr": "apple-generateduid",
+                    "users": {
+                        "rdn": "cn=Users",
+                        "attr": "uid", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "", # additional filter for this type
+                        "loginEnabledAttr" : "", # attribute controlling login
+                        "loginEnabledValue" : "yes", # "True" value of above attribute
+                        "calendarEnabledAttr" : "enable-calendar", # attribute controlling calendaring
+                        "calendarEnabledValue" : "yes", # "True" value of above attribute
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "uid",
+                            "fullName" : "cn",
+                            "emailAddresses" : ["mail", "emailAliases"],
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                    "groups": {
+                        "rdn": "cn=Groups",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "", # additional filter for this type
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "cn",
+                            "fullName" : "cn",
+                            "emailAddresses" : ["mail", "emailAliases"],
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                    "locations": {
+                        "rdn": "cn=Places",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "(objectClass=apple-resource)", # additional filter for this type
+                        "calendarEnabledAttr" : "", # attribute controlling calendaring
+                        "calendarEnabledValue" : "yes", # "True" value of above attribute
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "cn",
+                            "fullName" : "cn",
+                            "emailAddresses" : "", # old style, single string
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                    "resources": {
+                        "rdn": "cn=Resources",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "(objectClass=apple-resource)", # additional filter for this type
+                        "calendarEnabledAttr" : "", # attribute controlling calendaring
+                        "calendarEnabledValue" : "yes", # "True" value of above attribute
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "cn",
+                            "fullName" : "cn",
+                            "emailAddresses" : [], # new style, array
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                },
+                "groupSchema": {
+                    "membersAttr": "uniqueMember", # how members are specified
+                    "nestedGroupsAttr": "nestedGroups", # how nested groups are specified
+                    "memberIdAttr": "", # which attribute the above refer to
+                },
+                "resourceSchema": {
+                    "resourceInfoAttr": "apple-resource-info", # contains location/resource info
+                    "autoScheduleAttr": None,
+                    "proxyAttr": None,
+                    "readOnlyProxyAttr": None,
+                },
+                "partitionSchema": {
+                    "serverIdAttr": "server-id", # maps to augments server-id
+                    "partitionIdAttr": "partition-id", # maps to augments partition-id
+                },
+            }
+        )
+        nestedUsingSameAttributeUsingDN = (
+            (
+                (
+                    "cn=Recursive1_coasts, cn=gROUps,dc=example, dc=com",
+                    {
+                        'cn': ['recursive1_coasts'],
+                        'apple-generateduid': ['recursive1_coasts'],
+                        'uniqueMember': [
+                            'uid=wsanchez ,cn=users, dc=eXAMple,dc=com',
+                            'cn=recursive2_coasts,cn=groups,dc=example,dc=com',
+                        ],
+                    }
+                ),
+                (
+                    "cn=recursive2_coasts,cn=groups,dc=example,dc=com",
+                    {
+                        'cn': ['recursive2_coasts'],
+                        'apple-generateduid': ['recursive2_coasts'],
+                        'uniqueMember': [
+                            'uid=cdaboo,cn=users,dc=example,dc=com',
+                            'cn=recursive1_coasts,cn=groups,dc=example,dc=com',
+                        ],
+                    }
+                ),
+                (
                     'cn=both_coasts,cn=groups,dc=example,dc=com',
                     {
                         'cn': ['both_coasts'],
@@ -340,41 +666,8 @@
                         'cn': ['Wilfredo Sanchez']
                     }
                 ),
-            )
-
-        def search_s(self, base, scope, filterstr="(objectClass=*)",
-            attrlist=None):
-            """ A simple implementation of LDAP search filter processing """
-
-            base = normalizeDNstr(base)
-            results = []
-            for dn, attrs in self.records:
-                dn = normalizeDNstr(dn)
-                if dn == base:
-                    results.append(("ignored", (dn, attrs)))
-                elif dnContainedIn(ldap.dn.str2dn(dn), ldap.dn.str2dn(base)):
-                    if filterstr in ("(objectClass=*)", "(!(objectClass=organizationalUnit))"):
-                        results.append(("ignored", (dn, attrs)))
-                    else:
-                        trans = maketrans("&(|)", "   |")
-                        fragments = filterstr.encode("utf-8").translate(trans).split("|")
-                        for fragment in fragments:
-                            if not fragment:
-                                continue
-                            fragment = fragment.strip()
-                            key, value = fragment.split("=")
-                            if value in attrs.get(key, []):
-                                results.append(("ignored", (dn, attrs)))
-
-            return results
-
-
-    class LdapDirectoryServiceTestCase(TestCase):
-
-        def setUp(self):
-            super(LdapDirectoryServiceTestCase, self).setUp()
-
-            params = {
+            ),
+            {
                 "augmentService" : None,
                 "groupMembershipCache" : None,
                 "cacheTimeout": 1, # Minutes
@@ -382,8 +675,8 @@
                 "warningThresholdSeconds": 3,
                 "batchSize": 500,
                 "queryLocationsImplicitly": True,
-                "restrictEnabledRecords": False,
-                "restrictToGroup": "",
+                "restrictEnabledRecords": True,
+                "restrictToGroup": "both_coasts",
                 "recordTypes": ("users", "groups", "locations", "resources"),
                 "uri": "ldap://localhost/",
                 "tls": False,
@@ -475,9 +768,441 @@
                     "partitionIdAttr": "partition-id", # maps to augments partition-id
                 },
             }
+        )
+        nestedUsingDifferentAttributeUsingGUID = (
+            (
+                (
+                    "cn=Recursive1_coasts, cn=gROUps,dc=example, dc=com",
+                    {
+                        'cn': ['recursive1_coasts'],
+                        'apple-generateduid': ['recursive1_coasts'],
+                        'uniqueMember': [
+                            '6423F94A-6B76-4A3A-815B-D52CFD77935D',
+                        ],
+                        'nestedGroups': [
+                            'recursive2_coasts',
+                        ],
+                    }
+                ),
+                (
+                    "cn=recursive2_coasts,cn=groups,dc=example,dc=com",
+                    {
+                        'cn': ['recursive2_coasts'],
+                        'apple-generateduid': ['recursive2_coasts'],
+                        'uniqueMember': [
+                            '5A985493-EE2C-4665-94CF-4DFEA3A89500',
+                        ],
+                        'nestedGroups': [
+                            'recursive1_coasts',
+                        ],
+                    }
+                ),
+                (
+                    'cn=both_coasts,cn=groups,dc=example,dc=com',
+                    {
+                        'cn': ['both_coasts'],
+                        'apple-generateduid': ['both_coasts'],
+                        'nestedGroups': [
+                            'right_coast',
+                            'left_coast',
+                        ],
+                    }
+                ),
+                (
+                    'cn=right_coast,cn=groups,dc=example,dc=com',
+                    {
+                        'cn': ['right_coast'],
+                        'apple-generateduid': ['right_coast'],
+                        'uniqueMember': [
+                            '5A985493-EE2C-4665-94CF-4DFEA3A89500',
+                        ],
+                    }
+                ),
+                (
+                    'cn=left_coast,cn=groups,dc=example,dc=com',
+                    {
+                        'cn': ['left_coast'],
+                        'apple-generateduid': ['left_coast'],
+                        'uniqueMember': [
+                            '6423F94A-6B76-4A3A-815B-D52CFD77935D',
+                        ],
+                    }
+                ),
+                (
+                    "uid=odtestamanda,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestamanda'],
+                        'apple-generateduid': ['9DC04A70-E6DD-11DF-9492-0800200C9A66'],
+                        'sn': ['Test'],
+                        'mail': ['odtestamanda at example.com', 'alternate at example.com'],
+                        'givenName': ['Amanda'],
+                        'cn': ['Amanda Test']
+                    }
+                ),
+                (
+                    "uid=odtestbetty,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestbetty'],
+                        'apple-generateduid': ['93A8F5C5-49D8-4641-840F-CD1903B0394C'],
+                        'sn': ['Test'],
+                        'mail': ['odtestbetty at example.com'],
+                        'givenName': ['Betty'],
+                        'cn': ['Betty Test']
+                    }
+                ),
+                (
+                    "uid=odtestcarlene,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestcarlene'],
+                        # Note: no guid here, to test this record is skipped
+                        'sn': ['Test'],
+                        'mail': ['odtestcarlene at example.com'],
+                        'givenName': ['Carlene'],
+                        'cn': ['Carlene Test']
+                    }
+                ),
+                (
+                    "uid=cdaboo,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['cdaboo'],
+                        'apple-generateduid': ['5A985493-EE2C-4665-94CF-4DFEA3A89500'],
+                        'sn': ['Daboo'],
+                        'mail': ['daboo at example.com'],
+                        'givenName': ['Cyrus'],
+                        'cn': ['Cyrus Daboo']
+                    }
+                ),
+                (
+                    "uid=wsanchez  ,  cn=users  , dc=example,dc=com",
+                    {
+                        'uid': ['wsanchez'],
+                        'apple-generateduid': ['6423F94A-6B76-4A3A-815B-D52CFD77935D'],
+                        'sn': ['Sanchez'],
+                        'mail': ['wsanchez@example.com'],
+                        'givenName': ['Wilfredo'],
+                        'cn': ['Wilfredo Sanchez']
+                    }
+                ),
+            ),
+            {
+                "augmentService" : None,
+                "groupMembershipCache" : None,
+                "cacheTimeout": 1, # Minutes
+                "negativeCaching": False,
+                "warningThresholdSeconds": 3,
+                "batchSize": 500,
+                "queryLocationsImplicitly": True,
+                "restrictEnabledRecords": True,
+                "restrictToGroup": "both_coasts",
+                "recordTypes": ("users", "groups", "locations", "resources"),
+                "uri": "ldap://localhost/",
+                "tls": False,
+                "tlsCACertFile": None,
+                "tlsCACertDir": None,
+                "tlsRequireCert": None, # never, allow, try, demand, hard
+                "credentials": {
+                    "dn": None,
+                    "password": None,
+                },
+                "authMethod": "LDAP",
+                "rdnSchema": {
+                    "base": "dc=example,dc=com",
+                    "guidAttr": "apple-generateduid",
+                    "users": {
+                        "rdn": "cn=Users",
+                        "attr": "uid", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "", # additional filter for this type
+                        "loginEnabledAttr" : "", # attribute controlling login
+                        "loginEnabledValue" : "yes", # "True" value of above attribute
+                        "calendarEnabledAttr" : "enable-calendar", # attribute controlling calendaring
+                        "calendarEnabledValue" : "yes", # "True" value of above attribute
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "uid",
+                            "fullName" : "cn",
+                            "emailAddresses" : ["mail", "emailAliases"],
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                    "groups": {
+                        "rdn": "cn=Groups",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "", # additional filter for this type
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "cn",
+                            "fullName" : "cn",
+                            "emailAddresses" : ["mail", "emailAliases"],
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                    "locations": {
+                        "rdn": "cn=Places",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "(objectClass=apple-resource)", # additional filter for this type
+                        "calendarEnabledAttr" : "", # attribute controlling calendaring
+                        "calendarEnabledValue" : "yes", # "True" value of above attribute
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "cn",
+                            "fullName" : "cn",
+                            "emailAddresses" : "", # old style, single string
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                    "resources": {
+                        "rdn": "cn=Resources",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "(objectClass=apple-resource)", # additional filter for this type
+                        "calendarEnabledAttr" : "", # attribute controlling calendaring
+                        "calendarEnabledValue" : "yes", # "True" value of above attribute
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "cn",
+                            "fullName" : "cn",
+                            "emailAddresses" : [], # new style, array
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                },
+                "groupSchema": {
+                    "membersAttr": "uniqueMember", # how members are specified
+                    "nestedGroupsAttr": "nestedGroups", # how nested groups are specified
+                    "memberIdAttr": "apple-generateduid", # which attribute the above refer to
+                },
+                "resourceSchema": {
+                    "resourceInfoAttr": "apple-resource-info", # contains location/resource info
+                    "autoScheduleAttr": None,
+                    "proxyAttr": None,
+                    "readOnlyProxyAttr": None,
+                },
+                "partitionSchema": {
+                    "serverIdAttr": "server-id", # maps to augments server-id
+                    "partitionIdAttr": "partition-id", # maps to augments partition-id
+                },
+            }
+        )
+        nestedUsingSameAttributeUsingGUID = (
+            (
+                (
+                    "cn=Recursive1_coasts, cn=gROUps,dc=example, dc=com",
+                    {
+                        'cn': ['recursive1_coasts'],
+                        'apple-generateduid': ['recursive1_coasts'],
+                        'uniqueMember': [
+                            '6423F94A-6B76-4A3A-815B-D52CFD77935D',
+                            'recursive2_coasts',
+                        ],
+                    }
+                ),
+                (
+                    "cn=recursive2_coasts,cn=groups,dc=example,dc=com",
+                    {
+                        'cn': ['recursive2_coasts'],
+                        'apple-generateduid': ['recursive2_coasts'],
+                        'uniqueMember': [
+                            '5A985493-EE2C-4665-94CF-4DFEA3A89500',
+                            'recursive1_coasts',
+                        ],
+                    }
+                ),
+                (
+                    'cn=both_coasts,cn=groups,dc=example,dc=com',
+                    {
+                        'cn': ['both_coasts'],
+                        'apple-generateduid': ['both_coasts'],
+                        'uniqueMember': [
+                            'right_coast',
+                            'left_coast',
+                        ],
+                    }
+                ),
+                (
+                    'cn=right_coast,cn=groups,dc=example,dc=com',
+                    {
+                        'cn': ['right_coast'],
+                        'apple-generateduid': ['right_coast'],
+                        'uniqueMember': [
+                            '5A985493-EE2C-4665-94CF-4DFEA3A89500',
+                        ],
+                    }
+                ),
+                (
+                    'cn=left_coast,cn=groups,dc=example,dc=com',
+                    {
+                        'cn': ['left_coast'],
+                        'apple-generateduid': ['left_coast'],
+                        'uniqueMember': [
+                            '6423F94A-6B76-4A3A-815B-D52CFD77935D',
+                        ],
+                    }
+                ),
+                (
+                    "uid=odtestamanda,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestamanda'],
+                        'apple-generateduid': ['9DC04A70-E6DD-11DF-9492-0800200C9A66'],
+                        'sn': ['Test'],
+                        'mail': ['odtestamanda@example.com', 'alternate@example.com'],
+                        'givenName': ['Amanda'],
+                        'cn': ['Amanda Test']
+                    }
+                ),
+                (
+                    "uid=odtestbetty,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestbetty'],
+                        'apple-generateduid': ['93A8F5C5-49D8-4641-840F-CD1903B0394C'],
+                        'sn': ['Test'],
+                        'mail': ['odtestbetty@example.com'],
+                        'givenName': ['Betty'],
+                        'cn': ['Betty Test']
+                    }
+                ),
+                (
+                    "uid=odtestcarlene,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestcarlene'],
+                        # Note: no guid here, to test this record is skipped
+                        'sn': ['Test'],
+                        'mail': ['odtestcarlene@example.com'],
+                        'givenName': ['Carlene'],
+                        'cn': ['Carlene Test']
+                    }
+                ),
+                (
+                    "uid=cdaboo,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['cdaboo'],
+                        'apple-generateduid': ['5A985493-EE2C-4665-94CF-4DFEA3A89500'],
+                        'sn': ['Daboo'],
+                        'mail': ['daboo@example.com'],
+                        'givenName': ['Cyrus'],
+                        'cn': ['Cyrus Daboo']
+                    }
+                ),
+                (
+                    "uid=wsanchez  ,  cn=users  , dc=example,dc=com",
+                    {
+                        'uid': ['wsanchez'],
+                        'apple-generateduid': ['6423F94A-6B76-4A3A-815B-D52CFD77935D'],
+                        'sn': ['Sanchez'],
+                        'mail': ['wsanchez@example.com'],
+                        'givenName': ['Wilfredo'],
+                        'cn': ['Wilfredo Sanchez']
+                    }
+                ),
+            ),
+            {
+                "augmentService" : None,
+                "groupMembershipCache" : None,
+                "cacheTimeout": 1, # Minutes
+                "negativeCaching": False,
+                "warningThresholdSeconds": 3,
+                "batchSize": 500,
+                "queryLocationsImplicitly": True,
+                "restrictEnabledRecords": True,
+                "restrictToGroup": "both_coasts",
+                "recordTypes": ("users", "groups", "locations", "resources"),
+                "uri": "ldap://localhost/",
+                "tls": False,
+                "tlsCACertFile": None,
+                "tlsCACertDir": None,
+                "tlsRequireCert": None, # never, allow, try, demand, hard
+                "credentials": {
+                    "dn": None,
+                    "password": None,
+                },
+                "authMethod": "LDAP",
+                "rdnSchema": {
+                    "base": "dc=example,dc=com",
+                    "guidAttr": "apple-generateduid",
+                    "users": {
+                        "rdn": "cn=Users",
+                        "attr": "uid", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "", # additional filter for this type
+                        "loginEnabledAttr" : "", # attribute controlling login
+                        "loginEnabledValue" : "yes", # "True" value of above attribute
+                        "calendarEnabledAttr" : "enable-calendar", # attribute controlling calendaring
+                        "calendarEnabledValue" : "yes", # "True" value of above attribute
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "uid",
+                            "fullName" : "cn",
+                            "emailAddresses" : ["mail", "emailAliases"],
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                    "groups": {
+                        "rdn": "cn=Groups",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "", # additional filter for this type
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "cn",
+                            "fullName" : "cn",
+                            "emailAddresses" : ["mail", "emailAliases"],
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                    "locations": {
+                        "rdn": "cn=Places",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "(objectClass=apple-resource)", # additional filter for this type
+                        "calendarEnabledAttr" : "", # attribute controlling calendaring
+                        "calendarEnabledValue" : "yes", # "True" value of above attribute
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "cn",
+                            "fullName" : "cn",
+                            "emailAddresses" : "", # old style, single string
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                    "resources": {
+                        "rdn": "cn=Resources",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "(objectClass=apple-resource)", # additional filter for this type
+                        "calendarEnabledAttr" : "", # attribute controlling calendaring
+                        "calendarEnabledValue" : "yes", # "True" value of above attribute
+                        "mapping": { # maps internal record names to LDAP
+                            "recordName": "cn",
+                            "fullName" : "cn",
+                            "emailAddresses" : [], # new style, array
+                            "firstName" : "givenName",
+                            "lastName" : "sn",
+                        },
+                    },
+                },
+                "groupSchema": {
+                    "membersAttr": "uniqueMember", # how members are specified
+                    "nestedGroupsAttr": "", # how nested groups are specified
+                    "memberIdAttr": "apple-generateduid", # which attribute the above refer to
+                },
+                "resourceSchema": {
+                    "resourceInfoAttr": "apple-resource-info", # contains location/resource info
+                    "autoScheduleAttr": None,
+                    "proxyAttr": None,
+                    "readOnlyProxyAttr": None,
+                },
+                "partitionSchema": {
+                    "serverIdAttr": "server-id", # maps to augments server-id
+                    "partitionIdAttr": "partition-id", # maps to augments partition-id
+                },
+            }
+        )
 
-            self.service = LdapDirectoryService(params)
-            self.service.ldap = LdapDirectoryTestWrapper(self.service.ldap)
+        def setupService(self, scenario):
+            self.service = LdapDirectoryService(scenario[1])
+            self.service.ldap = LdapDirectoryTestWrapper(self.service.ldap, scenario[0])
             self.patch(ldap, "async", StubAsync())
 
 
@@ -485,6 +1210,8 @@
             """
             Exercise the fake search_s implementation
             """
+            self.setupService(self.nestedUsingDifferentAttributeUsingDN)
+
             # Get all groups
             self.assertEquals(
                 len(self.service.ldap.search_s("cn=groups,dc=example,dc=com", 0, "(objectClass=*)", [])), 5)
@@ -500,6 +1227,7 @@
             Exercise _ldapResultToRecord(), which converts a dictionary
             of LDAP attributes into an LdapDirectoryRecord
             """
+            self.setupService(self.nestedUsingDifferentAttributeUsingDN)
 
             # User without enabled-for-calendaring specified
 
@@ -731,6 +1459,7 @@
             listRecords makes an LDAP query (with fake results in this test)
             and turns the results into records
             """
+            self.setupService(self.nestedUsingDifferentAttributeUsingDN)
 
             records = self.service.listRecords(self.service.recordType_users)
             self.assertEquals(len(records), 4)
@@ -739,6 +1468,97 @@
                 set(["Amanda", "Betty", "Cyrus", "Wilfredo"]) # Carlene is skipped because no guid in LDAP
             )
 
+        def test_restrictedPrincipalsUsingDN(self):
+            """
+            If restrictToGroup is in place, restrictedPrincipals should return only the principals
+            within that group.  In this case we're testing scenarios in which membership
+            is specified by DN.
+            """
+            for scenario in (
+                self.nestedUsingSameAttributeUsingDN,
+                self.nestedUsingDifferentAttributeUsingDN,
+                ):
+                self.setupService(scenario)
+
+                self.assertEquals(
+                    set([
+                        "cn=left_coast,cn=groups,dc=example,dc=com",
+                        "cn=right_coast,cn=groups,dc=example,dc=com",
+                        "uid=cdaboo,cn=users,dc=example,dc=com",
+                        "uid=dreid,cn=users,dc=example,dc=com",
+                        "uid=lecroy,cn=users,dc=example,dc=com",
+                        "uid=wsanchez,cn=users,dc=example,dc=com",
+                    ]),
+                    self.service.restrictedPrincipals)
+
+                dn = "uid=cdaboo,cn=users,dc=example,dc=com"
+                attrs = {
+                    'uid': ['cdaboo'],
+                    'apple-generateduid': ['5A985493-EE2C-4665-94CF-4DFEA3A89500'],
+                    'sn': ['Daboo'],
+                    'mail': ['daboo@example.com'],
+                    'givenName': ['Cyrus'],
+                    'cn': ['Cyrus Daboo']
+                }
+                self.assertTrue(self.service.isAllowedByRestrictToGroup(dn, attrs))
+
+                dn = "uid=unknown,cn=users,dc=example,dc=com"
+                attrs = {
+                    'uid': ['unknown'],
+                    'apple-generateduid': ['5A985493-EE2C-4665-94CF-4DFEA3A89500'],
+                    'sn': ['unknown'],
+                    'mail': ['unknown@example.com'],
+                    'givenName': ['unknown'],
+                    'cn': ['unknown']
+                }
+                self.assertFalse(self.service.isAllowedByRestrictToGroup(dn, attrs))
+
+
+        def test_restrictedPrincipalsUsingGUID(self):
+            """
+            If restrictToGroup is in place, restrictedPrincipals should return only the principals
+            within that group.  In this case we're testing scenarios in which membership
+            is specified by an attribute, not DN.
+            """
+            for scenario in (
+                self.nestedUsingDifferentAttributeUsingGUID,
+                self.nestedUsingSameAttributeUsingGUID,
+                ):
+                self.setupService(scenario)
+
+                self.assertEquals(
+                    set([
+                        "left_coast",
+                        "right_coast",
+                        "5A985493-EE2C-4665-94CF-4DFEA3A89500",
+                        "6423F94A-6B76-4A3A-815B-D52CFD77935D",
+                    ]),
+                    self.service.restrictedPrincipals)
+
+                dn = "uid=cdaboo,cn=users,dc=example,dc=com"
+                attrs = {
+                    'uid': ['cdaboo'],
+                    'apple-generateduid': ['5A985493-EE2C-4665-94CF-4DFEA3A89500'],
+                    'sn': ['Daboo'],
+                    'mail': ['daboo@example.com'],
+                    'givenName': ['Cyrus'],
+                    'cn': ['Cyrus Daboo']
+                }
+                self.assertTrue(self.service.isAllowedByRestrictToGroup(dn, attrs))
+
+                dn = "uid=unknown,cn=users,dc=example,dc=com"
+                attrs = {
+                    'uid': ['unknown'],
+                    'apple-generateduid': ['unknown'],
+                    'sn': ['unknown'],
+                    'mail': ['unknown@example.com'],
+                    'givenName': ['unknown'],
+                    'cn': ['unknown']
+                }
+                self.assertFalse(self.service.isAllowedByRestrictToGroup(dn, attrs))
+
+
+
         @inlineCallbacks
         def test_groupMembershipAliases(self):
             """
@@ -749,6 +1569,7 @@
             guids, updateCache( ) is smart enough to map between guids and this
             attribute which is referred to in the code as record.cachedGroupsAlias().
             """
+            self.setupService(self.nestedUsingDifferentAttributeUsingDN)
 
             # Set up proxydb and preload it from xml
             calendaruserproxy.ProxyDBService = calendaruserproxy.ProxySqliteDB("proxies.sqlite")
@@ -760,7 +1581,7 @@
             updater = GroupMembershipCacheUpdater(calendaruserproxy.ProxyDBService,
                 self.service, 30, 15, cache=cache, useExternalProxies=False)
 
-            self.assertEquals((False, 8), (yield updater.updateCache()))
+            self.assertEquals((False, 8, 8), (yield updater.updateCache()))
 
             users = self.service.recordType_users
 
@@ -774,6 +1595,7 @@
 
 
         def test_splitIntoBatches(self):
+            self.setupService(self.nestedUsingDifferentAttributeUsingDN)
             # Data is perfect multiple of size
             results = list(splitIntoBatches(set(range(12)), 4))
             self.assertEquals(results,
@@ -791,6 +1613,7 @@
         def test_recordTypeForDN(self):
             # Ensure dn comparison is case insensitive and ignores extra
             # whitespace
+            self.setupService(self.nestedUsingDifferentAttributeUsingDN)
 
             # Base DNs for each recordtype should already be lowercase
             for dn in self.service.typeDNs.itervalues():
@@ -829,6 +1652,7 @@
             Verify queryDirectory skips LDAP queries where there has been no
             LDAP attribute mapping provided for the given index type.
             """
+            self.setupService(self.nestedUsingDifferentAttributeUsingDN)
 
             self.history = []
 

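Each LDAP scenario above is a 2-tuple of (ldapEntries, serviceParams):
setupService() hands the params to LdapDirectoryService and the entries to
the fake LDAP wrapper, so every test now picks its directory layout
explicitly. A minimal sketch of an additional test method written against
that pattern (the test name is hypothetical; the fixture and service names
come from the diff above):

    def test_restrictedPrincipalsGUIDMembership(self):
        # Pick the scenario whose group membership is expressed by GUID.
        self.setupService(self.nestedUsingDifferentAttributeUsingGUID)
        # Membership is recorded as apple-generateduid values, so the
        # restricted set contains GUIDs rather than DNs.
        self.assertTrue(
            "5A985493-EE2C-4665-94CF-4DFEA3A89500"
            in self.service.restrictedPrincipals)
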
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_livedirectory.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_livedirectory.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/directory/test/test_livedirectory.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -186,5 +186,8 @@
         class LiveODDirectoryServiceCase(LiveDirectoryTests, TestCase):
 
             def setUp(self):
-                params = {}
+                params = {
+                    "augmentService":
+                        augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
+                }
                 self.svc = OpenDirectoryService(params)

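The live OD test now has to supply an augmentService when constructing the
service directly. A sketch of the construction, assuming augmentsFile is
the augments XML file already set up elsewhere in this test module:

    from twistedcaldav.directory import augment

    params = {
        "augmentService":
            augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
    }
    svc = OpenDirectoryService(params)
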
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/extensions.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/extensions.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/extensions.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -249,7 +249,95 @@
         returnValue(MultiStatusResponse(responses))
 
 
+    @inlineCallbacks
+    def report_http___calendarserver_org_ns__calendarserver_principal_search(self, request,
+        calendarserver_principal_search):
+        """
+        Generate a calendarserver-principal-search REPORT.
 
+        @param request: Request object
+        @param calendarserver_principal_search: CalendarServerPrincipalSearch object
+        """
+
+        # Verify root element
+        if not isinstance(calendarserver_principal_search, customxml.CalendarServerPrincipalSearch):
+            msg = "%s expected as root element, not %s." % (customxml.CalendarServerPrincipalSearch.sname(), calendarserver_principal_search.sname())
+            log.warn(msg)
+            raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))
+
+        # Only handle Depth: 0
+        depth = request.headers.getHeader("depth", "0")
+        if depth != "0":
+            log.err("Error in calendarserver-principal-search REPORT, Depth set to %s" % (depth,))
+            raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "Depth %s not allowed" % (depth,)))
+
+        tokens, context, applyTo, clientLimit, propElement = extractCalendarServerPrincipalSearchData(calendarserver_principal_search)
+
+        # Run report
+        resultsWereLimited = None
+        resources = []
+        if applyTo or not hasattr(self, "directory"):
+            for principalCollection in self.principalCollections():
+                uri = principalCollection.principalCollectionURL()
+                resource = (yield request.locateResource(uri))
+                if resource:
+                    resources.append((resource, uri))
+        else:
+            resources.append((self, request.uri))
+
+        # We need to access a directory service
+        principalCollection = resources[0][0]
+        directory = principalCollection.directory
+
+        matchingResources = []
+        matchcount = 0
+
+        records = (yield directory.recordsMatchingTokens(tokens, context=context))
+
+        for record in records:
+            resource = principalCollection.principalForRecord(record)
+            if resource:
+                matchingResources.append(resource)
+
+                # We've determined this is a matching resource
+                matchcount += 1
+                if clientLimit is not None and matchcount >= clientLimit:
+                    resultsWereLimited = ("client", matchcount)
+                    break
+                if matchcount >= config.MaxPrincipalSearchReportResults:
+                    resultsWereLimited = ("server", matchcount)
+                    break
+
+        # Generate the response
+        responses = []
+        for resource in matchingResources:
+            url = resource.url()
+            yield prop_common.responseForHref(
+                request,
+                responses,
+                element.HRef.fromString(url),
+                resource,
+                prop_common.propertyListForResource,
+                propElement
+            )
+
+        if resultsWereLimited is not None:
+            if resultsWereLimited[0] == "server":
+                log.err("Too many matching resources in "
+                        "calendarserver-principal-search report")
+            responses.append(element.StatusResponse(
+                element.HRef.fromString(request.uri),
+                element.Status.fromResponseCode(
+                    responsecode.INSUFFICIENT_STORAGE_SPACE
+                ),
+                element.Error(element.NumberOfMatchesWithinLimits()),
+                element.ResponseDescription("Results limited by %s at %d"
+                                           % resultsWereLimited),
+            ))
+        returnValue(MultiStatusResponse(responses))
+
+
+
 class DirectoryElement(Element):
     """
     A L{DirectoryElement} is an L{Element} for rendering the contents of a
@@ -562,6 +650,13 @@
         return self.putChildren.keys()
 
 
+    def countChildren(self):
+        """
+        @return: the number of known children of this resource.
+        """
+        return len(self.putChildren)
+
+
     def locateChild(self, req, segments):
         """
         See L{IResource.locateChild}.
@@ -868,3 +963,41 @@
             )
         return self._data
 
+def extractCalendarServerPrincipalSearchData(doc):
+    """
+    Extract relevant info from a CalendarServerPrincipalSearch document
+
+    @param doc: CalendarServerPrincipalSearch object to extract info from
+    @return: A tuple containing:
+        the list of tokens
+        the context string
+        the applyTo boolean
+        the clientLimit integer
+        the propElement containing the properties to return
+    """
+    context = doc.attributes.get("context", None)
+    applyTo = False
+    tokens = []
+    clientLimit = None
+    propElement = None # stays None if the request omits a <prop> element
+    for child in doc.children:
+        if child.qname() == (dav_namespace, "prop"):
+            propElement = child
+
+        elif child.qname() == (dav_namespace,
+            "apply-to-principal-collection-set"):
+            applyTo = True
+
+        elif child.qname() == (calendarserver_namespace, "search-token"):
+            tokens.append(str(child))
+
+        elif child.qname() == (calendarserver_namespace, "limit"):
+            try:
+                nresults = child.childOfType(customxml.NResults)
+                clientLimit = int(str(nresults))
+            except (TypeError, ValueError,):
+                msg = "Bad XML: unknown value for <limit> element"
+                log.warn(msg)
+                raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))
+
+    return tokens, context, applyTo, clientLimit, propElement
+

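The new helper's contract is easiest to see with a document in hand; this
sketch mirrors the unit tests added to test_extensions.py further below:

    from txdav.xml.parser import WebDAVDocument
    from twistedcaldav.extensions import extractCalendarServerPrincipalSearchData

    data = ('<B:calendarserver-principal-search xmlns:A="DAV:"'
            ' xmlns:B="http://calendarserver.org/ns/" context="attendee">'
            '<B:search-token>morgen</B:search-token>'
            '<A:prop><A:displayname/></A:prop>'
            '</B:calendarserver-principal-search>')
    doc = WebDAVDocument.fromString(data)
    tokens, context, applyTo, clientLimit, props = (
        extractCalendarServerPrincipalSearchData(doc.root_element))
    # tokens == ["morgen"], context == "attendee",
    # applyTo is False, clientLimit is None
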
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/mail.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/mail.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/mail.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -29,7 +29,6 @@
 
 from cStringIO import StringIO
 
-from email.mime.image import MIMEImage
 from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText
 
@@ -1405,32 +1404,6 @@
                 formattedFrom, recipient, addressWithToken))
 
 
-    def getIconPath(self, details, canceled, language='en'):
-        iconDir = config.Scheduling.iMIP.MailIconsDirectory.rstrip("/")
-
-        if canceled:
-            iconName = "canceled.png"
-            iconPath = os.path.join(iconDir, iconName)
-            if os.path.exists(iconPath):
-                return iconPath
-            else:
-                return None
-
-        else:
-            month = int(details['month'])
-            day = int(details['day'])
-            with translationTo(language) as trans:
-                monthName = trans.monthAbbreviation(month)
-            iconName = "%02d.png" % (day,)
-            iconPath = os.path.join(iconDir, monthName.encode("utf-8"), iconName)
-            if not os.path.exists(iconPath):
-                # Try the generic (numeric) version
-                iconPath = os.path.join(iconDir, "%02d" % (month,), iconName)
-                if not os.path.exists(iconPath):
-                    return None
-            return iconPath
-
-
     def generateEmail(self, inviteState, calendar, orgEmail, orgCN,
                       attendees, fromAddress, replyToAddress, toAddress,
                       language='en'):
@@ -1483,18 +1456,16 @@
 
         details = self.getEventDetails(calendar, language=language)
         canceled = (calendar.propertyValue("METHOD") == "CANCEL")
-        iconPath = self.getIconPath(details, canceled, language=language)
 
         subjectFormat, labels = localizedLabels(language, canceled, inviteState)
         details.update(labels)
 
         details['subject'] = subjectFormat % {'summary' : details['summary']}
-        details['iconName'] = iconName = "calicon.png"
 
         plainText = self.renderPlainText(details, (orgCN, orgEmail),
                                          attendees, canceled)
 
-        [addIcon, htmlText] = self.renderHTML(details, (orgCN, orgEmail),
+        htmlText = self.renderHTML(details, (orgCN, orgEmail),
                                               attendees, canceled)
 
         msg = MIMEMultipart()
@@ -1520,19 +1491,6 @@
         msgHtml = MIMEText(htmlText, "html", "UTF-8")
         msgHtmlRelated.attach(msgHtml)
 
-        # an image for html version
-        if addIcon and iconPath != None and os.path.exists(iconPath):
-
-            with open(iconPath) as iconFile:
-                msgIcon = MIMEImage(iconFile.read(),
-                    _subtype='png;x-apple-mail-type=stationery;name="%s"' %
-                    (iconName,))
-
-            msgIcon.add_header("Content-ID", "<%s>" % (iconName,))
-            msgIcon.add_header("Content-Disposition", "inline;filename=%s" %
-                (iconName,))
-            msgHtmlRelated.attach(msgIcon)
-
         calendarText = str(calendar)
         # the icalendar attachment
         self.log_debug("Mail gateway sending calendar body: %s"
@@ -1584,10 +1542,7 @@
         Render HTML message part based on invitation details and a flag
         indicating whether the message is a cancellation.
 
-        @return: a 2-tuple of (should add icon (C{bool}), html text (C{str},
-            representing utf-8 encoded bytes)).  The first element indicates
-            whether the MIME generator needs to add a C{cid:} icon image part to
-            satisfy the HTML links.
+        @return: html text (C{str}, representing utf-8 encoded bytes).
         """
         orgCN, orgEmail = organizer
 
@@ -1649,10 +1604,7 @@
         flattenString(None, EmailElement()).addCallback(textCollector.append)
         htmlText = textCollector[0]
 
-        # If the template refers to an icon in a cid: link, it needs to be added
-        # in the MIME.
-        addIcon = (htmlTemplate.find("cid:%(iconName)s") != -1)
-        return (addIcon, htmlText)
+        return htmlText
 
 
     def getEventDetails(self, calendar, language='en'):

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/method/put_addressbook_common.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/method/put_addressbook_common.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/method/put_addressbook_common.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -89,7 +89,7 @@
         request,
         source=None, source_uri=None, sourceparent=None, sourceadbk=False, deletesource=False,
         destination=None, destination_uri=None, destinationparent=None, destinationadbk=True,
-        vcard=None, vcarddata=None,
+        vcard=None,
         indexdestination = True,
         returnData=False,
    ):
@@ -103,7 +103,6 @@
         @param destination:       the L{CalDAVResource} for the destination resource to copy into.
         @param destination_uri:   the URI for the destination resource.
         @param vcard:             the C{str} or L{Component} vcard data if there is no source, None otherwise.
-        @param vcarddata:         the C{str} vcard data if there is no source, None otherwise. Optional.
         @param sourceadbk:        True if the source resource is in a vcard collection, False otherwise.
         @param destinationadbk:   True if the destination resource is in a vcard collection, False otherwise
         @param sourceparent:      the L{CalDAVResource} for the source resource's parent collection, or None if source is None.
@@ -143,7 +142,7 @@
         self.destination_uri = destination_uri
         self.destinationparent = destinationparent
         self.vcard = vcard
-        self.vcarddata = vcarddata
+        self.vcarddata = None
         self.deletesource = deletesource
         self.indexdestination = indexdestination
         self.returnData = returnData
@@ -191,7 +190,6 @@
                 else:
                     try:
                         if type(self.vcard) in (types.StringType, types.UnicodeType,):
-                            self.vcarddata = self.vcard
                             self.vcard = Component.fromString(self.vcard)
                     except ValueError, e:
                         log.err(str(e))
@@ -288,7 +286,7 @@
         message = ""
         if not self.destination.exists() and \
             config.MaxResourcesPerCollection and \
-            len((yield self.destinationparent.listChildren())) >= config.MaxResourcesPerCollection:
+            (yield self.destinationparent.countChildren()) >= config.MaxResourcesPerCollection:
                 result = False
                 message = "Too many resources in collection %s" % (self.destinationparent,)
 
@@ -416,11 +414,8 @@
     @inlineCallbacks
     def doStorePut(self):
 
-        if self.vcarddata is None:
-            self.vcarddata = str(self.vcard)
-        stream = MemoryStream(self.vcarddata)
+        stream = MemoryStream(str(self.vcard))
         response = (yield self.destination.storeStream(stream))
-
         returnValue(response)
 
     @inlineCallbacks

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/method/put_common.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/method/put_common.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/method/put_common.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -127,7 +127,7 @@
         request,
         source=None, source_uri=None, sourceparent=None, sourcecal=False, deletesource=False,
         destination=None, destination_uri=None, destinationparent=None, destinationcal=True,
-        calendar=None, calendardata=None,
+        calendar=None,
         isiTIP=False,
         allowImplicitSchedule=True,
         internal_request=False,
@@ -144,7 +144,6 @@
         @param destination:       the L{CalDAVResource} for the destination resource to copy into.
         @param destination_uri:   the URI for the destination resource.
         @param calendar:          the C{str} or L{Component} calendar data if there is no source, None otherwise.
-        @param calendardata:      the C{str} calendar data if there is no source, None otherwise. Optional
         @param sourcecal:         True if the source resource is in a calendar collection, False otherwise.
         @param destinationcal:    True if the destination resource is in a calendar collection, False otherwise
         @param sourceparent:      the L{CalDAVResource} for the source resource's parent collection, or None if source is None.
@@ -189,7 +188,6 @@
         self.destination_uri = destination_uri
         self.destinationparent = destinationparent
         self.calendar = calendar
-        self.calendardata = calendardata
         self.deletesource = deletesource
         self.isiTIP = isiTIP
         self.allowImplicitSchedule = allowImplicitSchedule
@@ -278,7 +276,6 @@
                 else:
                     try:
                         if type(self.calendar) in (types.StringType, types.UnicodeType,):
-                            self.calendardata = self.calendar
                             self.calendar = Component.fromString(self.calendar)
                     except ValueError, e:
                         log.err(str(e))
@@ -290,8 +287,7 @@
 
                 # Possible timezone stripping
                 if config.EnableTimezonesByReference:
-                    if self.calendar.stripKnownTimezones():
-                        self.calendardata = None
+                    self.calendar.stripKnownTimezones()
 
                 # Skip validation on internal requests
                 if not self.internal_request:
@@ -502,7 +498,7 @@
         message = ""
         if not self.destination.exists() and \
             config.MaxResourcesPerCollection and \
-            len((yield self.destinationparent.listChildren())) >= config.MaxResourcesPerCollection:
+            (yield self.destinationparent.countChildren()) >= config.MaxResourcesPerCollection:
                 result = False
                 message = "Too many resources in collection %s" % (self.destinationparent,)
 
@@ -645,7 +641,6 @@
             if not self.source and self.destination.exists() and self.destination.accessMode:
                 old_access = self.destination.accessMode
                 self.calendar.addProperty(Property(name=Component.ACCESS_PROPERTY, value=old_access))
-                self.calendardata = None
                 
         return succeed(None)
 
@@ -673,7 +668,6 @@
                     "X-CALENDARSERVER-PRIVATE-COMMENT",
                     "X-CALENDARSERVER-ATTENDEE-COMMENT",
                 ))
-                self.calendardata = None
 
 
     @inlineCallbacks
@@ -724,8 +718,6 @@
                             anAttendee.setParameter("PARTSTAT", "COMPLETED")
                         component.addProperty(anAttendee)                   
 
-                self.calendardata = None
-
             elif new_completed ^ old_completed and not self.internal_request:
                 # COMPLETED changed - sync up attendee state
                 # We need this because many VTODO clients are not aware of scheduling,
@@ -740,7 +732,6 @@
                 originatorPrincipal = (yield self.destination.ownerPrincipal(self.request))
                 originatorAddresses = originatorPrincipal.calendarUserAddresses()
                 
-                changed = False
                 for component in self.calendar.subcomponents():
                     if component.name() != "VTODO":
                         continue
@@ -752,10 +743,6 @@
                             newpartstat = "COMPLETED" if component.hasProperty("COMPLETED") else "IN-PROCESS"
                             if newpartstat != oldpartstat:
                                 anAttendee.setParameter("PARTSTAT", newpartstat)
-                                changed = True
-
-                if changed:
-                    self.calendardata = None
                 
 
     @inlineCallbacks
@@ -815,9 +802,6 @@
                     if uri:
                         attachment.setValue(uri)
                         changed = True
-
-                if changed:
-                    self.calendardata = None
         
         returnValue(changed)
 
@@ -859,8 +843,6 @@
         alarm = self.destinationparent.getDefaultAlarm(vevent, timed)
         if alarm:
             changed = self.calendar.addAlarms(alarm)
-            if changed:
-                self.calendardata = None
         return changed
 
     @inlineCallbacks
@@ -970,7 +952,6 @@
                         returnValue(new_calendar)
                     else:
                         self.calendar = new_calendar
-                        self.calendardata = None
                         data_changed = True
                 did_implicit_action = True
         else:
@@ -1002,7 +983,6 @@
                     (caldav_namespace, "valid-calendar-data"),
                     "Cannot merge per-user data",
                 ))
-            self.calendardata = None
 
 
     @inlineCallbacks
@@ -1010,7 +990,7 @@
 
         # Stash the current calendar data as we may need to return it
         if self.returnData:
-            self.storeddata = str(self.calendar) if self.calendardata is None else self.calendardata
+            self.storeddata = str(self.calendar)
 
         # Always do the per-user data merge right before we store
         yield self.mergePerUserData()
@@ -1048,9 +1028,7 @@
         if data is None:
             # We'll be passing this component directly to storeComponent( )
             componentToStore = self.calendar
-            if self.calendardata is None:
-                self.calendardata = str(self.calendar)
-            data = self.calendardata
+            data = str(self.calendar)
         else:
             # We'll be passing data as a stream to storeStream( )
             componentToStore = None

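With the calendardata caching gone, callers hand StoreCalendarObjectResource
just the component (a str or a Component) and the storer serializes it with
str() only when it actually writes. The calling pattern, as it now appears
in storebridge.py below:

    storer = StoreCalendarObjectResource(
        request = request,
        destination = newchild,
        destination_uri = newchildURL,
        destinationcal = True,
        destinationparent = self,
        calendar = component,  # no separate calendardata= argument any more
        returnData = returnData,
    )
    yield storer.run()
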
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/resource.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/resource.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/resource.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -430,6 +430,11 @@
                     carddavxml.MaxResourceSize.qname(),
                 )
 
+        if self.isNotificationCollection():
+            baseProperties += (
+                customxml.GETCTag.qname(),
+            )
+
         if hasattr(self, "scheduleTag") and self.scheduleTag:
             baseProperties += (
                 caldavxml.ScheduleTag.qname(),
@@ -606,7 +611,9 @@
             returnValue(element.ResourceID(element.HRef.fromString(self.resourceID())))
 
         elif qname == customxml.GETCTag.qname() and (
-            self.isPseudoCalendarCollection() or self.isAddressBookCollection()
+            self.isPseudoCalendarCollection() or
+            self.isAddressBookCollection() or
+            self.isNotificationCollection()
         ):
             returnValue(customxml.GETCTag.fromString((yield self.getInternalSyncToken())))
 

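Notification collections now advertise getctag as a live property and answer
it from getInternalSyncToken(). A sketch of the property check, mirroring the
new unit test in test_resource.py below:

    from twistedcaldav.notifications import NotificationCollectionResource

    resource = NotificationCollectionResource()
    assert (('http://calendarserver.org/ns/', 'getctag')
            in resource.liveProperties())
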
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/sharing.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/sharing.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/sharing.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -767,7 +767,9 @@
                     del setDict[u]
                 for userid, access in removeDict.iteritems():
                     result = (yield self.uninviteUserToShare(userid, access, request))
-                    (okusers if result else badusers).add(userid)
+                    # If result is False that means the user being removed was not
+                    # actually invited, but let's not return an error in this case.
+                    okusers.add(userid)
                 for userid, (cn, access, summary) in setDict.iteritems():
                     result = (yield self.inviteUserToShare(userid, cn, access, summary, request))
                     (okusers if result else badusers).add(userid)

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/stdconfig.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/stdconfig.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -447,8 +447,6 @@
     "AccountingLogRoot"   : "accounting",
 
     "GlobalStatsSocket"           : "caldavd-stats.sock", 
-    "GlobalStatsLoggingPeriod"    : 60, 
-    "GlobalStatsLoggingFrequency" : 12,
     
     "LogDatabase" : {
         "LabelsInSQL"            : False,
@@ -1470,6 +1468,7 @@
 
     # Principal property search is always enabled
     compliance += customxml.calendarserver_principal_property_search_compliance
+    compliance += customxml.calendarserver_principal_search_compliance
 
     configDict.CalDAVComplianceClasses = compliance
 

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/storebridge.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/storebridge.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/storebridge.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -310,6 +310,12 @@
         children.update((yield self._newStoreObject.listObjectResources()))
         returnValue(sorted(children))
 
+    def countChildren(self):
+        """
+        @return: L{Deferred} with the count of all known children of this resource.
+        """
+        return self._newStoreObject.countObjectResources()
+
     def name(self):
         return self._name
 
@@ -1120,7 +1126,7 @@
         return caldavxml.CalendarData
 
     @inlineCallbacks
-    def storeResourceData(self, request, newchild, newchildURL, component, text=None, returnData=False):
+    def storeResourceData(self, request, newchild, newchildURL, component, returnData=False):
         storer = StoreCalendarObjectResource(
             request = request,
             destination = newchild,
@@ -1128,7 +1134,6 @@
             destinationcal = True,
             destinationparent = self,
             calendar = component,
-            calendardata = text,
             returnData = returnData,
         )
         yield storer.run()
@@ -2051,7 +2056,7 @@
         return carddavxml.AddressData
 
     @inlineCallbacks
-    def storeResourceData(self, request, newchild, newchildURL, component, text=None, returnData=False):
+    def storeResourceData(self, request, newchild, newchildURL, component, returnData=False):
         storer = StoreAddressObjectResource(
             request = request,
             sourceadbk = False,
@@ -2060,7 +2065,6 @@
             destinationadbk = True,
             destinationparent = self,
             vcard = component,
-            vcarddata = text,
             returnData = returnData,
         )
         yield storer.run()

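countChildren() on the store-backed collection returns a Deferred (it proxies
countObjectResources() on the underlying store object), which is why the
quota checks in put_common.py and put_addressbook_common.py yield it instead
of materializing the full child list. A sketch of the pattern, with a
hypothetical helper name:

    from twisted.internet.defer import inlineCallbacks, returnValue
    from twistedcaldav.config import config

    @inlineCallbacks
    def tooManyResources(parent):
        # Cheaper than len((yield parent.listChildren())): no name list is
        # built just to be counted.
        count = (yield parent.countChildren())
        returnValue(bool(config.MaxResourcesPerCollection) and
                    count >= config.MaxResourcesPerCollection)
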
Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_database.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_database.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_database.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -121,6 +121,22 @@
         self.assertTrue(db.initialized)
 
     @inlineCallbacks
+    def test_connectFailure(self):
+        """
+        Failure to connect cleans up the pool
+        """
+        db = Database.TestDB(self.mktemp())
+        # Make _db_init fail
+        db._db_init = lambda : 1/0
+        self.assertFalse(db.initialized)
+        try:
+            yield db.open()
+        except:
+            pass
+        self.assertFalse(db.initialized)
+        self.assertEquals(db.pool, None)
+
+    @inlineCallbacks
     def test_readwrite(self):
         """
         Add a record, search for it

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_extensions.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_extensions.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_extensions.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -17,6 +17,7 @@
 
 from twext.python.filepath import CachingFilePath as FilePath
 from txdav.xml.element import WebDAVElement, ResourceType
+from txdav.xml.parser import WebDAVDocument
 from twext.web2.http_headers import MimeType
 from twext.web2.static import MetaDataMixin
 
@@ -24,7 +25,7 @@
 from twisted.trial.unittest import TestCase
 from twisted.web.microdom import parseString
 
-from twistedcaldav.extensions import DAVFile, DAVResourceWithChildrenMixin
+from twistedcaldav.extensions import DAVFile, DAVResourceWithChildrenMixin, extractCalendarServerPrincipalSearchData
 
 from xml.etree.cElementTree import XML
 
@@ -219,3 +220,46 @@
         self.assertEquals(len(result), 2)
         self.assertEquals(result[0].name, 'cheese')
         self.assertEquals(result[1], ['burger'])
+
+
+class CalendarServerPrincipalSearchTests(TestCase):
+    def test_extractCalendarServerPrincipalSearchData(self):
+        """
+        Exercise the parser for calendarserver-principal-search documents
+        """
+
+        data = """<B:calendarserver-principal-search xmlns:A="DAV:" xmlns:B="http://calendarserver.org/ns/" context="attendee">
+  <B:search-token>morgen</B:search-token>
+  <A:prop>
+    <A:principal-URL/>
+    <A:displayname/>
+  </A:prop>
+</B:calendarserver-principal-search>
+"""
+        doc = WebDAVDocument.fromString(data)
+        tokens, context, applyTo, clientLimit, propElement = extractCalendarServerPrincipalSearchData(doc.root_element)
+        self.assertEquals(tokens, ["morgen"])
+        self.assertEquals(context, "attendee")
+        self.assertFalse(applyTo)
+        self.assertEquals(clientLimit, None)
+
+
+        data = """<B:calendarserver-principal-search xmlns:A="DAV:" xmlns:B="http://calendarserver.org/ns/">
+  <B:search-token>morgen</B:search-token>
+  <B:search-token>sagen</B:search-token>
+  <B:limit>
+      <B:nresults>42</B:nresults>
+  </B:limit>
+  <A:prop>
+    <A:principal-URL/>
+    <A:displayname/>
+  </A:prop>
+  <A:apply-to-principal-collection-set/>
+</B:calendarserver-principal-search>
+"""
+        doc = WebDAVDocument.fromString(data)
+        tokens, context, applyTo, clientLimit, propElement = extractCalendarServerPrincipalSearchData(doc.root_element)
+        self.assertEquals(tokens, ["morgen", "sagen"])
+        self.assertEquals(context, None)
+        self.assertTrue(applyTo)
+        self.assertEquals(clientLimit, 42)

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_mail.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_mail.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_mail.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -17,7 +17,6 @@
 
 from cStringIO import StringIO
 from twisted.internet.defer import inlineCallbacks
-from twisted.python.filepath import FilePath
 from twisted.python.modules import getModule
 from twisted.web.template import Element, renderer, flattenString
 from twistedcaldav.config import config, ConfigDict
@@ -184,21 +183,6 @@
         self.assertEquals(retrieved, token)
 
 
-
-    def test_iconPath(self):
-        iconPath = self.handler.getIconPath({'day':'1', 'month':'1'}, False,
-                                            language='en')
-        iconDir = FilePath("/usr/share/caldavd/share/date_icons")
-
-        if iconDir.exists():
-            if iconDir.child("JAN").child("01.png"):
-                monthName = "JAN"
-            else:
-                monthName = "01"
-            monthPath = iconDir.child(monthName)
-            self.assertEquals(iconPath, monthPath.child("01.png").path)
-
-
     def test_checkDSNFailure(self):
 
         data = {

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_resource.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_resource.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_resource.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -26,6 +26,7 @@
  CalendarHomeResource, AddressBookHomeResource
 from twistedcaldav.test.util import InMemoryPropertyStore
 from twistedcaldav.test.util import TestCase
+from twistedcaldav.notifications import NotificationCollectionResource
 
 
 class StubProperty(object):
@@ -85,7 +86,11 @@
         self.assertTrue(('http://calendarserver.org/ns/', 'xmpp-heartbeat-uri') not in resource.liveProperties())
         self.assertTrue(('http://calendarserver.org/ns/', 'xmpp-server') not in resource.liveProperties())
 
+    def test_notificationCollectionLiveProperties(self):
+        resource = NotificationCollectionResource()
+        self.assertTrue(('http://calendarserver.org/ns/', 'getctag') in resource.liveProperties())
 
+
     @inlineCallbacks
     def test_push404(self):
         """

Modified: CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_sharing.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_sharing.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/twistedcaldav/test/test_sharing.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -462,7 +462,52 @@
             ),
         ))
 
+    @inlineCallbacks
+    def test_POSTremoveNonInvitee(self):
+        """
+        Ensure that removing a sharee that is not currently invited
+        doesn't return an error.  The server will just pretend it
+        removed the sharee.
+        """
 
+        self.resource.upgradeToShare()
+
+        yield self._doPOST("""<?xml version="1.0" encoding="utf-8" ?>
+<CS:share xmlns:D="DAV:" xmlns:CS="http://calendarserver.org/ns/">
+    <CS:set>
+        <D:href>mailto:user02 at example.com</D:href>
+        <CS:summary>My Shared Calendar</CS:summary>
+        <CS:read-write/>
+    </CS:set>
+    <CS:set>
+        <D:href>mailto:user03 at example.com</D:href>
+        <CS:summary>My Shared Calendar</CS:summary>
+        <CS:read-write/>
+    </CS:set>
+</CS:share>
+""")
+        yield self._doPOST("""<?xml version="1.0" encoding="utf-8" ?>
+<CS:share xmlns:D="DAV:" xmlns:CS="http://calendarserver.org/ns/">
+    <CS:remove>
+        <D:href>mailto:user03 at example.com</D:href>
+    </CS:remove>
+</CS:share>
+""")
+        yield self._doPOST("""<?xml version="1.0" encoding="utf-8" ?>
+<CS:share xmlns:D="DAV:" xmlns:CS="http://calendarserver.org/ns/">
+    <CS:remove>
+        <D:href>mailto:user02 at example.com</D:href>
+    </CS:remove>
+    <CS:remove>
+        <D:href>mailto:user03 at example.com</D:href>
+    </CS:remove>
+</CS:share>
+""")
+
+        propInvite = (yield self.resource.readProperty(customxml.Invite, None))
+        self.assertEquals(self._clearUIDElementValue(propInvite), customxml.Invite())
+
+
     @inlineCallbacks
     def test_POSTaddInvalidInvitee(self):
         self.resource.upgradeToShare()
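
A hedged sketch of the idempotent-remove behaviour this test pins down; the
helper names below are hypothetical, not the actual twistedcaldav code:

    def removeSharee(self, href):
        record = self.inviteRecordForHref(href)   # hypothetical lookup
        if record is None:
            # Sharee was never (or is no longer) invited: report
            # success anyway instead of returning an error.
            return True
        self.deleteInviteRecord(record)           # hypothetical delete
        return True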

Modified: CalendarServer/branches/users/gaya/inviteclean/txdav/base/datastore/file.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/txdav/base/datastore/file.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/txdav/base/datastore/file.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -187,15 +187,19 @@
                         self.log_error("Cannot undo DataStoreTransaction")
                 raise
 
-        for (operation, ignored) in self._postCommitOperations:
+        for operation in self._postCommitOperations:
             operation()
 
-    def postCommit(self, operation, immediately=False):
-        self._postCommitOperations.append((operation, immediately))
 
+    def postCommit(self, operation):
+        self._postCommitOperations.append(operation)
+
+
     def postAbort(self, operation):
         self._postAbortOperations.append(operation)
 
+
+
 class FileMetaDataMixin(object):
     
     implements(IDataStoreObject)
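
With the `immediately` flag gone, the hook bookkeeping in DataStoreTransaction
reduces to a plain list of callables. A condensed sketch of the resulting
contract (not the full class):

    class DataStoreTransactionSketch(object):
        def __init__(self):
            self._postCommitOperations = []
            self._postAbortOperations = []

        def postCommit(self, operation):
            # Hooks run in registration order once commit() succeeds.
            self._postCommitOperations.append(operation)

        def postAbort(self, operation):
            self._postAbortOperations.append(operation)

        def commit(self):
            # ... perform the actual commit, then:
            for operation in self._postCommitOperations:
                operation()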

Modified: CalendarServer/branches/users/gaya/inviteclean/txdav/base/datastore/util.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/txdav/base/datastore/util.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/txdav/base/datastore/util.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -70,13 +70,13 @@
 
 
     def setAfterCommit(self, transaction, key, value):
-        transaction.postCommit(lambda: self.set(key, value), immediately=True)
+        transaction.postCommit(lambda: self.set(key, value))
 
     def invalidateAfterCommit(self, transaction, key):
         # Invalidate now (so that operations within this transaction see it)
         # and *also* post-commit (because there could be a scheduled setAfterCommit
         # for this key)
-        transaction.postCommit(lambda: self.delete(key), immediately=True)
+        transaction.postCommit(lambda: self.delete(key))
         return self.delete(key)
 
     # Home child objects by name
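
The double delete in invalidateAfterCommit is deliberate, as the comment
notes. A usage sketch with hypothetical `cache` and `txn` objects:

    cache.setAfterCommit(txn, "home:user01", homeData)  # queued post-commit
    cache.invalidateAfterCommit(txn, "home:user01")
    # 1. delete(key) runs immediately, so reads inside this transaction
    #    miss the stale entry;
    # 2. the queued post-commit delete runs after the earlier
    #    setAfterCommit hook, so a stale set cannot win the race.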

Modified: CalendarServer/branches/users/gaya/inviteclean/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/txdav/caldav/datastore/sql.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/txdav/caldav/datastore/sql.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -142,7 +142,7 @@
         for dropboxID, path in rows:
             attachment = Attachment._attachmentPathRoot(self._txn, dropboxID).child(path)
             if attachment.exists():
-                self._txn.postCommit(attachment.remove)
+                yield attachment.remove()
 
         yield Delete(
             From=at,
@@ -1098,12 +1098,13 @@
             tr.TRANSPARENT                 : transp,
         }, Return=tr.INSTANCE_ID).on(txn))[0][0]
         peruserdata = component.perUserTransparency(rid)
-        for useruid, transp in peruserdata:
-            (yield Insert({
-                tpy.TIME_RANGE_INSTANCE_ID : instanceid,
-                tpy.USER_ID                : useruid,
-                tpy.TRANSPARENT            : transp,
-            }).on(txn))
+        for useruid, usertransp in peruserdata:
+            if usertransp != transp:
+                (yield Insert({
+                    tpy.TIME_RANGE_INSTANCE_ID : instanceid,
+                    tpy.USER_ID                : useruid,
+                    tpy.TRANSPARENT            : usertransp,
+                }).on(txn))
 
 
     @inlineCallbacks
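
The loop change above is a small space optimization: per-user transparency
rows are now written only when they differ from the instance default. A
standalone illustration with simplified values (not the DAL code):

    transp = "T"                                  # instance default
    peruserdata = [("user01", "T"), ("user02", "F")]
    rowsToInsert = [
        {"USER_ID": useruid, "TRANSPARENT": usertransp}
        for useruid, usertransp in peruserdata
        if usertransp != transp                   # user01's row is skipped
    ]
    assert rowsToInsert == [{"USER_ID": "user02", "TRANSPARENT": "F"}]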

Modified: CalendarServer/branches/users/gaya/inviteclean/txdav/caldav/datastore/test/common.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/txdav/caldav/datastore/test/common.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/txdav/caldav/datastore/test/common.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -232,7 +232,7 @@
     )
 
     @classproperty(cache=False)
-    def requirements(cls):
+    def requirements(cls): #@NoSelf
         metadata1 = cls.metadata1.copy()
         metadata2 = cls.metadata2.copy()
         metadata3 = cls.metadata3.copy()

Modified: CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/file.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/file.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/file.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -830,6 +830,10 @@
         )
 
 
+    def countObjectResources(self):
+        return len(self.listObjectResources())
+
+
     def objectResourceWithName(self, name):
         if name in self._removedObjectResources:
             return None

Modified: CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -70,7 +70,7 @@
 
 from twext.enterprise.dal.syntax import \
     Delete, utcNowSQL, Union, Insert, Len, Max, Parameter, SavepointAction, \
-    Select, Update, ColumnSyntax, TableSyntax, Upper
+    Select, Update, ColumnSyntax, TableSyntax, Upper, Count, ALL_COLUMNS
 
 from twistedcaldav.config import config
 
@@ -84,6 +84,7 @@
     pyCalendarTodatetime
 
 from txdav.base.datastore.util import normalizeUUIDOrNot
+from twext.enterprise.queue import NullQueuer
 
 from pycalendar.datetime import PyCalendarDateTime
 
@@ -131,8 +132,13 @@
 
     @ivar quota: the amount of space granted to each calendar home (in bytes)
         for storing attachments, or C{None} if quota should not be enforced.
+    @type quota: C{int} or C{NoneType}
 
-    @type quota: C{int} or C{NoneType}
+    @ivar queuer: An object with an C{enqueueWork} method, from
+        L{twext.enterprise.queue}.  Initially, this is a L{NullQueuer}, so it
+        is always usable, but in a properly configured environment it will be
+        upgraded to a more capable object that can distribute work throughout a
+        cluster.
     """
 
     implements(ICalendarStore)
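
Per the new docstring, the only thing required of `queuer` is an enqueueWork
method. A hedged sketch of how it would be swapped out:

    store.queuer = NullQueuer()  # default; always usable, no cluster needed
    # A properly configured deployment would install a cluster-aware object
    # exposing the same enqueueWork(txn, workItem, **kw) signature.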
@@ -159,6 +165,7 @@
         self.logSQL = logSQL
         self.logTransactionWaits = logTransactionWaits
         self.timeoutTransactions = timeoutTransactions
+        self.queuer = NullQueuer()
         self._migrating = False
         self._enableNotifications = True
 
@@ -334,6 +341,8 @@
         if self.timeoutSeconds:
             self.delayedTimeout = self.callLater(self.timeoutSeconds, _forceAbort)
 
+
+
 class CommonStoreTransaction(object):
     """
     Transaction implementation for SQL database.
@@ -349,8 +358,6 @@
         self._calendarHomes = {}
         self._addressbookHomes = {}
         self._notificationHomes = {}
-        self._postCommitOperations = []
-        self._postAbortOperations = []
         self._notifierFactory = notifierFactory
         self._notifiedAlready = set()
         self._bumpedAlready = set()
@@ -382,6 +389,9 @@
         self.dialect = sqlTxn.dialect
 
-        self._stats = TransactionStatsCollector(self._label, self._store.logStatsLogFile) if self._store.logStats else None
+        self._stats = (
+            TransactionStatsCollector(self._label, self._store.logStatsLogFile)
+            if self._store.logStats else None
+        )
         self.statementCount = 0
         self.iudCount = 0
         self.currentStatement = None
@@ -401,6 +412,22 @@
             __import__("txdav.carddav.datastore.sql")
 
 
+    def enqueue(self, workItem, **kw):
+        """
+        Enqueue a L{twext.enterprise.queue.WorkItem} for later execution.
+
+        For example::
+
+            yield (txn.enqueue(MyWorkItem, workDescription="some work to do")
+                   .whenProposed())
+
+        @return: a work proposal describing various events in the work's
+            life-cycle.
+        @rtype: L{twext.enterprise.queue.WorkProposal}
+        """
+        return self._store.queuer.enqueueWork(self, workItem, **kw)
+
+
     def store(self):
         return self._store
 
@@ -605,18 +632,18 @@
         return self._apnSubscriptionsBySubscriberQuery.on(self, subscriberGUID=guid)
 
 
-    def postCommit(self, operation, immediately=False):
+    def postCommit(self, operation):
         """
         Run things after C{commit}.
         """
-        self._postCommitOperations.append((operation, immediately))
+        return self._sqlTxn.postCommit(operation)
 
 
     def postAbort(self, operation):
         """
         Run things after C{abort}.
         """
-        self._postAbortOperations.append(operation)
+        return self._sqlTxn.postAbort(operation)
 
 
     def isNotifiedAlready(self, obj):
@@ -773,30 +800,16 @@
         """
         Commit the transaction and execute any post-commit hooks.
         """
-        @inlineCallbacks
-        def postCommit(ignored):
-            for operation, immediately in self._postCommitOperations:
-                if immediately:
-                    yield operation()
-                else:
-                    operation()
-            returnValue(ignored)
-
         if self._stats:
             self._stats.printReport()
+        return self._sqlTxn.commit()
 
-        return self._sqlTxn.commit().addCallback(postCommit)
 
-
     def abort(self):
         """
         Abort the transaction.
         """
-        def postAbort(ignored):
-            for operation in self._postAbortOperations:
-                operation()
-            return ignored
-        return self._sqlTxn.abort().addCallback(postAbort)
+        return self._sqlTxn.abort()
 
 
     def _oldEventsBase(limited): #@NoSelf
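
Net effect of the hunks above: CommonStoreTransaction no longer keeps its own
hook lists; registration and execution both live in the underlying _sqlTxn.
Behaviour is unchanged for callers, as in this hedged sketch:

    from twisted.python import log

    # Inside an @inlineCallbacks function:
    txn.postCommit(lambda: log.msg("committed"))  # runs after commit()
    txn.postAbort(lambda: log.msg("aborted"))     # runs after abort()
    yield txn.commit()  # prints the stats report (if enabled), then
                        # delegates to self._sqlTxn.commit()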
@@ -816,9 +829,9 @@
             ],
             From=ch.join(co).join(cb).join(tr),
             Where=(
-                ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID).And(
-                tr.CALENDAR_OBJECT_RESOURCE_ID == co.RESOURCE_ID).And(
-                cb.CALENDAR_RESOURCE_ID == tr.CALENDAR_RESOURCE_ID).And(
+                ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID     ).And(
+                tr.CALENDAR_OBJECT_RESOURCE_ID == co.RESOURCE_ID   ).And(
+                cb.CALENDAR_RESOURCE_ID == tr.CALENDAR_RESOURCE_ID ).And(
                 cb.BIND_MODE == _BIND_MODE_OWN
             ),
             GroupBy=(
@@ -931,7 +944,7 @@
         count = 0
         for dropboxID, path in results:
             attachment = Attachment(self, dropboxID, path)
-            (yield attachment.remove())
+            (yield attachment.remove( ))
             count += 1
         returnValue(count)
 
@@ -1386,7 +1399,7 @@
     def _loadPropertyStore(self):
         props = yield PropertyStore.load(
             self.uid(),
-            None,
+            self.uid(),
             self._txn,
             self._resourceID,
             notifyCallback=self.notifyChanged
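
Both _loadPropertyStore call sites in this file (here and in the hunk near the
end) make the same change: the per-user UID passed to PropertyStore.load is
now the owner's own UID rather than None. The parameter roles in the comments
below are inferred, not taken from the PropertyStore signature:

    props = yield PropertyStore.load(
        self.uid(),      # owner UID
        self.uid(),      # per-user (viewer) UID -- was None
        self._txn,
        self._resourceID,
        notifyCallback=self.notifyChanged
    )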
@@ -1908,9 +1921,9 @@
             # REVISIONS table still exists so we have to detect that and do db
             # INSERT or UPDATE as appropriate
 
-            found = bool((
+            found = bool( (
                 yield self._insertFindPreviouslyNamedQuery.on(
-                    self._txn, resourceID=self._resourceID, name=name)))
+                    self._txn, resourceID=self._resourceID, name=name)) )
             if found:
                 self._syncTokenRevision = (
                     yield self._updatePreviouslyNamedQuery.on(
@@ -1945,19 +1958,19 @@
 
     _objectResourceClass = None
 
-    _bindSchema = None
-    _homeSchema = None
-    _homeChildSchema = None
-    _homeChildMetaDataSchema = None
-    _revisionsSchema = None
-    _objectSchema = None
+    _bindSchema 				= None
+    _homeSchema 				= None
+    _homeChildSchema 			= None
+    _homeChildMetaDataSchema 	= None
+    _revisionsSchema 			= None
+    _objectSchema 				= None
 
-    _bindTable = None
-    _homeChildTable = None
+    _bindTable 			= None
+    _homeChildTable 	= None
     _homeChildBindTable = None
-    _revisionsTable = None
+    _revisionsTable 	= None
     _revisionsBindTable = None
-    _objectTable = None
+    _objectTable 		= None
 
 
     def __init__(self, home, name, resourceID, mode, status, message=None, ownerHome=None):
@@ -1969,20 +1982,20 @@
         else:
             notifiers = None
 
-        self._home = home
-        self._name = name
-        self._resourceID = resourceID
-        self._bindMode = mode
-        self._bindStatus = status
-        self._bindMessage = message
-        self._ownerHome = home if ownerHome is None else ownerHome
-        self._created = None
-        self._modified = None
-        self._objects = {}
-        self._objectNames = None
+        self._home 				= home
+        self._name 				= name
+        self._resourceID 		= resourceID
+        self._bindMode 			= mode
+        self._bindStatus 		= status
+        self._bindMessage 		= message
+        self._ownerHome 		= home if ownerHome is None else ownerHome
+        self._created 			= None
+        self._modified 			= None
+        self._objects 			= {}
+        self._objectNames 		= None
         self._syncTokenRevision = None
-        self._notifiers = notifiers
-        self._index = None  # Derived classes need to set this
+        self._notifiers 		= notifiers
+        self._index 			= None  # Derived classes need to set this
 
 
     @classproperty
@@ -2143,7 +2156,6 @@
             sharedName = yield self._txn.subtransaction(doInsert)
         except AllRetriesFailed:
             # FIXME: catch more specific exception
-
             sharedName = (yield self._updateBindQuery.on(
                 self._txn,
                 mode=mode, status=status, message=message,
@@ -2428,7 +2440,6 @@
                 message=bindMessage, ownerHome=self._home
             )
             yield new.initFromStore()
-
             result.append(new)
         returnValue(result)
 
@@ -2594,7 +2605,6 @@
         
         if not rows:
             returnValue(None)
-
         
         bindMode, homeID, resourceID, resourceName, bindStatus, bindMessage, ownerHomeID = rows[0] #@UnusedVariable
         
@@ -2623,7 +2633,8 @@
         return cls._bindFor((bind.RESOURCE_ID == Parameter("resourceID"))
                                .And(bind.HOME_RESOURCE_ID == Parameter("homeID"))
                                )
-
+                               
+                               
     @classmethod
     @inlineCallbacks
     def objectWithID(cls, home, resourceID):
@@ -2877,9 +2888,9 @@
 
         # Set to non-existent state
         self._resourceID = None
-        self._created = None
-        self._modified = None
-        self._objects = {}
+        self._created    = None
+        self._modified   = None
+        self._objects    = {}
 
         yield self.notifyChanged()
 
@@ -2981,6 +2992,25 @@
         returnValue(self._objectNames)
 
 
+    @classproperty
+    def _objectCountQuery(cls): #@NoSelf
+        """
+        DAL query to count all object resources for a home child.
+        """
+        obj = cls._objectSchema
+        return Select([Count(ALL_COLUMNS)], From=obj,
+                      Where=obj.PARENT_RESOURCE_ID == Parameter('resourceID'))
+
+
+    @inlineCallbacks
+    def countObjectResources(self):
+        if self._objectNames is None:
+            rows = yield self._objectCountQuery.on(
+                self._txn, resourceID=self._resourceID)
+            returnValue(rows[0][0])
+        returnValue(len(self._objectNames))
+
+
     def objectResourceWithName(self, name):
         if name in self._objects:
             return succeed(self._objects[name])
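
Both stores in this changeset gain countObjectResources: the file store
(earlier in this message) counts by listing names, while this SQL version
issues a single COUNT query unless the names are already cached. Usage sketch
with a hypothetical collection object:

    # One SELECT COUNT(*) when names aren't cached, a plain len() otherwise.
    count = yield calendar.countObjectResources()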
@@ -3870,7 +3900,7 @@
     def _loadPropertyStore(self):
         self._propertyStore = yield PropertyStore.load(
             self._uid,
-            None,
+            self._uid,
             self._txn,
             self._resourceID,
             notifyCallback=self.notifyChanged

Modified: CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/current.sql
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/current.sql	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/current.sql	2012-08-30 22:48:54 UTC (rev 9757)
@@ -22,7 +22,23 @@
 
 create sequence RESOURCE_ID_SEQ;
 
+-------------------------
+-- Cluster Bookkeeping --
+-------------------------
 
+-- Information about a process connected to this database.
+
+-- Note that this must match the node info schema in twext.enterprise.queue.
+create table NODE_INFO (
+  HOSTNAME  varchar(255) not null,
+  PID       integer not null,
+  PORT      integer not null,
+  TIME      timestamp not null default timezone('UTC', CURRENT_TIMESTAMP),
+
+  primary key(HOSTNAME, PORT)
+);
+
+
 -------------------
 -- Calendar Home --
 -------------------
@@ -497,6 +513,6 @@
   VALUE                         varchar(255)
 );
 
-insert into CALENDARSERVER values ('VERSION', '11');
+insert into CALENDARSERVER values ('VERSION', '12');
 insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '3');
 insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '1');

Copied: CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_11_to_12.sql (from rev 9756, CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_11_to_12.sql)
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_11_to_12.sql	                        (rev 0)
+++ CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_11_to_12.sql	2012-08-30 22:48:54 UTC (rev 9757)
@@ -0,0 +1,31 @@
+----
+-- Copyright (c) 2012 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 11 to 12 --
+---------------------------------------------------
+
+create table NODE_INFO (
+    "HOSTNAME" nvarchar2(255),
+    "PID" integer not null,
+    "PORT" integer not null,
+    "TIME" timestamp default CURRENT_TIMESTAMP at time zone 'UTC' not null,
+    primary key("HOSTNAME", "PORT")
+);
+
+-- Now update the version
+-- No data upgrades
+update CALENDARSERVER set VALUE = '12' where NAME = 'VERSION';

Copied: CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_11_to_12.sql (from rev 9756, CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_11_to_12.sql)
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_11_to_12.sql	                        (rev 0)
+++ CalendarServer/branches/users/gaya/inviteclean/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_11_to_12.sql	2012-08-30 22:48:54 UTC (rev 9757)
@@ -0,0 +1,31 @@
+----
+-- Copyright (c) 2012 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 11 to 12 --
+---------------------------------------------------
+
+create table NODE_INFO (
+  HOSTNAME  varchar(255) not null,
+  PID       integer not null,
+  PORT      integer not null,
+  TIME      timestamp not null default timezone('UTC', CURRENT_TIMESTAMP),
+  primary key(HOSTNAME, PORT)
+);
+
+-- Now update the version
+-- No data upgrades
+update CALENDARSERVER set VALUE = '12' where NAME = 'VERSION';

Modified: CalendarServer/branches/users/gaya/inviteclean/txdav/idav.py
===================================================================
--- CalendarServer/branches/users/gaya/inviteclean/txdav/idav.py	2012-08-30 16:57:22 UTC (rev 9756)
+++ CalendarServer/branches/users/gaya/inviteclean/txdav/idav.py	2012-08-30 22:48:54 UTC (rev 9757)
@@ -207,29 +207,15 @@
         """
 
 
-    def postCommit(operation, immediately=False):
+    def postCommit(operation):
         """
-        Registers an operation to be executed after the transaction is
-        committed.
-
-        postCommit can be called multiple times, and operations are executed
-        in the order which they were registered.
-
-        @param operation: a callable.
-        @param immediately: a boolean; True means finish this operation *before* the
-            commit( ) call completes, defaults to False.
+        @see: L{IAsyncTransaction.postCommit}
         """
 
 
     def postAbort(operation):
         """
-        Registers an operation to be executed after the transaction is
-        aborted.
-
-        postAbort can be called multiple times, and operations are executed
-        in the order which they were registered.
-
-        @param operation: a callable.
+        @see: L{IAsyncTransaction.postAbort}
         """
 
 