[CalendarServer-changes] [11861] CalendarServer/branches/users/gaya/sharedgroupfixes

source_changes at macosforge.org
Wed Mar 12 11:22:03 PDT 2014


Revision: 11861
          http://trac.calendarserver.org//changeset/11861
Author:   gaya at apple.com
Date:     2013-10-31 15:23:31 -0700 (Thu, 31 Oct 2013)
Log Message:
-----------
merge in r11779 through r11860

Revision Links:
--------------
    http://trac.calendarserver.org//changeset/11779
    http://trac.calendarserver.org//changeset/11860

Modified Paths:
--------------
    CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/accesslog.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/amppush.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/notifier.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/test/test_notifier.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tap/caldav.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tap/util.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tools/upgrade.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/conf/auth/accounts-test.xml
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/config.dist.plist
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/config.plist
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/population.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/sim.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/test_sim.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/sqlusage/requests/httpTests.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/sqlusage/sqlusage.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/tools/fix_calendar
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/tools/protocolanalysis.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/support/version.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twext/enterprise/dal/syntax.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twext/enterprise/dal/test/test_sqlsyntax.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twext/patches.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/aggregate.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/directory.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/expression.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/idirectory.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/index.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/util.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/xml.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/directory.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/ldapdirectory.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/test/test_directory.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/resource.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/scheduling_store/caldav/resource.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/stdconfig.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/subpostgres.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/test/test_subpostgres.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/util.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/file.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/schedule.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/imip/inbound.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/implicit.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/utils.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/sql.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/test/common.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/test/test_util.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/carddav/datastore/test/common.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/file.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_19_to_20.sql
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_13_to_14.sql
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/test/util.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/test/test_upgrade.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrade.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_3_to_4.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_4_to_5.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_3_to_4.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_4_to_5.py
    CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/util.py

Added Paths:
-----------
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/clients.plist
    CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/standard-configs/

Property Changed:
----------------
    CalendarServer/branches/users/gaya/sharedgroupfixes/


Property changes on: CalendarServer/branches/users/gaya/sharedgroupfixes
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11612
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:11632-11778
   + /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11612
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:11632-11860

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/accesslog.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/accesslog.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/accesslog.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -173,7 +173,7 @@
                     formatArgs["t"] = (nowtime - request.timeStamps[0][1]) * 1000
 
                 if hasattr(request, "extendedLogItems"):
-                    for k, v in request.extendedLogItems.iteritems():
+                    for k, v in sorted(request.extendedLogItems.iteritems(), key=lambda x: x[0]):
                         k = str(k).replace('"', "%22")
                         v = str(v).replace('"', "%22")
                         if " " in v:

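The sorted() call above makes the extended log items appear in a stable, key-ordered sequence instead of arbitrary dict order. A minimal Python 2 sketch of the resulting formatting (the sample items are illustrative, not taken from the server):

    extendedLogItems = {"rcount": 2, "cached": 1, "or": 3}
    parts = []
    for k, v in sorted(extendedLogItems.iteritems(), key=lambda x: x[0]):
        k = str(k).replace('"', "%22")
        v = str(v).replace('"', "%22")
        parts.append("%s=%s" % (k, v))
    # Always "cached=1 or=3 rcount=2", regardless of insertion order.
    print(" ".join(parts))
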
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/amppush.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/amppush.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/amppush.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -48,7 +48,8 @@
 # AMP Commands sent to client (and forwarded to Master)
 
 class NotificationForID(amp.Command):
-    arguments = [('id', amp.String()), ('dataChangedTimestamp', amp.Integer())]
+    arguments = [('id', amp.String()),
+                 ('dataChangedTimestamp', amp.Integer(optional=True))]
     response = [('status', amp.String())]
 
 

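Marking dataChangedTimestamp as optional means a client that omits it still issues a valid NotificationForID call. A hedged sketch of both call forms over an established AMP connection (the protocol object and values are illustrative):

    from twisted.protocols import amp

    class NotificationForID(amp.Command):
        arguments = [('id', amp.String()),
                     ('dataChangedTimestamp', amp.Integer(optional=True))]
        response = [('status', amp.String())]

    # With a connected AMP protocol `proto`, either form is accepted:
    #   proto.callRemote(NotificationForID, id="/CalDAV/example.com/home1/")
    #   proto.callRemote(NotificationForID, id="/CalDAV/example.com/home1/",
    #                    dataChangedTimestamp=1383259411)
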
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/notifier.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/notifier.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/notifier.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -84,10 +84,13 @@
 
 
     @inlineCallbacks
-    def notify(self):
+    def notify(self, txn):
         """
         Send the notification. For a home object we just push using the home id. For a home
         child we push both the owner home id and the owned home child id.
+
+        @param txn: The transaction to create the work item with
+        @type txn: L{CommonStoreTransaction}
         """
         # Push ids from the store objects are a tuple of (prefix, name,) and we need to compose that
         # into a single token.
@@ -100,7 +103,7 @@
         for prefix, id in ids:
             if self._notify:
                 self.log.debug("Notifications are enabled: %s %s/%s" % (self._storeObject, prefix, id,))
-                yield self._notifierFactory.send(prefix, id)
+                yield self._notifierFactory.send(prefix, id, txn)
             else:
                 self.log.debug("Skipping notification for: %s %s/%s" % (self._storeObject, prefix, id,))
 
@@ -147,11 +150,12 @@
 
 
     @inlineCallbacks
-    def send(self, prefix, id):
-        txn = self.store.newTransaction()
+    def send(self, prefix, id, txn):
+        """
+        Enqueue a push notification work item on the provided transaction.
+        """
         notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=self.coalesceSeconds)
         yield txn.enqueue(PushNotificationWork, pushID=self.pushKeyForId(prefix, id), notBefore=notBefore)
-        yield txn.commit()
 
 
     def newNotifier(self, storeObject):

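send() no longer opens and commits its own transaction; the PushNotificationWork item is enqueued on the transaction passed in by the caller, so the push is committed (or rolled back) together with the data change that triggered it. A hedged sketch of the calling pattern (store and home names are illustrative):

    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def changeAndNotify(store):
        txn = store.newTransaction()
        home = yield txn.calendarHomeWithUID("home1")
        # ... modify the home or one of its child collections ...
        yield home.notifyChanged()   # enqueues PushNotificationWork on txn
        yield txn.commit()           # data change and push work commit together
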
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/test/test_notifier.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/test/test_notifier.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/push/test/test_notifier.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -169,8 +169,8 @@
 
         home = yield self.homeUnderTest()
         yield home.notifyChanged()
+        self.assertEquals(self.notifierFactory.history, ["/CalDAV/example.com/home1/"])
         yield self.commit()
-        self.assertEquals(self.notifierFactory.history, ["/CalDAV/example.com/home1/"])
 
 
     @inlineCallbacks
@@ -178,11 +178,11 @@
 
         calendar = yield self.calendarUnderTest()
         yield calendar.notifyChanged()
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set(["/CalDAV/example.com/home1/", "/CalDAV/example.com/home1/calendar_1/"])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -191,7 +191,6 @@
         calendar = yield self.calendarUnderTest()
         home2 = yield self.homeUnderTest(name="home2")
         yield calendar.shareWith(home2, _BIND_MODE_WRITE)
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -200,11 +199,11 @@
                 "/CalDAV/example.com/home2/"
             ])
         )
+        yield self.commit()
 
         calendar = yield self.calendarUnderTest()
         home2 = yield self.homeUnderTest(name="home2")
         yield calendar.unshareWith(home2)
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -213,6 +212,7 @@
                 "/CalDAV/example.com/home2/"
             ])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -226,11 +226,11 @@
 
         shared = yield self.calendarUnderTest(home="home2", name=shareName)
         yield shared.notifyChanged()
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set(["/CalDAV/example.com/home1/", "/CalDAV/example.com/home1/calendar_1/"])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -238,8 +238,8 @@
 
         notifications = yield self.transactionUnderTest().notificationsWithUID("home1")
         yield notifications.notifyChanged()
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set(["/CalDAV/example.com/home1/", "/CalDAV/example.com/home1/notification/"])
         )
+        yield self.commit()

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tap/caldav.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tap/caldav.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -1212,6 +1212,28 @@
             else:
                 groupCacher = None
 
+            # Optionally enable Manhole access
+            if config.Manhole.Enabled:
+                try:
+                    from twisted.conch.manhole_tap import makeService as manholeMakeService
+                    portString = "tcp:%d:interface=127.0.0.1" % (config.Manhole.StartingPortNumber,)
+                    manholeService = manholeMakeService({
+                        "sshPort" : None,
+                        "telnetPort" : portString,
+                        "namespace" : {
+                            "config" : config,
+                            "service" : result,
+                            "store" : store,
+                            "directory" : directory,
+                            },
+                        "passwd" : config.Manhole.PasswordFilePath,
+                    })
+                    manholeService.setServiceParent(result)
+                    # Using print() because logging isn't ready at this point
+                    print("Manhole access enabled: %s" % (portString,))
+                except ImportError:
+                    print("Manhole access could not be enabled because manhole_tap could not be imported")
+
             def decorateTransaction(txn):
                 txn._pushDistributor = pushDistributor
                 txn._rootResource = result.rootResource
@@ -1407,7 +1429,9 @@
 
                 # Conditionally stop after upgrade at this point
                 pps.addStep(
-                    QuitAfterUpgradeStep(config.StopAfterUpgradeTriggerFile)
+                    QuitAfterUpgradeStep(
+                        config.StopAfterUpgradeTriggerFile or config.UpgradeHomePrefix
+                    )
                 )
 
                 pps.addStep(

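The Manhole block above reads its settings from the Manhole section of the configuration and, when enabled, listens for telnet on 127.0.0.1 at the configured port. A hedged Python sketch of the corresponding settings (key names match those consulted above; the values are illustrative assumptions, not shipped defaults):

    # Hypothetical stdconfig-style defaults for the keys consulted above.
    MANHOLE_DEFAULTS = {
        "Manhole": {
            "Enabled": False,              # off unless explicitly enabled
            "StartingPortNumber": 5000,    # telnet listener, bound to 127.0.0.1
            "PasswordFilePath": "",        # passwd-format file with manhole logins
        },
    }
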
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tap/util.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tap/util.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tap/util.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -95,6 +95,7 @@
 from txdav.common.datastore.sql import CommonDataStore as CommonSQLDataStore
 from txdav.common.datastore.file import CommonDataStore as CommonFileDataStore
 from txdav.common.datastore.sql import current_sql_schema
+from txdav.common.datastore.upgrade.sql.upgrade import NotAllowedToUpgrade
 from twext.python.filepath import CachingFilePath
 from urllib import quote
 from twisted.python.usage import UsageError
@@ -1077,7 +1078,8 @@
 
 
     def defaultStepWithFailure(self, failure):
-        log.failure("Step failure", failure=failure)
+        if failure.type != NotAllowedToUpgrade:
+            log.failure("Step failure", failure=failure)
         return failure
 
     # def protectStep(self, callback):

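The guard above suppresses the error log only for the expected NotAllowedToUpgrade stop while still propagating every failure to the next errback. A hedged standalone sketch of the same pattern (the local exception class and messages are illustrative; the real class is imported from the upgrade module as shown above):

    from twisted.internet.defer import fail

    class NotAllowedToUpgrade(Exception):
        """Deliberate stop: this node is not allowed to run the upgrade."""

    def defaultStepWithFailure(failure):
        # Log only unexpected failures; the expected stop passes through
        # to the next errback without generating log noise.
        if failure.type != NotAllowedToUpgrade:
            print("Step failure: %s" % (failure.getErrorMessage(),))
        return failure

    d = fail(NotAllowedToUpgrade("partial upgrade only"))
    d.addErrback(defaultStepWithFailure)
    d.addErrback(lambda f: None)   # swallow the failure at the end of the sketch
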
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tools/upgrade.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tools/upgrade.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/calendarserver/tools/upgrade.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -82,6 +82,7 @@
 
     optParameters = [
         ['config', 'f', DEFAULT_CONFIG_FILE, "Specify caldavd.plist configuration path."],
+        ['prefix', 'x', "", "Only upgrade homes with the specified GUID prefix - partial upgrade only."],
     ]
 
     def __init__(self):
@@ -142,11 +143,17 @@
         """
         Immediately stop.  The upgrade will have been run before this.
         """
-        # If we get this far the database is OK
-        if self.options["status"]:
-            self.output.write("Database OK.\n")
+        if self.store is None:
+            if self.options["status"]:
+                self.output.write("Upgrade needed.\n")
+            else:
+                self.output.write("Upgrade failed.\n")
         else:
-            self.output.write("Upgrade complete, shutting down.\n")
+            # If we get this far the database is OK
+            if self.options["status"]:
+                self.output.write("Database OK.\n")
+            else:
+                self.output.write("Upgrade complete, shutting down.\n")
         UpgraderService.started = True
 
         from twisted.internet import reactor
@@ -191,9 +198,11 @@
             data.MergeUpgrades = True
         config.addPostUpdateHooks([setMerge])
 
+
     def makeService(store):
         return UpgraderService(store, options, output, reactor, config)
 
+
     def onlyUpgradeEvents(eventDict):
         text = formatEvent(eventDict)
         output.write(logDateString() + " " + text + "\n")
@@ -203,14 +212,19 @@
         log.publisher.levels.setLogLevelForNamespace(None, LogLevel.debug)
         addObserver(onlyUpgradeEvents)
 
+
     def customServiceMaker():
         customService = CalDAVServiceMaker()
         customService.doPostImport = options["postprocess"]
         return customService
 
+
     def _patchConfig(config):
         config.FailIfUpgradeNeeded = options["status"]
+        if options["prefix"]:
+            config.UpgradeHomePrefix = options["prefix"]
 
+
     def _onShutdown():
         if not UpgraderService.started:
             print("Failed to start service.")

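The new prefix parameter allows a partial upgrade limited to homes whose owner GUID starts with the given string; it is mapped onto config.UpgradeHomePrefix in _patchConfig() above. A hedged usage sketch (the installed tool name and prefix value are illustrative assumptions):

    # Assuming the tool is installed as calendarserver_upgrade:
    #
    #   calendarserver_upgrade -f /etc/caldavd/caldavd.plist --prefix 0123ABCD
    #
    # Only homes whose GUID begins with "0123ABCD" are upgraded in this pass;
    # remaining homes can be picked up by later runs with other prefixes.
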
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/conf/auth/accounts-test.xml
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/conf/auth/accounts-test.xml	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/conf/auth/accounts-test.xml	2013-10-31 22:23:31 UTC (rev 11861)
@@ -89,7 +89,7 @@
     <first-name>ま</first-name>
     <last-name>だ</last-name>
   </user>
-  <user repeat="99">
+  <user repeat="101">
     <uid>user%02d</uid>
     <uid>User %02d</uid>
     <guid>user%02d</guid>

Copied: CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/clients.plist (from rev 11860, CalendarServer/trunk/contrib/performance/loadtest/clients.plist)
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/clients.plist	                        (rev 0)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/clients.plist	2013-10-31 22:23:31 UTC (rev 11861)
@@ -0,0 +1,445 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+    Copyright (c) 2011-2013 Apple Inc. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+	<dict>
+		<!-- Define the kinds of software and user behavior the load simulation
+			will simulate. -->
+		<key>clients</key>
+
+		<!-- Have as many different kinds of software and user behavior configurations
+			as you want. Each is a dict -->
+		<array>
+
+			<dict>
+
+				<!-- Here is an OS X client simulator. -->
+				<key>software</key>
+				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
+
+				<!-- Arguments to use to initialize the OS_X_10_7 instance. -->
+				<key>params</key>
+				<dict>
+					<!-- Name that appears in logs. -->
+					<key>title</key>
+					<string>10.7</string>
+	
+					<!-- OS_X_10_7 can poll the calendar home at some interval. This is
+						in seconds. -->
+					<key>calendarHomePollInterval</key>
+					<integer>30</integer>
+
+					<!-- If the server advertises xmpp push, OS_X_10_7 can wait for notifications
+						about calendar home changes instead of polling for them periodically. If
+						this option is true, then look for the server advertisement for xmpp push
+						and use it if possible. Still fall back to polling if there is no xmpp push
+						advertised. -->
+					<key>supportPush</key>
+					<false />
+
+					<key>supportAmpPush</key>
+					<true/>
+					<key>ampPushHost</key>
+					<string>localhost</string>
+					<key>ampPushPort</key>
+					<integer>62311</integer>
+				</dict>
+
+				<!-- The profiles define certain types of user behavior on top of the
+					client software being simulated. -->
+				<key>profiles</key>
+				<array>
+
+					<!-- First an event-creating profile, which will periodically create
+						new events at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Eventer</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define the interval (in seconds) at which this profile will use
+								its client to create a new event. -->
+							<key>interval</key>
+							<integer>60</integer>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events
+								will be selected. This is an example of a "Distribution" parameter. The values
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps
+									in the near future, limited to certain days of the week and certain hours
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<true/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Half of all events will be non-recurring -->
+										<key>none</key>
+										<integer>50</integer>
+										
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>10</integer>
+										<key>weekly</key>
+										<integer>20</integer>
+										
+										<!-- Monthly, yearly, daily & weekly limit not so common -->
+										<key>monthly</key>
+										<integer>2</integer>
+										<key>yearly</key>
+										<integer>1</integer>
+										<key>dailylimit</key>
+										<integer>2</integer>
+										<key>weeklylimit</key>
+										<integer>5</integer>
+										
+										<!-- Work days pretty common -->
+										<key>workdays</key>
+										<integer>10</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile invites some number of new attendees to new events. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define the frequency at which new invitations will be sent out. -->
+							<key>sendInvitationDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.NormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mu gives the mean of the normal distribution (in seconds). -->
+									<key>mu</key>
+									<integer>60</integer>
+
+									<!-- and sigma gives its standard deviation. -->
+									<key>sigma</key>
+									<integer>5</integer>
+								</dict>
+							</dict>
+
+							<!-- Define the distribution of who will be invited to an event.
+							
+								When inviteeClumping is turned on each invitee is based on a sample of
+								users "close to" the organizer based on account index. If the clumping
+								is too "tight" for the requested number of attendees, then invites for
+								those larger numbers will simply fail (the sim will report that situation).
+								
+								When inviteeClumping is off invitees will be sampled across an entire
+								range of account indexes. In this case the distribution ought to be a
+								UniformIntegerDistribution with min=0 and max set to the number of accounts.
+							-->
+							<key>inviteeDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.UniformIntegerDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- The minimum value (inclusive) of the uniform distribution. -->
+									<key>min</key>
+									<integer>0</integer>
+									<!-- The maximum value (exclusive) of the uniform distribution. -->
+									<key>max</key>
+									<integer>99</integer>
+								</dict>
+							</dict>
+
+							<key>inviteeClumping</key>
+							<true/>
+
+							<!-- Define the distribution of how many attendees will be invited to an event.
+							
+								LogNormal is the best fit to observed data.
+
+
+								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
+								mode should typically be 1, and mean whatever matches the user behavior.
+								Our typical mean is 6. 							
+							     -->
+							<key>inviteeCountDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.LogNormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>1</integer>
+									<!-- mean - average-->
+									<key>median</key>
+									<integer>6</integer>
+									<!-- maximum -->
+									<key>maximum</key>
+									<real>60</real>
+								</dict>
+							</dict>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events
+								will be selected. This is an example of a "Distribution" parameter. The values
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps
+									in the near future, limited to certain days of the week and certain hours
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<true/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Half of all events will be non-recurring -->
+										<key>none</key>
+										<integer>50</integer>
+										
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>10</integer>
+										<key>weekly</key>
+										<integer>20</integer>
+										
+										<!-- Monthly, yearly, daily & weekly limit not so common -->
+										<key>monthly</key>
+										<integer>2</integer>
+										<key>yearly</key>
+										<integer>1</integer>
+										<key>dailylimit</key>
+										<integer>2</integer>
+										<key>weeklylimit</key>
+										<integer>5</integer>
+										
+										<!-- Work days pretty common -->
+										<key>workdays</key>
+										<integer>10</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile accepts invitations to events, handles cancels, and
+					     handles replies received. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Accepter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define how long to wait after seeing a new invitation before
+								accepting it.
+
+								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
+								(i.e., half of the users have accepted by that time).
+							-->
+							<key>acceptDelayDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.LogNormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>300</integer>
+									<!-- median - 50% done-->
+									<key>median</key>
+									<integer>1800</integer>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- A task-creating profile, which will periodically create
+						new tasks at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Tasker</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define the interval (in seconds) at which this profile will use
+								its client to create a new task. -->
+							<key>interval</key>
+							<integer>300</integer>
+
+							<!-- Define how due times (DUE) for the randomly generated tasks
+								will be selected. This is an example of a "Distribution" parameter. The values
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>taskDueDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps
+									in the near future, limited to certain days of the week and certain hours
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+				</array>
+
+				<!-- Determine the frequency at which this client configuration will
+					appear in the clients which are created by the load tester. -->
+				<key>weight</key>
+				<integer>1</integer>
+			</dict>
+		</array>
+	</dict>
+</plist>

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/config.dist.plist
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/config.dist.plist	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/config.dist.plist	2013-10-31 22:23:31 UTC (rev 11861)
@@ -50,10 +50,19 @@
 			<integer>8080</integer>
 		</dict>
 
-		<!--  Define whether client data should be saved and re-used. -->
+		<!--  Define whether server supports stats socket. -->
+		<key>serverStats</key>
+		<dict>
+			<key>enabled</key>
+			<true/>
+			<key>Port</key>
+			<integer>8100</integer>
+		</dict>
+
+		<!--  Define whether client data should be re-used. It will always be saved to the specified path.-->
 		<key>clientDataSerialization</key>
 		<dict>
-			<key>Enabled</key>
+			<key>UseOldData</key>
 			<true/>
 			<key>Path</key>
 			<string>/tmp/sim</string>
@@ -119,471 +128,6 @@
 
 		</dict>
 
-		<!-- Define the kinds of software and user behavior the load simulation 
-			will simulate. -->
-		<key>clients</key>
-
-		<!-- Have as many different kinds of software and user behavior configurations 
-			as you want. Each is a dict -->
-		<array>
-
-			<dict>
-
-				<!-- Here is a OS X client simulator. -->
-				<key>software</key>
-				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
-
-				<!-- Arguments to use to initialize the OS_X_10_7 instance. -->
-				<key>params</key>
-				<dict>
-					<!-- Name that appears in logs. -->
-					<key>title</key>
-					<string>10.7</string>
-
-					<!-- OS_X_10_7 can poll the calendar home at some interval. This is 
-						in seconds. -->
-					<key>calendarHomePollInterval</key>
-					<integer>30</integer>
-
-					<!-- If the server advertises xmpp push, OS_X_10_7 can wait for notifications 
-						about calendar home changes instead of polling for them periodically. If 
-						this option is true, then look for the server advertisement for xmpp push 
-						and use it if possible. Still fall back to polling if there is no xmpp push 
-						advertised. -->
-					<key>supportPush</key>
-					<false />
-				</dict>
-
-				<!-- The profiles define certain types of user behavior on top of the 
-					client software being simulated. -->
-				<key>profiles</key>
-				<array>
-
-					<!-- First an event-creating profile, which will periodically create 
-						new events at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Eventer</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new event. -->
-							<key>interval</key>
-							<integer>60</integer>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<true/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile invites new attendees to existing events. 
-					     This profile should no longer be used - use RealisticInviter instead. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Inviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.NormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mu gives the mean of the normal distribution (in seconds). -->
-									<key>mu</key>
-									<integer>60</integer>
-
-									<!-- and sigma gives its standard deviation. -->
-									<key>sigma</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event. Each 
-								set of credentials loaded by the load tester has an index; samples from this 
-								distribution will be added to that index to arrive at the index of some other 
-								credentials, which will be the target of the invitation. -->
-							<key>inviteeDistanceDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>-100</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>101</integer>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile invites some number of new attendees to new events. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.NormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mu gives the mean of the normal distribution (in seconds). -->
-									<key>mu</key>
-									<integer>60</integer>
-
-									<!-- and sigma gives its standard deviation. -->
-									<key>sigma</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event.
-							
-								When inviteeClumping is turned on each invitee is based on a sample of
-								users "close to" the organizer based on account index. If the clumping
-								is too "tight" for the requested number of attendees, then invites for
-								those larger numbers will simply fail (the sim will report that situation).
-								
-								When inviteeClumping is off invitees will be sampled across an entire
-								range of account indexes. In this case the distribution ought to be a
-								UniformIntegerDistribution with min=0 and max set to the number of accounts.
-							-->
-							<key>inviteeDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>-100</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>101</integer>
-								</dict>
-							</dict>
-
-							<key>inviteeClumping</key>
-							<true/>
-
-							<!-- Define the distribution of how many attendees will be invited to an event.
-							
-								LogNormal is the best fit to observed data.
-
-
-								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
-								mode should typically be 1, and mean whatever matches the user behavior.
-								Our typical mean is 6. 							
-							     -->
-							<key>inviteeCountDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>1</integer>
-									<!-- median - 50% point -->
-									<key>median</key>
-									<integer>6</integer>
-									<!-- maximum -->
-									<key>maximum</key>
-									<real>100</real>
-								</dict>
-							</dict>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The values
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<true/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile accepts invitations to events, handles cancels, and
-					     handles replies received. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Accepter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define how long to wait after seeing a new invitation before
-								accepting it.
-
-								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
-								(i.e., half of the users have accepted by that time).
-							-->
-							<key>acceptDelayDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>300</integer>
-									<!-- median - 50% done-->
-									<key>median</key>
-									<integer>1800</integer>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- A task-creating profile, which will periodically create 
-						new tasks at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Tasker</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new task. -->
-							<key>interval</key>
-							<integer>300</integer>
-
-							<!-- Define how due times (DUE) for the randomly generated tasks 
-								will be selected. This is an example of a "Distribution" parameter. The values
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>taskDueDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-				</array>
-
-				<!-- Determine the frequency at which this client configuration will 
-					appear among the clients created by the load tester. -->
-				<key>weight</key>
-				<integer>1</integer>
-			</dict>
-		</array>
-
 		<!-- Define some log observers to report on the load test. -->
 		<key>observers</key>
 		<array>

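The comments above treat each "Distribution" stanza as interchangeable: a dotted "type" name plus a "params" dict. A minimal sketch of that pattern in Python, using a stand-in class with the same min/max semantics rather than the real contrib.performance.stats implementation (the real loader resolves dotted names the way sim.py does for similar config entries, via Twisted's namedAny):

    import random

    def makeDistribution(stanza, registry):
        # Resolve the plist "type" name and instantiate it with the "params" dict.
        cls = registry[stanza["type"]]
        return cls(**stanza["params"])

    class UniformIntegerDistribution(object):
        # Stand-in with the semantics documented above: min inclusive, max exclusive.
        def __init__(self, min, max):
            self._min = min
            self._max = max

        def sample(self):
            return random.randrange(self._min, self._max)

    stanza = {
        "type": "contrib.performance.stats.UniformIntegerDistribution",
        "params": {"min": 0, "max": 99},
    }
    dist = makeDistribution(stanza, {stanza["type"]: UniformIntegerDistribution})
    print(dist.sample())  # some account index offset in [0, 99)
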
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/config.plist
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/config.plist	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/config.plist	2013-10-31 22:23:31 UTC (rev 11861)
@@ -37,10 +37,19 @@
 			<integer>8080</integer>
 		</dict>
 
-		<!--  Define whether client data should be saved and re-used. -->
+		<!--  Define whether server supports stats socket. -->
+		<key>serverStats</key>
+		<dict>
+			<key>enabled</key>
+			<true/>
+			<key>Port</key>
+			<integer>8100</integer>
+		</dict>
+
+		<!--  Define whether client data should be re-used. It will always be saved to the specified path. -->
 		<key>clientDataSerialization</key>
 		<dict>
-			<key>Enabled</key>
+			<key>UseOldData</key>
 			<true/>
 			<key>Path</key>
 			<string>/tmp/sim</string>
@@ -106,429 +115,6 @@
 
 		</dict>
 
-		<!-- Define the kinds of software and user behavior the load simulation
-			will simulate. -->
-		<key>clients</key>
-
-		<!-- Have as many different kinds of software and user behavior configurations
-			as you want. Each is a dict. -->
-		<array>
-
-			<dict>
-
-				<!-- Here is an OS X client simulator. -->
-				<key>software</key>
-				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
-
-				<!-- Arguments to use to initialize the OS_X_10_7 instance. -->
-				<key>params</key>
-				<dict>
-					<!-- Name that appears in logs. -->
-					<key>title</key>
-					<string>10.7</string>
-	
-					<!-- OS_X_10_7 can poll the calendar home at some interval. This is
-						in seconds. -->
-					<key>calendarHomePollInterval</key>
-					<integer>30</integer>
-
-					<!-- If the server advertises xmpp push, OS_X_10_7 can wait for notifications
-						about calendar home changes instead of polling for them periodically. If
-						this option is true, then look for the server advertisement for xmpp push
-						and use it if possible. Still fall back to polling if there is no xmpp push
-						advertised. -->
-					<key>supportPush</key>
-					<false />
-
-					<key>supportAmpPush</key>
-					<true/>
-					<key>ampPushHost</key>
-					<string>localhost</string>
-					<key>ampPushPort</key>
-					<integer>62311</integer>
-				</dict>
-
-				<!-- The profiles define certain types of user behavior on top of the
-					client software being simulated. -->
-				<key>profiles</key>
-				<array>
-
-					<!-- First an event-creating profile, which will periodically create
-						new events at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Eventer</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the interval (in seconds) at which this profile will use
-								its client to create a new event. -->
-							<key>interval</key>
-							<integer>60</integer>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events
-								will be selected. This is an example of a "Distribution" parameter. The values
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps
-									in the near future, limited to certain days of the week and certain hours
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<true/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile invites some number of new attendees to new events. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.NormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mu gives the mean of the normal distribution (in seconds). -->
-									<key>mu</key>
-									<integer>60</integer>
-
-									<!-- and sigma gives its standard deviation. -->
-									<key>sigma</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event.
-							
-								When inviteeClumping is turned on, each invitee is drawn from a sample of
-								users "close to" the organizer based on account index. If the clumping
-								is too "tight" for the requested number of attendees, then invites for
-								those larger numbers will simply fail (the sim will report that situation).
-								
-								When inviteeClumping is off, invitees will be sampled across the entire
-								range of account indexes. In this case the distribution ought to be a
-								UniformIntegerDistribution with min=0 and max set to the number of accounts.
-							-->
-							<key>inviteeDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>0</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>99</integer>
-								</dict>
-							</dict>
-
-							<key>inviteeClumping</key>
-							<true/>
-
-							<!-- Define the distribution of how many attendees will be invited to an event.
-							
-								LogNormal is the best fit to observed data.
-
-
-								For LogNormal, "mode" is the peak and "median" is the median value. For invites,
-								mode should typically be 1, and median whatever matches the user behavior.
-								Our typical median is 6.
-							     -->
-							<key>inviteeCountDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>1</integer>
-									<!-- median - 50% point -->
-									<key>median</key>
-									<integer>6</integer>
-									<!-- maximum -->
-									<key>maximum</key>
-									<real>60</real>
-								</dict>
-							</dict>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events
-								will be selected. This is an example of a "Distribution" parameter. The values
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps
-									in the near future, limited to certain days of the week and certain hours
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<true/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile accepts invitations to events, handles cancels, and
-					     handles replies received. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Accepter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define how long to wait after seeing a new invitation before
-								accepting it.
-
-								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
-								(i.e., half of the users have accepted by that time).
-							-->
-							<key>acceptDelayDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>300</integer>
-									<!-- median - 50% done-->
-									<key>median</key>
-									<integer>1800</integer>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- A task-creating profile, which will periodically create
-						new tasks at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Tasker</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the interval (in seconds) at which this profile will use
-								its client to create a new task. -->
-							<key>interval</key>
-							<integer>300</integer>
-
-							<!-- Define how due times (DUE) for the randomly generated tasks
-								will be selected. This is an example of a "Distribution" parameter. The values
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>taskDueDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps
-									in the near future, limited to certain days of the week and certain hours
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-				</array>
-
-				<!-- Determine the frequency at which this client configuration will
-					appear among the clients created by the load tester. -->
-				<key>weight</key>
-				<integer>1</integer>
-			</dict>
-		</array>
-
 		<!-- Define some log observers to report on the load test. -->
 		<key>observers</key>
 		<array>

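For orientation, a sketch of how the two new stanzas above (serverStats and the reworked clientDataSerialization) are consumed, mirroring the sim.py hunk later in this change; the values are the samples from the plist and the code is illustrative rather than the committed implementation:

    import os
    import shutil

    config = {
        "serverStats": {"enabled": True, "Port": 8100},
        "clientDataSerialization": {"UseOldData": True, "Path": "/tmp/sim"},
    }

    # Keep the stats-socket settings only when enabled.
    serverStats = config["serverStats"] if config["serverStats"]["enabled"] else None

    # Client data is always written under Path; UseOldData only decides whether
    # an existing directory is wiped before the run starts.
    serialization = config["clientDataSerialization"]
    path = serialization["Path"]
    if not serialization["UseOldData"] and os.path.isdir(path):
        shutil.rmtree(path)
    if not os.path.isdir(path):
        os.mkdir(path)
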
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/population.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/population.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/population.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -396,6 +396,7 @@
         self._failed_clients = []
         self._failed_sim = collections.defaultdict(int)
         self._startTime = datetime.now()
+        self._expired_data = None
 
         # Load parameters from config
         if "thresholdsPath" in params:
@@ -423,6 +424,13 @@
             self._fail_cut_off = params["failCutoff"]
 
 
+    def observe(self, event):
+        if event.get('type') == 'sim-expired':
+            self.simExpired(event)
+        else:
+            super(ReportStatistics, self).observe(event)
+
+
     def countUsers(self):
         return len(self._users)
 
@@ -454,6 +462,10 @@
         self._failed_sim[event['reason']] += 1
 
 
+    def simExpired(self, event):
+        self._expired_data = event['reason']
+
+
     def printMiscellaneous(self, output, items):
         maxColumnWidth = str(len(max(items.iterkeys(), key=len)))
         fmt = "%" + maxColumnWidth + "s : %-s\n"
@@ -480,7 +492,7 @@
             if result is not None:
                 differences.append(result)
 
-        return mean(differences) if differences else "None"
+        return ("%-8.4f" % mean(differences)) if differences else "None"
 
 
     def qos_value(self, method, value):
@@ -518,7 +530,7 @@
             'Start time': self._startTime.strftime('%m/%d %H:%M:%S'),
             'Run time': "%02d:%02d:%02d" % (runHours, runMinutes, runSeconds),
             'CPU Time': "user %-5.2f sys %-5.2f total %02d:%02d:%02d" % (cpuUser, cpuSys, cpuHours, cpuMinutes, cpuSeconds,),
-            'QoS': "%-8.4f" % (self.qos(),),
+            'QoS': self.qos(),
         }
         if self.countClientFailures() > 0:
             items['Failed clients'] = self.countClientFailures()
@@ -527,8 +539,22 @@
         if self.countSimFailures() > 0:
             for reason, count in self._failed_sim.items():
                 items['Failed operation'] = "%s : %d times" % (reason, count,)
+        output.write("* Client\n")
         self.printMiscellaneous(output, items)
         output.write("\n")
+
+        if self._expired_data is not None:
+            items = {
+                "Req/sec" : "%.1f" % (self._expired_data[0],),
+                "Response": "%.1f (ms)" % (self._expired_data[1],),
+                "Slots": "%.2f" % (self._expired_data[2],),
+                "CPU": "%.1f%%" % (self._expired_data[3],),
+            }
+            output.write("* Server (Last 5 minutes)\n")
+            self.printMiscellaneous(output, items)
+            output.write("\n")
+        output.write("* Details\n")
+
         self.printHeader(output, [
                 (label, width)
                 for (label, width, _ignore_fmt)

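The observe()/simExpired() additions above route a single "sim-expired" log event (emitted by sim.py at shutdown, see the next hunk) into the end-of-run report. A self-contained sketch of that flow, with invented numbers:

    class MiniReport(object):
        def __init__(self):
            self._expired_data = None

        def observe(self, event):
            # Route the special event type; everything else would fall through to
            # the normal statistics handling in the real ReportStatistics class.
            if event.get("type") == "sim-expired":
                self._expired_data = event["reason"]

        def report(self):
            if self._expired_data is None:
                return "* Server stats not captured\n"
            reqPerSec, responseMs, slots, cpu = self._expired_data
            return (
                "* Server (Last 5 minutes)\n"
                "  Req/sec : %.1f\n"
                "  Response: %.1f (ms)\n"
                "  Slots   : %.2f\n"
                "  CPU     : %.1f%%\n"
            ) % (reqPerSec, responseMs, slots, cpu)

    observer = MiniReport()
    observer.observe({"type": "sim-expired", "reason": (42.0, 85.3, 1.25, 37.5)})
    print(observer.report())
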
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/sim.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/sim.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/sim.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -23,11 +23,15 @@
 from plistlib import readPlist
 from random import Random
 from sys import argv, stdout
+from urlparse import urlsplit
 from xml.parsers.expat import ExpatError
+import json
+import shutil
+import socket
 
 from twisted.python import context
 from twisted.python.filepath import FilePath
-from twisted.python.log import startLogging, addObserver, removeObserver
+from twisted.python.log import startLogging, addObserver, removeObserver, msg
 from twisted.python.usage import UsageError, Options
 from twisted.python.reflect import namedAny
 
@@ -56,6 +60,11 @@
 
 
 
+def safeDivision(value, total, factor=1):
+    return value * factor / total if total else 0
+
+
+
 def generateRecords(count, uidPattern="user%d", passwordPattern="user%d",
     namePattern="User %d", emailPattern="user%d at example.com"):
     for i in xrange(count):
@@ -121,6 +130,7 @@
     """
     config = None
     _defaultConfig = FilePath(__file__).sibling("config.plist")
+    _defaultClients = FilePath(__file__).sibling("clients.plist")
 
     optParameters = [
         ("runtime", "t", None,
@@ -129,6 +139,9 @@
         ("config", None, _defaultConfig,
          "Configuration plist file name from which to read simulation parameters.",
          FilePath),
+        ("clients", None, _defaultClients,
+         "Configuration plist file name from which to read client parameters.",
+         FilePath),
         ]
 
 
@@ -181,7 +194,23 @@
         finally:
             configFile.close()
 
+        try:
+            clientFile = self['clients'].open()
+        except IOError, e:
+            raise UsageError("--clients %s: %s" % (
+                    self['clients'].path, e.strerror))
+        try:
+            try:
+                client_config = readPlist(clientFile)
+                self.config["clients"] = client_config["clients"]
+                if "arrivalInterval" in client_config:
+                    self.config["arrival"]["params"]["interval"] = client_config["arrivalInterval"]
+            except ExpatError, e:
+                raise UsageError("--clients %s: %s" % (self['clients'].path, e))
+        finally:
+            clientFile.close()
 
+
 Arrival = namedtuple('Arrival', 'factory parameters')
 
 
@@ -200,7 +229,7 @@
         user information about the accounts on the server being put
         under load.
     """
-    def __init__(self, server, principalPathTemplate, webadminPort, serializationPath, arrival, parameters, observers=None,
+    def __init__(self, server, principalPathTemplate, webadminPort, serverStats, serializationPath, arrival, parameters, observers=None,
                  records=None, reactor=None, runtime=None, workers=None,
                  configTemplate=None, workerID=None, workerCount=1):
         if reactor is None:
@@ -208,6 +237,7 @@
         self.server = server
         self.principalPathTemplate = principalPathTemplate
         self.webadminPort = webadminPort
+        self.serverStats = serverStats
         self.serializationPath = serializationPath
         self.arrival = arrival
         self.parameters = parameters
@@ -260,15 +290,17 @@
                 principalPathTemplate = config['principalPathTemplate']
 
             if 'clientDataSerialization' in config:
-                if config['clientDataSerialization']['Enabled']:
-                    serializationPath = config['clientDataSerialization']['Path']
-                    if not isdir(serializationPath):
-                        try:
-                            mkdir(serializationPath)
-                        except OSError:
-                            print("Unable to create client data serialization directory: %s" % (serializationPath))
-                            print("Please consult the clientDataSerialization stanza of contrib/performance/loadtest/config.plist")
-                            raise
+                serializationPath = config['clientDataSerialization']['Path']
+                if not config['clientDataSerialization']['UseOldData']:
+                    shutil.rmtree(serializationPath)
+                serializationPath = config['clientDataSerialization']['Path']
+                if not isdir(serializationPath):
+                    try:
+                        mkdir(serializationPath)
+                    except OSError:
+                        print("Unable to create client data serialization directory: %s" % (serializationPath))
+                        print("Please consult the clientDataSerialization stanza of contrib/performance/loadtest/config.plist")
+                        raise
 
             if 'arrival' in config:
                 arrival = Arrival(
@@ -310,6 +342,11 @@
             if config['webadmin']['enabled']:
                 webadminPort = config['webadmin']['HTTPPort']
 
+        serverStats = None
+        if 'serverStats' in config:
+            if config['serverStats']['enabled']:
+                serverStats = config['serverStats']
+
         observers = []
         if 'observers' in config:
             for observer in config['observers']:
@@ -324,11 +361,23 @@
             records.extend(namedAny(loader)(**params))
             output.write("Loaded {0} accounts.\n".format(len(records)))
 
-        return cls(server, principalPathTemplate, webadminPort, serializationPath,
-                   arrival, parameters, observers=observers,
-                   records=records, runtime=runtime, reactor=reactor,
-                   workers=workers, configTemplate=configTemplate,
-                   workerID=workerID, workerCount=workerCount)
+        return cls(
+            server,
+            principalPathTemplate,
+            webadminPort,
+            serverStats,
+            serializationPath,
+            arrival,
+            parameters,
+            observers=observers,
+            records=records,
+            runtime=runtime,
+            reactor=reactor,
+            workers=workers,
+            configTemplate=configTemplate,
+            workerID=workerID,
+            workerCount=workerCount,
+        )
 
 
     @classmethod
@@ -409,7 +458,7 @@
     def run(self, output=stdout):
         self.attachServices(output)
         if self.runtime is not None:
-            self.reactor.callLater(self.runtime, self.reactor.stop)
+            self.reactor.callLater(self.runtime, self.stopAndReport)
         if self.webadminPort:
             self.reactor.listenTCP(self.webadminPort, server.Site(LoadSimAdminResource(self)))
         self.reactor.run()
@@ -417,16 +466,65 @@
 
     def stop(self):
         if self.ms.running:
+            self.updateStats()
             self.ms.stopService()
-            self.reactor.callLater(5, self.reactor.stop)
+            self.reactor.callLater(5, self.stopAndReport)
 
 
     def shutdown(self):
         if self.ms.running:
+            self.updateStats()
             return self.ms.stopService()
 
 
+    def updateStats(self):
+        """
+        Capture server stats from the server's stats socket.
+        """
 
+        if self.serverStats is not None:
+            _ignore_scheme, hostname, _ignore_path, _ignore_query, _ignore_fragment = urlsplit(self.server)
+            data = self.readStatsSock((hostname.split(":")[0], self.serverStats["Port"],), True)
+            if "Failed" not in data:
+                data = data["5 Minutes"]
+                result = (
+                    safeDivision(float(data["requests"]), 5 * 60),
+                    safeDivision(data["t"], data["requests"]),
+                    safeDivision(float(data["slots"]), data["requests"]),
+                    safeDivision(data["cpu"], data["requests"]),
+                )
+                msg(type="sim-expired", reason=result)
+
+
+    def stopAndReport(self):
+        """
+        Runtime has expired - capture server stats and stop.
+        """
+
+        self.updateStats()
+        self.reactor.stop()
+
+
+    def readStatsSock(self, sockname, useTCP):
+        try:
+            s = socket.socket(socket.AF_INET if useTCP else socket.AF_UNIX, socket.SOCK_STREAM)
+            s.connect(sockname)
+            data = ""
+            while True:
+                d = s.recv(1024)
+                if d:
+                    data += d
+                else:
+                    break
+            s.close()
+            data = json.loads(data)
+        except socket.error:
+            data = {"Failed": "Unable to read statistics from server: %s" % (sockname,)}
+        data["Server"] = sockname
+        return data
+
+
+
 def attachService(reactor, loadsim, service):
     """
     Attach a given L{IService} provider to the given L{IReactorCore}; cause it
@@ -557,7 +655,6 @@
 
 
     def errReceived(self, error):
-        from twisted.python.log import msg
         msg("stderr received from " + str(self.transport.pid))
         msg("    " + repr(error))
 

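The updateStats() addition above reduces the server's "5 Minutes" stats bucket to the four figures printed by population.py. A worked sketch of that arithmetic; the JSON field names come from the hunk, while the sample values and the field interpretations in the comments are inferred:

    def safeDivision(value, total, factor=1):
        return value * factor / total if total else 0

    data = {
        "5 Minutes": {
            "requests": 12600,    # requests handled in the 5 minute window
            "t": 1074780.0,       # total response time (ms)
            "slots": 15750,       # total slot usage
            "cpu": 472500.0,      # total CPU percentage
        }
    }

    bucket = data["5 Minutes"]
    result = (
        safeDivision(float(bucket["requests"]), 5 * 60),           # 42.0 requests/sec
        safeDivision(bucket["t"], bucket["requests"]),             # 85.3 ms per request
        safeDivision(float(bucket["slots"]), bucket["requests"]),  # 1.25 slots per request
        safeDivision(bucket["cpu"], bucket["requests"]),           # 37.5% CPU per request
    )
    # This tuple is what gets emitted as msg(type="sim-expired", reason=result).
    print(result)
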
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/test_sim.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/test_sim.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/loadtest/test_sim.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -253,7 +253,7 @@
         exc = self.assertRaises(
             SystemExit, StubSimulator.main, ['--config', config.path])
         self.assertEquals(
-            exc.args, (StubSimulator(None, None, None, None, None, None).run(),))
+            exc.args, (StubSimulator(None, None, None, None, None, None, None).run(),))
 
 
     def test_createSimulator(self):
@@ -264,7 +264,7 @@
         """
         server = 'http://127.0.0.7:1243/'
         reactor = object()
-        sim = LoadSimulator(server, None, None, None, None, None, reactor=reactor)
+        sim = LoadSimulator(server, None, None, None, None, None, None, reactor=reactor)
         calsim = sim.createSimulator()
         self.assertIsInstance(calsim, CalendarClientSimulator)
         self.assertIsInstance(calsim.reactor, LagTrackingReactor)
@@ -447,7 +447,7 @@
 
         reactor = object()
         sim = LoadSimulator(
-            None, None, None, None, Arrival(FakeArrival, {'x': 3, 'y': 2}), None, reactor=reactor)
+            None, None, None, None, None, Arrival(FakeArrival, {'x': 3, 'y': 2}), None, reactor=reactor)
         arrival = sim.createArrivalPolicy()
         self.assertIsInstance(arrival, FakeArrival)
         self.assertIdentical(arrival.reactor, sim.reactor)
@@ -478,7 +478,9 @@
                             "weight": 3,
                             }]}))
 
-        sim = LoadSimulator.fromCommandLine(['--config', config.path])
+        sim = LoadSimulator.fromCommandLine(
+            ['--config', config.path, '--clients', config.path]
+        )
         expectedParameters = PopulationParameters()
         expectedParameters.addClient(
             3, ClientType(OS_X_10_6, {"foo": "bar"}, [ProfileType(Eventer, {
@@ -495,7 +497,9 @@
         """
         config = FilePath(self.mktemp())
         config.setContent(writePlistToString({"clients": []}))
-        sim = LoadSimulator.fromCommandLine(['--config', config.path])
+        sim = LoadSimulator.fromCommandLine(
+            ['--config', config.path, '--clients', config.path]
+        )
         expectedParameters = PopulationParameters()
         expectedParameters.addClient(
             1, ClientType(OS_X_10_6, {}, [Eventer, Inviter, Accepter]))
@@ -528,6 +532,7 @@
             "/principals/users/%s/",
             None,
             None,
+            None,
             Arrival(lambda reactor: NullArrival(), {}),
             None, observers, reactor=Reactor())
         io = StringIO()

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/sqlusage/requests/httpTests.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/sqlusage/requests/httpTests.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/sqlusage/requests/httpTests.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -89,12 +89,21 @@
             pos = line.find(": ")
             return float(line[pos + 2:])
 
+        # Need to skip over stats that are unlabeled
         data = open(self.logFilePath).read()
         lines = data.splitlines()
-        count = extractInt(lines[4])
-        rows = extractInt(lines[5])
-        timing = extractFloat(lines[6])
-        self.result = HTTPTestBase.SQLResults(count, rows, timing)
+        offset = 0
+        while True:
+            if lines[offset] == "*** SQL Stats ***":
+                if lines[offset + 2].split()[1] != "unlabeled":
+                    count = extractInt(lines[offset + 4])
+                    rows = extractInt(lines[offset + 5])
+                    timing = extractFloat(lines[offset + 6])
+                    self.result = HTTPTestBase.SQLResults(count, rows, timing)
+                    break
+            offset += 1
+        else:
+            self.result = HTTPTestBase.SQLResults(-1, -1, 0.0)
 
         with open("%s-%d-%s" % (self.logFilePath, event_count, self.label), "w") as f:
             f.write(data)

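The scan above breaks out of a "while True:" loop once it finds an "*** SQL Stats ***" block that is not "unlabeled"; since an "else:" clause on "while True:" only runs when the loop condition becomes false, the fallback assignment in the hunk is effectively unreachable. A bounded sketch of the same scan where the fallback applies when no labeled block exists (a hypothetical helper, not the committed code; it assumes the same log layout, with the label two lines below the header and the stats four to six lines below):

    def parseSQLStats(lines, extractInt, extractFloat):
        for offset, line in enumerate(lines):
            if line == "*** SQL Stats ***" and lines[offset + 2].split()[1] != "unlabeled":
                return (
                    extractInt(lines[offset + 4]),    # statement count
                    extractInt(lines[offset + 5]),    # rows returned
                    extractFloat(lines[offset + 6]),  # SQL time
                )
        return (-1, -1, 0.0)  # no labeled block found
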
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/sqlusage/sqlusage.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/sqlusage/sqlusage.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/performance/sqlusage/sqlusage.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -127,11 +127,17 @@
         ]
         self.requestLabels = [request.label for request in requests]
 
-        # Warm-up server by doing calendar home and calendar propfinds
-        props = (davxml.resourcetype,)
-        for session in sessions:
-            session.getPropertiesOnHierarchy(URL(path=session.homeHref), props)
-            session.getPropertiesOnHierarchy(URL(path=session.calendarHref), props)
+        def _warmUp():
+            # Warm-up server by doing calendar home and child collection propfinds.
+            # Do this twice because the very first time might provision DB objects and
+            # blow any DB cache - the second time will warm the DB cache.
+            props = (davxml.resourcetype,)
+            for _ignore in range(2):
+                for session in sessions:
+                    session.getPropertiesOnHierarchy(URL(path=session.homeHref), props)
+                    session.getPropertiesOnHierarchy(URL(path=session.calendarHref), props)
+                    session.getPropertiesOnHierarchy(URL(path=session.inboxHref), props)
+                    session.getPropertiesOnHierarchy(URL(path=session.notificationHref), props)
 
         # Now loop over sets of events
         for count in event_counts:
@@ -140,6 +146,7 @@
             result = {}
             for request in requests:
                 print("  Test = %s" % (request.label,))
+                _warmUp()
                 result[request.label] = request.execute(count)
             self.results[count] = result
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/tools/fix_calendar
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/tools/fix_calendar	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/tools/fix_calendar	2013-10-31 22:23:31 UTC (rev 11861)
@@ -28,9 +28,9 @@
 def usage():
     print """Usage: xattr_fix CALENDARS
 Options:
-    
+
 CALENDARS - a list of directories that are to be treated as calendars
-    
+
 Description:
 This utility will add xattrs to the specified directories and their contents
 to make them appear to be calendars and calendar resources when used with
@@ -40,8 +40,10 @@
 root without properly preserving the xattrs.
 """
 
+
+
 def fixCalendar(path):
-    
+
     # First fix the resourcetype & getctag on the calendar
     x = xattr.xattr(path)
     x["WebDAV:{DAV:}resourcetype"] = """<?xml version='1.0' encoding='UTF-8'?>
@@ -60,7 +62,7 @@
         if not child.endswith(".ics"):
             continue
         fullpath = os.path.join(path, child)
-        
+
         # getcontenttype
         x = xattr.xattr(fullpath)
         x["WebDAV:{DAV:}getcontenttype"] = """<?xml version='1.0' encoding='UTF-8'?>
@@ -94,7 +96,7 @@
             if not os.path.exists(arg):
                 print "Path does not exist: '%s'. Ignoring." % (arg,)
                 continue
-            
+
             if os.path.basename(arg) in ("inbox", "outbox", "dropbox",):
                 print "Cannot be used on inbox, outbox or dropbox."
                 continue
@@ -103,4 +105,3 @@
 
     except Exception, e:
         sys.exit(str(e))
-    
\ No newline at end of file

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/tools/protocolanalysis.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/tools/protocolanalysis.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/contrib/tools/protocolanalysis.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -293,6 +293,12 @@
         self.userCounts = collections.defaultdict(int)
         self.userResponseTimes = collections.defaultdict(float)
 
+        self.newEvents = 0
+        self.newInvites = 0
+        self.updateEvents = 0
+        self.updateInvites = 0
+        self.attendeeInvites = 0
+
         self.otherUserCalendarRequests = {}
 
         self.currentLine = None
@@ -416,6 +422,19 @@
                 self.hourlyByStatus[" TOTAL"][timeBucketIndex] += 1
                 self.hourlyByStatus[self.currentLine.status][timeBucketIndex] += 1
 
+                if self.currentLine.status == 201:
+                    if adjustedMethod == METHOD_PUT_ICS:
+                        self.newEvents += 1
+                    elif adjustedMethod == METHOD_PUT_ORGANIZER:
+                        self.newInvites += 1
+                elif isOK:
+                    if adjustedMethod == METHOD_PUT_ICS:
+                        self.updateEvents += 1
+                    elif adjustedMethod == METHOD_PUT_ORGANIZER:
+                        self.updateInvites += 1
+                    elif adjustedMethod == METHOD_PUT_ATTENDEE:
+                        self.attendeeInvites += 1
+
                 # Cache analysis
                 if adjustedMethod == METHOD_PROPFIND_CALENDAR and self.currentLine.status == 207:
                     responses = int(self.currentLine.extended.get("responses", 0))
@@ -1029,7 +1048,10 @@
             #print("User Response times")
             #self.printUserResponseTimes(doTabs)
 
+            print("Sim values")
+            self.printSimStats(doTabs)
 
+
     def printInfo(self, doTabs):
 
         table = tables.Table()
@@ -1083,6 +1105,7 @@
         totalRequests = 0
         totalDepth = 0
         totalTime = 0.0
+        self.timeCounts = 0
         for ctr in xrange(self.timeBucketCount):
             hour = self.getHourFromIndex(ctr)
             if hour is None:
@@ -1101,12 +1124,13 @@
             totalRequests += countRequests
             totalDepth += countDepth
             totalTime += countTime
+            self.timeCounts += 1
 
         table.addFooter(
             (
                 "Total:",
                 totalRequests,
-                (1.0 * totalRequests) / self.timeBucketCount / self.resolutionMinutes / 60,
+                safePercent(totalRequests, self.timeCounts * self.resolutionMinutes * 60, 1.0),
                 safePercent(totalTime, totalRequests, 1.0),
                 safePercent(float(totalDepth), totalRequests, 1),
             ),
@@ -1545,7 +1569,38 @@
         print("")
 
 
+    def printSimStats(self, doTabs):
+        users = len(self.userCounts.keys())
+        hours = self.timeCounts / self.resolutionMinutes / 60
+        table = tables.Table()
+        table.setDefaultColumnFormats((
+                tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+                tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+                tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+                tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+                ))
+        table.addHeader(("Item", "Value", "Items, per User, per Day", "Interval (sec), per item, per user"))
+        table.addRow(("Unique Users", users, "", ""))
 
+        def _addRow(title, item):
+            table.addRow((title, item, "%.1f" % (safePercent(24 * item, hours * users, 1.0),), "%.1f" % (safePercent(hours * 60 * 60 * users, item, 1.0),),))
+
+        _addRow("New Events", self.newEvents)
+        _addRow("New Invites", self.newInvites)
+        _addRow("Updated Events", self.updateEvents)
+        _addRow("Updated Invites", self.updateInvites)
+        _addRow("Attendee Invites", self.attendeeInvites)
+        table.addRow((
+            "Recipients",
+            "%.1f" % (safePercent(sum(self.averagedHourlyByRecipientCount["iTIP Average"]), self.timeCounts, 1.0),),
+            "",
+            "",
+        ))
+        table.printTabDelimitedData() if doTabs else table.printTable()
+        print("")
+
+
+
 class TablePrinter(object):
 
     @classmethod

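The per-user figures in printSimStats() above follow from the counters: items per user per day is 24 * count / (hours * users), and the per-user interval is hours * 3600 * users / count. A worked example with invented figures; the safePercent() helper is written here with an assumed guarded-division shape consistent with how it is called in the hunk:

    def safePercent(x, y, multiplier=100.0):
        # Assumed shape: (multiplier * x) / y, guarded against y == 0.
        return (multiplier * x) / y if y else 0.0

    users = 100        # unique users seen in the log
    hours = 24.0       # hours covered by the analyzed time buckets
    newEvents = 4800   # PUTs that created new events

    perUserPerDay = safePercent(24 * newEvents, hours * users, 1.0)
    intervalSecs = safePercent(hours * 60 * 60 * users, newEvents, 1.0)

    # 48.0 new events per user per day, i.e. one every 1800.0 seconds per user.
    print("%.1f per user/day, every %.1f sec" % (perUserPerDay, intervalSecs))
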
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/support/version.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/support/version.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/support/version.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -26,7 +26,7 @@
     # Compute the version number.
     #
 
-    base_version = "5.1"
+    base_version = "5.2"
 
     branches = tuple(
         branch.format(version=base_version)
@@ -36,7 +36,7 @@
             "trunk",
         )
     )
-    
+
     source_root = dirname(dirname(__file__))
 
     for branch in branches:

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twext/enterprise/dal/syntax.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twext/enterprise/dal/syntax.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twext/enterprise/dal/syntax.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -1686,7 +1686,46 @@
             SQLFragment(' in %s mode' % (self.mode,)))
 
 
+class DatabaseLock(_LockingStatement):
+    """
+    An SQL exclusive session level advisory lock
+    """
 
+    def _toSQL(self, queryGenerator):
+        assert(queryGenerator.dialect == POSTGRES_DIALECT)
+        return SQLFragment('select pg_advisory_lock(1)')
+
+
+    def on(self, txn, *a, **kw):
+        """
+        Override on() to only execute on Postgres
+        """
+        if txn.dialect == POSTGRES_DIALECT:
+            return super(DatabaseLock, self).on(txn, *a, **kw)
+
+        return succeed(None)
+
+
+class DatabaseUnlock(_LockingStatement):
+    """
+    An SQL exclusive session level advisory unlock
+    """
+
+    def _toSQL(self, queryGenerator):
+        assert(queryGenerator.dialect == POSTGRES_DIALECT)
+        return SQLFragment('select pg_advisory_unlock(1)')
+
+
+    def on(self, txn, *a, **kw):
+        """
+        Override on() to only execute on Postgres
+        """
+        if txn.dialect == POSTGRES_DIALECT:
+            return super(DatabaseUnlock, self).on(txn, *a, **kw)
+
+        return succeed(None)
+
+
 class Savepoint(_LockingStatement):
     """
     An SQL 'savepoint' statement.

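A usage sketch for the two statements added above: on a Postgres transaction they serialize a critical section through the session-level advisory lock, while on other dialects on() simply returns an already-fired Deferred. The transaction object and the work callable are assumptions for illustration:

    from twisted.internet.defer import inlineCallbacks, returnValue

    from twext.enterprise.dal.syntax import DatabaseLock, DatabaseUnlock

    @inlineCallbacks
    def runExclusively(txn, work):
        # Take the advisory lock (a no-op outside Postgres), run the work,
        # and always release the lock afterwards.
        yield DatabaseLock().on(txn)
        try:
            result = yield work(txn)
        finally:
            yield DatabaseUnlock().on(txn)
        returnValue(result)
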
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twext/enterprise/dal/test/test_sqlsyntax.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twext/enterprise/dal/test/test_sqlsyntax.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twext/enterprise/dal/test/test_sqlsyntax.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -25,7 +25,8 @@
     TableMismatch, Parameter, Max, Len, NotEnoughValues,
     Savepoint, RollbackToSavepoint, ReleaseSavepoint, SavepointAction,
     Union, Intersect, Except, SetExpression, DALError,
-    ResultAliasSyntax, Count, QueryGenerator, ALL_COLUMNS)
+    ResultAliasSyntax, Count, QueryGenerator, ALL_COLUMNS,
+    DatabaseLock, DatabaseUnlock)
 from twext.enterprise.dal.syntax import FixedPlaceholder, NumericPlaceholder
 from twext.enterprise.dal.syntax import Function
 from twext.enterprise.dal.syntax import SchemaSyntax
@@ -1314,6 +1315,22 @@
                           SQLFragment("lock table FOO in exclusive mode"))
 
 
+    def test_databaseLock(self):
+        """
+        L{DatabaseLock} generates a ('pg_advisory_lock') statement
+        """
+        self.assertEquals(DatabaseLock().toSQL(),
+                          SQLFragment("select pg_advisory_lock(1)"))
+
+
+    def test_databaseUnlock(self):
+        """
+        L{DatabaseUnlock} generates a ('pg_advisory_unlock') statement
+        """
+        self.assertEquals(DatabaseUnlock().toSQL(),
+                          SQLFragment("select pg_advisory_unlock(1)"))
+
+
     def test_savepoint(self):
         """
         L{Savepoint} generates a ('savepoint') statement.

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twext/patches.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twext/patches.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twext/patches.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -26,6 +26,8 @@
 from twisted.python.versions import Version
 from twisted.python.modules import getModule
 
+
+
 def _hasIPv6ClientSupport():
     """
     Does the loaded version of Twisted have IPv6 client support?
@@ -34,8 +36,9 @@
     if version > lastVersionWithoutIPv6Clients:
         return True
     elif version == lastVersionWithoutIPv6Clients:
-        # It could be a snapshot of trunk or a branch with this bug fixed. Don't
-        # load the module, though, as that would be a bunch of unnecessary work.
+        # It could be a snapshot of trunk or a branch with this bug fixed.
+        # Don't load the module, though, as that would be a bunch of
+        # unnecessary work.
         return "_resolveIPv6" in (getModule("twisted.internet.tcp")
                                   .filePath.getContent())
     else:
@@ -45,8 +48,8 @@
 
 def _addBackports():
     """
-    We currently require 2 backported bugfixes from a future release of Twisted,
-    for IPv6 support:
+    We currently require 2 backported bugfixes from a future release of
+    Twisted, for IPv6 support:
 
         - U{IPv6 client support <http://tm.tl/5085>}
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/aggregate.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/aggregate.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/aggregate.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -45,13 +45,16 @@
 
         for service in services:
             if not IDirectoryService.implementedBy(service.__class__):
-                raise ValueError("Not a directory service: %s" % (service,))
+                raise ValueError(
+                    "Not a directory service: {0}".format(service)
+                )
 
             for recordType in service.recordTypes():
                 if recordType in recordTypes:
                     raise DirectoryConfigurationError(
-                        "Aggregated services may not vend the same record type: %s"
-                        % (recordType,)
+                        "Aggregated services may not vend "
+                        "the same record type: {0}"
+                        .format(recordType)
                     )
                 recordTypes.add(recordType)
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/directory.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/directory.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/directory.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -47,7 +47,7 @@
     fieldName  = FieldName
 
     normalizedFields = {
-        FieldName.guid:           lambda g: UUID(g).hex,
+        FieldName.guid: lambda g: UUID(g).hex,
         FieldName.emailAddresses: lambda e: e.lower(),
     }
 
@@ -57,9 +57,9 @@
 
 
     def __repr__(self):
-        return "<%s %r>" % (
-            self.__class__.__name__,
-            self.realmName,
+        return (
+            "<{self.__class__.__name__} {self.realmName!r}>"
+            .format(self=self)
         )
 
 
@@ -76,7 +76,9 @@
             the whole directory should be searched.
         @type records: L{set} or L{frozenset}
         """
-        return fail(QueryNotSupportedError("Unknown expression: %s" % (expression,)))
+        return fail(QueryNotSupportedError(
+            "Unknown expression: {0}".format(expression)
+        ))
 
 
     @inlineCallbacks
@@ -109,7 +111,9 @@
             elif operand == Operand.OR:
                 results |= recordsMatchingExpression
             else:
-                raise QueryNotSupportedError("Unknown operand: %s" % (operand,))
+                raise QueryNotSupportedError(
+                    "Unknown operand: {0}".format(operand)
+                )
 
         returnValue(results)
 
@@ -120,12 +124,16 @@
 
     @inlineCallbacks
     def recordWithUID(self, uid):
-        returnValue(uniqueResult((yield self.recordsWithFieldValue(FieldName.uid, uid))))
-               
+        returnValue(uniqueResult(
+            (yield self.recordsWithFieldValue(FieldName.uid, uid))
+        ))
 
+
     @inlineCallbacks
     def recordWithGUID(self, guid):
-        returnValue(uniqueResult((yield self.recordsWithFieldValue(FieldName.guid, guid))))
+        returnValue(uniqueResult(
+            (yield self.recordsWithFieldValue(FieldName.guid, guid))
+        ))
 
 
     def recordsWithRecordType(self, recordType):
@@ -136,12 +144,15 @@
     def recordWithShortName(self, recordType, shortName):
         returnValue(uniqueResult((yield self.recordsFromQuery((
             MatchExpression(FieldName.recordType, recordType),
-            MatchExpression(FieldName.shortNames, shortName ),
+            MatchExpression(FieldName.shortNames, shortName),
         )))))
 
 
     def recordsWithEmailAddress(self, emailAddress):
-        return self.recordsWithFieldValue(FieldName.emailAddresses, emailAddress)
+        return self.recordsWithFieldValue(
+            FieldName.emailAddresses,
+            emailAddress,
+        )
 
 
     def updateRecords(self, records, create=False):
@@ -168,21 +179,31 @@
     def __init__(self, service, fields):
         for fieldName in self.requiredFields:
             if fieldName not in fields or not fields[fieldName]:
-                raise ValueError("%s field is required." % (fieldName,))
+                raise ValueError("{0} field is required.".format(fieldName))
 
             if FieldName.isMultiValue(fieldName):
                 values = fields[fieldName]
                 if len(values) == 0:
-                    raise ValueError("%s field must have at least one value." % (fieldName,))
+                    raise ValueError(
+                        "{0} field must have at least one value."
+                        .format(fieldName)
+                    )
                 for value in values:
                     if not value:
-                        raise ValueError("%s field must not be empty." % (fieldName,))
+                        raise ValueError(
+                            "{0} field must not be empty.".format(fieldName)
+                        )
 
-        if fields[FieldName.recordType] not in service.recordType.iterconstants():
-            raise ValueError("Record type must be one of %r, not %r." % (
-                tuple(service.recordType.iterconstants()),
-                fields[FieldName.recordType]
-            ))
+        if (
+            fields[FieldName.recordType] not in
+            service.recordType.iterconstants()
+        ):
+            raise ValueError(
+                "Record type must be one of {0!r}, not {1!r}.".format(
+                    tuple(service.recordType.iterconstants()),
+                    fields[FieldName.recordType],
+                )
+            )
 
         # Normalize fields
         normalizedFields = {}
@@ -197,16 +218,18 @@
                 normalizedFields[name] = tuple((normalize(v) for v in value))
             else:
                 normalizedFields[name] = normalize(value)
-        
+
         self.service = service
         self.fields  = normalizedFields
 
 
     def __repr__(self):
-        return "<%s (%s)%s>" % (
-            self.__class__.__name__,
-            describe(self.recordType),
-            self.shortNames[0],
+        return (
+            "<{self.__class__.__name__} ({recordType}){shortName}>".format(
+                self=self,
+                recordType=describe(self.recordType),
+                shortName=self.shortNames[0],
+            )
         )
 
 
@@ -262,9 +285,9 @@
 
     def members(self):
         if self.recordType == RecordType.group:
-            raise NotImplementedError()
+            raise NotImplementedError("Subclasses must implement members()")
         return succeed(())
 
 
     def groups(self):
-        raise NotImplementedError()
+        raise NotImplementedError("Subclasses must implement groups()")

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/expression.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/expression.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/expression.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -72,7 +72,11 @@
     @ivar flags: L{NamedConstant} specifying additional options
     """
 
-    def __init__(self, fieldName, fieldValue, matchType=MatchType.equals, flags=None):
+    def __init__(
+        self,
+        fieldName, fieldValue,
+        matchType=MatchType.equals, flags=None
+    ):
         self.fieldName  = fieldName
         self.fieldValue = fieldValue
         self.matchType  = matchType
@@ -85,12 +89,16 @@
         if self.flags is None:
             flags = ""
         else:
-            flags = " (%s)" % (describe(self.flags),)
+            flags = " ({0})".format(describe(self.flags))
 
-        return "<%s: %r %s %r%s>" % (
-            self.__class__.__name__,
-            describe(self.fieldName),
-            describe(self.matchType),
-            describe(self.fieldValue),
-            flags
+        return (
+            "<{self.__class__.__name__}: {fieldName!r} "
+            "{matchType} {fieldValue!r}{flags}>"
+            .format(
+                self=self,
+                fieldName=describe(self.fieldName),
+                matchType=describe(self.matchType),
+                fieldValue=describe(self.fieldValue),
+                flags=flags,
+            )
         )

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/idirectory.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/idirectory.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/idirectory.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -51,16 +51,22 @@
     Directory service generic error.
     """
 
+
+
 class DirectoryConfigurationError(DirectoryServiceError):
     """
     Directory configuration error.
     """
 
+
+
 class DirectoryAvailabilityError(DirectoryServiceError):
     """
     Directory not available.
     """
 
+
+
 class UnknownRecordTypeError(DirectoryServiceError):
     """
     Unknown record type.
@@ -69,16 +75,22 @@
         DirectoryServiceError.__init__(self, token)
         self.token = token
 
+
+
 class QueryNotSupportedError(DirectoryServiceError):
     """
     Query not supported.
     """
 
+
+
 class NoSuchRecordError(DirectoryServiceError):
     """
     Record does not exist.
     """
 
+
+
 class NotAllowedError(DirectoryServiceError):
     """
     Apparently, you can't do that.
@@ -123,6 +135,7 @@
     fullNames.multiValue      = True
     emailAddresses.multiValue = True
 
+
     @staticmethod
     def isMultiValue(name):
         return getattr(name, "multiValue", False)
@@ -157,14 +170,18 @@
     A directory service may allow the editing, removal and
     addition of records.
     """
-    realmName = Attribute("The name of the authentication realm this service represents.")
+    realmName = Attribute(
+        "The name of the authentication realm this service represents."
+    )
 
+
     def recordTypes():
         """
         @return: an iterable of L{NamedConstant}s denoting the record
             types that are kept in this directory.
         """
 
+
     def recordsFromExpression(self, expression):
         """
         Find records matching an expression.
@@ -175,6 +192,7 @@
             supported by this directory service.
         """
 
+
     def recordsFromQuery(expressions, operand=Operand.AND):
         """
         Find records by composing a query consisting of an iterable of
@@ -188,6 +206,7 @@
             supported by this directory service.
         """
 
+
     def recordsWithFieldValue(fieldName, value):
         """
         Find records that have the given field name with the given
@@ -199,6 +218,7 @@
         @return: a deferred iterable of L{IDirectoryRecord}s.
         """
 
+
     def recordWithUID(uid):
         """
         Find the record that has the given UID.
@@ -207,7 +227,8 @@
         @return: a deferred iterable of L{IDirectoryRecord}s, or
             C{None} if there is no such record.
         """
-               
+
+
     def recordWithGUID(guid):
         """
         Find the record that has the given GUID.
@@ -217,6 +238,7 @@
             C{None} if there is no such record.
         """
 
+
     def recordsWithRecordType(recordType):
         """
         Find the records that have the given record type.
@@ -225,6 +247,7 @@
         @return: a deferred iterable of L{IDirectoryRecord}s.
         """
 
+
     def recordWithShortName(recordType, shortName):
         """
         Find the record that has the given record type and short name.
@@ -236,6 +259,7 @@
             C{None} if there is no such record.
         """
 
+
     def recordsWithEmailAddress(emailAddress):
         """
         Find the records that have the given email address.
@@ -245,6 +269,7 @@
             C{None} if there is no such record.
         """
 
+
     def updateRecords(records, create=False):
         """
         Updates existing directory records.
@@ -254,6 +279,7 @@
         @type create: boolean
         """
 
+
     def removeRecords(uids):
         """
         Removes the records with the given UIDs.
@@ -294,6 +320,7 @@
     service = Attribute("The L{IDirectoryService} this record exists in.")
     fields  = Attribute("A mapping with L{NamedConstant} keys.")
 
+
     def members():
         """
         Find the records that are members of this group.  Only direct
@@ -302,6 +329,7 @@
             direct members of this group.
         """
 
+
     def groups():
         """
         Find the group records that this record is a member of.  Only

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/index.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/index.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/index.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -29,7 +29,8 @@
 from twisted.python.constants import Names, NamedConstant
 from twisted.internet.defer import succeed, inlineCallbacks, returnValue
 
-from twext.who.util import ConstantsContainer, describe, uniqueResult, iterFlags
+from twext.who.util import ConstantsContainer
+from twext.who.util import describe, uniqueResult, iterFlags
 from twext.who.idirectory import FieldName as BaseFieldName
 from twext.who.expression import MatchExpression, MatchType, MatchFlags
 from twext.who.directory import DirectoryService as BaseDirectoryService
@@ -57,7 +58,10 @@
     XML directory service.
     """
 
-    fieldName = ConstantsContainer(chain(BaseDirectoryService.fieldName.iterconstants(), FieldName.iterconstants()))
+    fieldName = ConstantsContainer(chain(
+        BaseDirectoryService.fieldName.iterconstants(),
+        FieldName.iterconstants()
+    ))
 
     indexedFields = (
         BaseFieldName.recordType,
@@ -90,7 +94,7 @@
         """
         Load records.
         """
-        raise NotImplementedError("Subclasses should implement loadRecords().")
+        raise NotImplementedError("Subclasses must implement loadRecords().")
 
 
     def flush(self):
@@ -112,7 +116,9 @@
                 elif flag == MatchFlags.caseInsensitive:
                     normalize = lambda x: x.lower()
                 else:
-                    raise NotImplementedError("Unknown query flag: %s" % (describe(flag),))
+                    raise NotImplementedError(
+                        "Unknown query flag: {0}".format(describe(flag))
+                    )
 
         return predicate, normalize
 
@@ -131,16 +137,27 @@
         matchType  = expression.matchType
 
         if matchType == MatchType.startsWith:
-            indexKeys = (key for key in fieldIndex if predicate(normalize(key).startswith(matchValue)))
+            indexKeys = (
+                key for key in fieldIndex
+                if predicate(normalize(key).startswith(matchValue))
+            )
         elif matchType == MatchType.contains:
-            indexKeys = (key for key in fieldIndex if predicate(matchValue in normalize(key)))
+            indexKeys = (
+                key for key in fieldIndex
+                if predicate(matchValue in normalize(key))
+            )
         elif matchType == MatchType.equals:
             if predicate(True):
                 indexKeys = (matchValue,)
             else:
-                indexKeys = (key for key in fieldIndex if normalize(key) != matchValue)
+                indexKeys = (
+                    key for key in fieldIndex
+                    if normalize(key) != matchValue
+                )
         else:
-            raise NotImplementedError("Unknown match type: %s" % (describe(matchType),))
+            raise NotImplementedError(
+                "Unknown match type: {0}".format(describe(matchType))
+            )
 
         matchingRecords = set()
         for key in indexKeys:
@@ -165,18 +182,25 @@
         matchType  = expression.matchType
 
         if matchType == MatchType.startsWith:
-            match = lambda fieldValue: predicate(fieldValue.startswith(matchValue))
+            match = lambda fieldValue: predicate(
+                fieldValue.startswith(matchValue)
+            )
         elif matchType == MatchType.contains:
             match = lambda fieldValue: predicate(matchValue in fieldValue)
         elif matchType == MatchType.equals:
             match = lambda fieldValue: predicate(fieldValue == matchValue)
         else:
-            raise NotImplementedError("Unknown match type: %s" % (describe(matchType),))
+            raise NotImplementedError(
+                "Unknown match type: {0}".format(describe(matchType))
+            )
 
         result = set()
 
         if records is None:
-            records = (uniqueResult(values) for values in self.index[self.fieldName.uid].itervalues())
+            records = (
+                uniqueResult(values) for values
+                in self.index[self.fieldName.uid].itervalues()
+            )
 
         for record in records:
             fieldValues = record.fields.get(expression.fieldName, None)
@@ -194,11 +218,17 @@
     def recordsFromExpression(self, expression, records=None):
         if isinstance(expression, MatchExpression):
             if expression.fieldName in self.indexedFields:
-                return self.indexedRecordsFromMatchExpression(expression, records=records)
+                return self.indexedRecordsFromMatchExpression(
+                    expression, records=records
+                )
             else:
-                return self.unIndexedRecordsFromMatchExpression(expression, records=records)
+                return self.unIndexedRecordsFromMatchExpression(
+                    expression, records=records
+                )
         else:
-            return BaseDirectoryService.recordsFromExpression(self, expression, records=records)
+            return BaseDirectoryService.recordsFromExpression(
+                self, expression, records=records
+            )
 
 
 
@@ -206,6 +236,7 @@
     """
     XML directory record
     """
+
     @inlineCallbacks
     def members(self):
         members = set()
@@ -215,4 +246,6 @@
 
 
     def groups(self):
-        return self.service.recordsWithFieldValue(FieldName.memberUIDs, self.uid)
+        return self.service.recordsWithFieldValue(
+            FieldName.memberUIDs, self.uid
+        )
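
The index.py hunks above are mostly line re-wrapping, but the pattern they preserve is worth spelling out: a predicate (negation when a NOT-style flag is present) and a normalize callable (lower-casing for MatchFlags.caseInsensitive, the flag visible in the hunk above) are derived from the expression flags and then applied while scanning index keys. A rough standalone sketch of that idea, using plain strings as stand-ins for the MatchFlags constants:

    def predicate_and_normalize(flags):
        predicate = lambda x: x          # keep matches as-is
        normalize = lambda x: x          # compare values verbatim
        for flag in flags:
            if flag == "NOT":
                predicate = lambda x: not x
            elif flag == "caseInsensitive":
                normalize = lambda x: x.lower()
            else:
                raise NotImplementedError(
                    "Unknown query flag: {0}".format(flag)
                )
        return predicate, normalize

    predicate, normalize = predicate_and_normalize(["caseInsensitive"])
    keys = ["Wilfredo", "Cyrus", "wilfredo"]
    # mirrors the startsWith branch of the indexKeys generator expressions
    matches = [k for k in keys if predicate(normalize(k).startswith("wil"))]
    assert matches == ["Wilfredo", "wilfredo"]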

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/util.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/util.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/util.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -40,7 +40,7 @@
         myConstants = {}
         for constant in constants:
             if constant.name in myConstants:
-                raise ValueError("Name conflict: %r" % (constant.name,))
+                raise ValueError("Name conflict: {0}".format(constant.name))
             myConstants[constant.name] = constant
 
         self._constants = myConstants
@@ -67,7 +67,9 @@
         if result is None:
             result = value
         else:
-            raise DirectoryServiceError("Multiple values found where one expected.")
+            raise DirectoryServiceError(
+                "Multiple values found where one expected."
+            )
     return result
 
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/xml.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/xml.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twext/who/xml.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -144,9 +144,11 @@
         else:
             realmName = repr(realmName)
 
-        return "<%s %s>" % (
-            self.__class__.__name__,
-            realmName,
+        return (
+            "<{self.__class__.__name__} {realmName}>".format(
+                self=self,
+                realmName=realmName,
+            )
         )
 
 
@@ -201,7 +203,10 @@
         #
         if stat:
             self.filePath.restat()
-            cacheTag = (self.filePath.getModificationTime(), self.filePath.getsize())
+            cacheTag = (
+                self.filePath.getModificationTime(),
+                self.filePath.getsize()
+            )
             if cacheTag == self._cacheTag:
                 return
         else:
@@ -225,9 +230,13 @@
         #
         directoryNode = etree.getroot()
         if directoryNode.tag != self.element.directory.value:
-            raise ParseError("Incorrect root element: %s" % (directoryNode.tag,))
+            raise ParseError(
+                "Incorrect root element: {0}".format(directoryNode.tag)
+            )
 
-        realmName = directoryNode.get(self.attribute.realm.value, "").encode("utf-8")
+        realmName = directoryNode.get(
+            self.attribute.realm.value, ""
+        ).encode("utf-8")
 
         if not realmName:
             raise ParseError("No realm name.")
@@ -239,7 +248,9 @@
 
         for recordNode in directoryNode:
             try:
-                records.add(self.parseRecordNode(recordNode, unknownFieldElements))
+                records.add(
+                    self.parseRecordNode(recordNode, unknownFieldElements)
+                )
             except UnknownRecordTypeError as e:
                 unknownRecordTypes.add(e.token)
 
@@ -277,10 +288,14 @@
 
 
     def parseRecordNode(self, recordNode, unknownFieldElements=None):
-        recordTypeAttribute = recordNode.get(self.attribute.recordType.value, "").encode("utf-8")
+        recordTypeAttribute = recordNode.get(
+            self.attribute.recordType.value, ""
+        ).encode("utf-8")
         if recordTypeAttribute:
             try:
-                recordType = self.value.lookupByValue(recordTypeAttribute).recordType
+                recordType = (
+                    self.value.lookupByValue(recordTypeAttribute).recordType
+                )
             except (ValueError, AttributeError):
                 raise UnknownRecordTypeError(recordTypeAttribute)
         else:
@@ -357,9 +372,14 @@
             for (name, value) in record.fields.items():
                 if name == self.fieldName.recordType:
                     if value in recordTypes:
-                        recordNode.set(self.attribute.recordType.value, recordTypes[value])
+                        recordNode.set(
+                            self.attribute.recordType.value,
+                            recordTypes[value]
+                        )
                     else:
-                        raise AssertionError("Unknown record type: %r" % (value,))
+                        raise AssertionError(
+                            "Unknown record type: {0}".format(value)
+                        )
 
                 else:
                     if name in fieldNames:
@@ -376,7 +396,9 @@
                             recordNode.append(subNode)
 
                     else:
-                        raise AssertionError("Unknown field name: %r" % (name,))
+                        raise AssertionError(
+                            "Unknown field name: {0!r}".format(name)
+                        )
 
         # Walk through the record nodes in the XML tree and apply
         # updates.

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/directory.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/directory.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -533,10 +533,11 @@
         )
         for record in resources:
             guid = record.guid
-            assignments.append(("%s#calendar-proxy-write" % (guid,),
-                               record.externalProxies()))
-            assignments.append(("%s#calendar-proxy-read" % (guid,),
-                               record.externalReadOnlyProxies()))
+            if record.enabledForCalendaring:
+                assignments.append(("%s#calendar-proxy-write" % (guid,),
+                                   record.externalProxies()))
+                assignments.append(("%s#calendar-proxy-read" % (guid,),
+                                   record.externalReadOnlyProxies()))
 
         return assignments
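
With the new guard, proxy assignments are only generated for records that are enabled for calendaring. A small sketch of the resulting assignments shape, using a hypothetical record class in place of the real directory records:

    class Rec(object):
        """Hypothetical stand-in for a directory record."""
        def __init__(self, guid, enabled):
            self.guid = guid
            self.enabledForCalendaring = enabled
        def externalProxies(self):
            return set(["proxy-guid"])
        def externalReadOnlyProxies(self):
            return set()

    assignments = []
    for record in [Rec("loc-1", True), Rec("loc-2", False)]:
        if record.enabledForCalendaring:
            assignments.append(("%s#calendar-proxy-write" % (record.guid,),
                                record.externalProxies()))
            assignments.append(("%s#calendar-proxy-read" % (record.guid,),
                                record.externalReadOnlyProxies()))

    assert len(assignments) == 2               # loc-2 contributes nothing
    assert assignments[0][0] == "loc-1#calendar-proxy-write"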
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/ldapdirectory.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/ldapdirectory.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/ldapdirectory.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -391,6 +391,12 @@
 
         # Build filter
         filterstr = "(|(%s=*)(%s=*))" % (readAttr, writeAttr)
+        # ...taking into account only calendar-enabled records
+        enabledAttr = self.rdnSchema["locations"]["calendarEnabledAttr"]
+        enabledValue = self.rdnSchema["locations"]["calendarEnabledValue"]
+        if enabledAttr and enabledValue:
+            filterstr = "(&(%s=%s)%s)" % (enabledAttr, enabledValue, filterstr)
+
         attrlist = [guidAttr, readAttr, writeAttr]
 
         # Query the LDAP server
@@ -1046,7 +1052,7 @@
 
                 try:
                     record = self._ldapResultToRecord(dn, attrs, recordType)
-                    self.log.debug("Got LDAP record %s" % (record,))
+                    self.log.debug("Got LDAP record {rec}", rec=record)
 
                     if not unrestricted:
                         self.log.debug("%s is not enabled because it's not a member of group: %s" % (dn, self.restrictToGroup))
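
The added clause restricts the external-proxy query to calendar-enabled records. Assuming placeholder attribute names (the real ones come from rdnSchema["locations"]["calendarEnabledAttr"] and ["calendarEnabledValue"]), the composed LDAP filter looks like this:

    readAttr, writeAttr = "readProxy", "writeProxy"     # placeholder names
    enabledAttr, enabledValue = "calEnabled", "yes"      # placeholder schema values

    filterstr = "(|(%s=*)(%s=*))" % (readAttr, writeAttr)
    if enabledAttr and enabledValue:
        filterstr = "(&(%s=%s)%s)" % (enabledAttr, enabledValue, filterstr)

    assert filterstr == "(&(calEnabled=yes)(|(readProxy=*)(writeProxy=*)))"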

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/test/test_directory.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/test/test_directory.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/directory/test/test_directory.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -540,7 +540,167 @@
                 groups,
             )
 
+        #
+        # Now remove all external assignments, and those should take effect.
+        #
+        def fakeExternalProxiesEmpty():
+            return []
 
+        updater = GroupMembershipCacheUpdater(
+            calendaruserproxy.ProxyDBService, self.directoryService, 30, 30, 30,
+            cache=cache, useExternalProxies=True,
+            externalProxiesSource=fakeExternalProxiesEmpty)
+
+        yield updater.updateCache()
+
+        delegates = (
+
+            # record name
+            # read-write delegators
+            # read-only delegators
+            # groups delegate is in (restricted to only those groups
+            #   participating in delegation)
+
+            # Note: "transporter" is now gone for everyone
+
+            ("wsanchez",
+             set(["mercury", "apollo", "orion", "gemini"]),
+             set(["non_calendar_proxy"]),
+             set(['left_coast',
+                  'both_coasts',
+                  'recursive1_coasts',
+                  'recursive2_coasts',
+                  'gemini#calendar-proxy-write',
+                ]),
+            ),
+            ("cdaboo",
+             set(["apollo", "orion", "non_calendar_proxy"]),
+             set(["non_calendar_proxy"]),
+             set(['both_coasts',
+                  'non_calendar_group',
+                  'recursive1_coasts',
+                  'recursive2_coasts',
+                ]),
+            ),
+            ("lecroy",
+             set(["apollo", "mercury", "non_calendar_proxy"]),
+             set(),
+             set(['both_coasts',
+                  'left_coast',
+                      'non_calendar_group',
+                  'non_calendar_group',
+            ),
+        )
+
+        for name, write, read, groups in delegates:
+            delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
+
+            proxyFor = (yield delegate.proxyFor(True))
+            self.assertEquals(
+                set([p.record.guid for p in proxyFor]),
+                write,
+            )
+            proxyFor = (yield delegate.proxyFor(False))
+            self.assertEquals(
+                set([p.record.guid for p in proxyFor]),
+                read,
+            )
+            groupsIn = (yield delegate.groupMemberships())
+            uids = set()
+            for group in groupsIn:
+                try:
+                    uid = group.uid # a sub-principal
+                except AttributeError:
+                    uid = group.record.guid # a regular group
+                uids.add(uid)
+            self.assertEquals(
+                set(uids),
+                groups,
+            )
+
+        #
+        # Now add back an external assignment, and it should take effect.
+        #
+        def fakeExternalProxiesAdded():
+            return [
+                (
+                    "transporter#calendar-proxy-write",
+                    set(["8B4288F6-CC82-491D-8EF9-642EF4F3E7D0"])
+                ),
+            ]
+
+        updater = GroupMembershipCacheUpdater(
+            calendaruserproxy.ProxyDBService, self.directoryService, 30, 30, 30,
+            cache=cache, useExternalProxies=True,
+            externalProxiesSource=fakeExternalProxiesAdded)
+
+        yield updater.updateCache()
+
+        delegates = (
+
+            # record name
+            # read-write delegators
+            # read-only delegators
+            # groups delegate is in (restricted to only those groups
+            #   participating in delegation)
+
+            ("wsanchez",
+             set(["mercury", "apollo", "orion", "gemini"]),
+             set(["non_calendar_proxy"]),
+             set(['left_coast',
+                  'both_coasts',
+                  'recursive1_coasts',
+                  'recursive2_coasts',
+                  'gemini#calendar-proxy-write',
+                ]),
+            ),
+            ("cdaboo",
+             set(["apollo", "orion", "non_calendar_proxy"]),
+             set(["non_calendar_proxy"]),
+             set(['both_coasts',
+                  'non_calendar_group',
+                  'recursive1_coasts',
+                  'recursive2_coasts',
+                ]),
+            ),
+            ("lecroy",
+             set(["apollo", "mercury", "non_calendar_proxy", "transporter"]),
+             set(),
+             set(['both_coasts',
+                  'left_coast',
+                  'non_calendar_group',
+                  'transporter#calendar-proxy-write',
+                ]),
+            ),
+        )
+
+        for name, write, read, groups in delegates:
+            delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
+
+            proxyFor = (yield delegate.proxyFor(True))
+            self.assertEquals(
+                set([p.record.guid for p in proxyFor]),
+                write,
+            )
+            proxyFor = (yield delegate.proxyFor(False))
+            self.assertEquals(
+                set([p.record.guid for p in proxyFor]),
+                read,
+            )
+            groupsIn = (yield delegate.groupMemberships())
+            uids = set()
+            for group in groupsIn:
+                try:
+                    uid = group.uid # a sub-principal
+                except AttributeError:
+                    uid = group.record.guid # a regular group
+                uids.add(uid)
+            self.assertEquals(
+                set(uids),
+                groups,
+            )
+
+
     def test_diffAssignments(self):
         """
         Ensure external proxy assignment diffing works

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/resource.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/resource.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/resource.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -333,6 +333,12 @@
             else:
                 yield transaction.commit()
 
+                # Log extended item
+                if transaction.logItems:
+                    if not hasattr(request, "extendedLogItems"):
+                        request.extendedLogItems = {}
+                    request.extendedLogItems.update(transaction.logItems)
+
                 # May need to reset the last-modified header in the response as txn.commit() can change it due to pre-commit hooks
                 if response.headers.hasHeader("last-modified"):
                     response.headers.setHeader("last-modified", self.lastModified())
@@ -2551,15 +2557,6 @@
         return self._newStoreHome.hasCalendarResourceUIDSomewhereElse(uid, ok_object._newStoreObject, mode)
 
 
-    def getCalendarResourcesForUID(self, uid, allow_shared=False):
-        """
-        Return all child object resources with the specified UID.
-
-        Pass through direct to store.
-        """
-        return self._newStoreHome.getCalendarResourcesForUID(uid, allow_shared)
-
-
     def defaultAccessControlList(self):
         myPrincipal = self.principalForRecord()
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/scheduling_store/caldav/resource.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/scheduling_store/caldav/resource.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/scheduling_store/caldav/resource.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -422,8 +422,12 @@
                 authz = (yield request.locateResource(principalURL))
                 self._associatedTransaction._authz_uid = authz.record.guid
 
+        # Log extended item
+        if not hasattr(request, "extendedLogItems"):
+            request.extendedLogItems = {}
+
         # This is a local CALDAV scheduling operation.
-        scheduler = CalDAVScheduler(self._associatedTransaction, self.parent._newStoreHome.uid())
+        scheduler = CalDAVScheduler(self._associatedTransaction, self.parent._newStoreHome.uid(), logItems=request.extendedLogItems)
 
         # Do the POST processing treating
         result = (yield scheduler.doSchedulingViaPOST(originator, recipients, calendar))
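
Together with the resource.py change above, the idea is that the request carries a plain dict (extendedLogItems) which the scheduler fills in as it works, so scheduling counters end up in the extended access log entry for that request. A minimal sketch of the shared-dict wiring, with a stand-in class instead of CalDAVScheduler:

    class SchedulerSketch(object):
        """Stand-in for the scheduler; only the logItems wiring is shown."""
        def __init__(self, logItems=None):
            self.logItems = logItems
        def doSchedulingViaPOST(self):
            # the real scheduler records counters such as "itip.requests"
            if self.logItems is not None:
                self.logItems["itip.requests"] = 2

    extendedLogItems = {}       # would be attached to the HTTP request
    SchedulerSketch(logItems=extendedLogItems).doSchedulingViaPOST()
    assert extendedLogItems == {"itip.requests": 2}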

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/stdconfig.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/twistedcaldav/stdconfig.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -307,9 +307,15 @@
     "FailIfUpgradeNeeded"  : True, # Set to True to prevent the server or utility tools
                                    # tools from running if the database needs a schema
                                    # upgrade.
-    "StopAfterUpgradeTriggerFile" : "stop_after_upgrade", # if this file exists
-        # in ConfigRoot, stop the service after finishing upgrade phase
+    "StopAfterUpgradeTriggerFile" : "stop_after_upgrade",   # if this file exists in ConfigRoot, stop
+                                                            # the service after finishing upgrade phase
 
+    "UpgradeHomePrefix"    : "",    # When upgrading, only upgrade homes where the owner UID starts with
+                                    # the specified prefix. The upgrade will only be partial and only
+                                    # apply to upgrade pieces that affect entire homes. The upgrade will
+                                    # need to be run again without this prefix set to complete the overall
+                                    # upgrade.
+
     #
     # Types of service provided
     #
@@ -564,8 +570,8 @@
         }
     },
 
-    "EnableTimezonesByReference" : False, # Strip out VTIMEZONES that are known
-    "UsePackageTimezones" : False, # Use timezone data from twistedcaldav.zoneinfo - don't copy to Data directory
+    "EnableTimezonesByReference" : True, # Strip out VTIMEZONES that are known
+    "UsePackageTimezones"        : False, # Use timezone data from twistedcaldav.zoneinfo - don't copy to Data directory
 
     "EnableBatchUpload"       : True, # POST batch uploads
     "MaxResourcesBatchUpload" : 100, # Maximum number of resources in a batch POST

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/subpostgres.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/subpostgres.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/subpostgres.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -454,6 +454,10 @@
             self.deactivateDelayedShutdown()
 
         def gotReady(result):
+            """
+            We started postgres; we're responsible for stopping it later.
+            Call pgCtl status to get the pid.
+            """
             log.warn("{cmd} exited", cmd=pgCtl)
             self.shouldStopDatabase = True
             d = Deferred()
@@ -465,13 +469,32 @@
             )
             return d.addCallback(gotStatus)
 
-        def reportit(f):
-            log.failure("starting postgres", f)
+        def couldNotStart(f):
+            """
+            There was an error trying to start postgres.  Try to connect
+            because it might already be running.  In this case, we won't
+            be the one to stop it.
+            """
+            d = Deferred()
+            statusMonitor = CapturingProcessProtocol(d, None)
+            self.reactor.spawnProcess(
+                statusMonitor, pgCtl, [pgCtl, "status"],
+                env=self.env, path=self.workingDir.path,
+                uid=self.uid, gid=self.gid,
+            )
+            return d.addCallback(gotStatus).addErrback(giveUp)
+
+        def giveUp(f):
+            """
+            We can't start postgres or connect to a running instance.  Shut
+            down.
+            """
+            log.failure("Can't start or connect to postgres", f)
             self.deactivateDelayedShutdown()
             self.reactor.stop()
-            
+
         self.monitor.completionDeferred.addCallback(
-            gotReady).addErrback(reportit)
+            gotReady).addErrback(couldNotStart)
 
     shouldStopDatabase = False
 
@@ -549,6 +572,7 @@
 #        d.addCallback(maybeStopSubprocess)
 #        return d
 
+
     def hardStop(self):
         """
         Stop postgres quickly by sending it SIGQUIT
@@ -556,5 +580,5 @@
         if self._postgresPid is not None:
             try:
                 os.kill(self._postgresPid, signal.SIGQUIT)
-            except OSError: 
+            except OSError:
                 pass
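
The startup errback is now a two-stage fallback: if starting postgres fails, probe its status in case another process already owns the cluster, and only stop the reactor when that probe also fails. A rough sketch of the Deferred chaining, with simple callables standing in for the spawned pg_ctl processes:

    from twisted.internet.defer import succeed, fail

    def startPostgres():
        # stand-in for spawning "pg_ctl start"; pretend it fails
        return fail(RuntimeError("could not start postgres"))

    def couldNotStart(failure):
        # stand-in for the new errback: probe "pg_ctl status"; succeeding
        # here means some other process runs postgres and we just connect
        return succeed("already running, will just connect")

    def giveUp(failure):
        # last resort: neither starting nor connecting worked
        return "stopping the service: {0}".format(failure.getErrorMessage())

    results = []
    d = startPostgres()
    d.addErrback(couldNotStart)
    d.addErrback(giveUp)
    d.addCallback(results.append)
    assert results == ["already running, will just connect"]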

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/test/test_subpostgres.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/test/test_subpostgres.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/test/test_subpostgres.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -189,5 +189,3 @@
         cursor.execute("select * from import_test_table")
         values = cursor.fetchall()
         self.assertEquals(values, [["value1"], ["value2"]])
-
-

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/util.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/util.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/base/datastore/util.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -92,6 +92,12 @@
         return "objectWithName:%s:%s" % (homeResourceID, name)
 
 
+    # Home child objects by id
+
+    def keyForObjectWithResourceID(self, homeResourceID, resourceID):
+        return "objectWithName:%s:%s" % (homeResourceID, resourceID)
+
+
     # Home metadata (Created/Modified)
 
     def keyForHomeMetaData(self, homeResourceID):

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/file.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/file.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/file.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -128,7 +128,7 @@
     @inlineCallbacks
     def hasCalendarResourceUIDSomewhereElse(self, uid, ok_object, type):
 
-        objectResources = (yield self.objectResourcesWithUID(uid, ("inbox",)))
+        objectResources = (yield self.getCalendarResourcesForUID(uid))
         for objectResource in objectResources:
             if ok_object and objectResource._path == ok_object._path:
                 continue
@@ -140,14 +140,9 @@
 
 
     @inlineCallbacks
-    def getCalendarResourcesForUID(self, uid, allow_shared=False):
+    def getCalendarResourcesForUID(self, uid):
 
-        results = []
-        objectResources = (yield self.objectResourcesWithUID(uid, ("inbox",)))
-        for objectResource in objectResources:
-            if allow_shared or objectResource._parentCollection.owned():
-                results.append(objectResource)
-
+        results = (yield self.objectResourcesWithUID(uid, ("inbox",)))
         returnValue(results)
 
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/schedule.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/schedule.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/schedule.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -112,8 +112,8 @@
         return self._calendarHome.hasCalendarResourceUIDSomewhereElse(uid, ok_object, type)
 
 
-    def getCalendarResourcesForUID(self, uid, allow_shared=False):
-        return self._calendarHome.getCalendarResourcesForUID(uid, allow_shared)
+    def getCalendarResourcesForUID(self, uid):
+        return self._calendarHome.getCalendarResourcesForUID(uid)
 
 
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/imip/inbound.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/imip/inbound.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/imip/inbound.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -143,6 +143,7 @@
         yield scheduleNextMailPoll(self.store, seconds)
 
 
+
 def shouldDeleteAllMail(serverHostName, inboundServer, username):
     """
     Given the hostname of the calendar server, the hostname of the pop/imap
@@ -165,6 +166,7 @@
     )
 
 
+
 @inlineCallbacks
 def scheduleNextMailPoll(store, seconds):
     txn = store.newTransaction()

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -436,13 +436,13 @@
         self.assertEquals(self.flagDeletedResult, "xyzzy")
 
 
+
 class StubFactory(object):
 
     def __init__(self, actionTaken, deleteAllMail):
         self.actionTaken = actionTaken
         self.deleteAllMail = deleteAllMail
 
+
     def handleMessage(self, messageData):
         return succeed(self.actionTaken)
-
-

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/implicit.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/implicit.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/implicit.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -56,10 +56,10 @@
     STATUS_ORPHANED_CANCELLED_EVENT = 1
     STATUS_ORPHANED_EVENT = 2
 
-    def __init__(self):
+    def __init__(self, logItems=None):
 
         self.return_status = ImplicitScheduler.STATUS_OK
-        self.logItems = {}
+        self.logItems = logItems
         self.allowed_to_schedule = True
         self.suppress_refresh = False
 
@@ -383,7 +383,7 @@
             if self.txn.doing_attendee_refresh == 0:
                 delattr(self.txn, "doing_attendee_refresh")
 
-        if refreshCount:
+        if refreshCount and self.logItems is not None:
             self.logItems["itip.refreshes"] = refreshCount
 
 
@@ -925,7 +925,8 @@
         if self.action in ("create", "modify",):
             total += (yield self.processRequests())
 
-        self.logItems["itip.requests"] = total
+        if self.logItems is not None:
+            self.logItems["itip.requests"] = total
 
 
     @inlineCallbacks
@@ -1304,7 +1305,8 @@
         # First make sure we are allowed to schedule
         self.testSchedulingAllowed()
 
-        self.logItems["itip.reply"] = "reply"
+        if self.logItems is not None:
+            self.logItems["itip.reply"] = "reply"
 
         itipmsg = iTipGenerator.generateAttendeeReply(self.calendar, self.attendee, changedRids=changedRids)
 
@@ -1317,7 +1319,8 @@
         # First make sure we are allowed to schedule
         self.testSchedulingAllowed()
 
-        self.logItems["itip.reply"] = "cancel"
+        if self.logItems is not None:
+            self.logItems["itip.reply"] = "cancel"
 
         itipmsg = iTipGenerator.generateAttendeeReply(self.calendar, self.attendee, force_decline=True)
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/utils.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/utils.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/scheduling/utils.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -21,7 +21,7 @@
 log = Logger()
 
 @inlineCallbacks
-def getCalendarObjectForRecord(txn, record, uid, allow_shared=False):
+def getCalendarObjectForRecord(txn, record, uid):
     """
     Get a copy of the event for a calendar user identified by a directory record.
 
@@ -34,7 +34,7 @@
         calendar_home = yield txn.calendarHomeWithUID(record.uid)
 
         # Get matching newstore objects
-        objectResources = (yield calendar_home.getCalendarResourcesForUID(uid, allow_shared))
+        objectResources = (yield calendar_home.getCalendarResourcesForUID(uid))
 
         if len(objectResources) > 1:
             # Delete all but the first one

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/sql.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/sql.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -527,9 +527,7 @@
         # refer to calendar *object* UIDs, since calendar *resources* are an
         # HTTP protocol layer thing, not a data store thing.  (See also
         # objectResourcesWithUID.)
-        objectResources = (
-            yield self.objectResourcesWithUID(uid, ["inbox"], False)
-        )
+        objectResources = (yield self.getCalendarResourcesForUID(uid))
         for objectResource in objectResources:
             if ok_object and objectResource._resourceID == ok_object._resourceID:
                 continue
@@ -541,15 +539,22 @@
 
 
     @inlineCallbacks
-    def getCalendarResourcesForUID(self, uid, allow_shared=False):
+    def getCalendarResourcesForUID(self, uid):
+        """
+        Find all calendar object resources in the calendar home that are not in the "inbox" collection
+        and not in shared collections.
+        Cache the result of this query as it can happen multiple times during scheduling under slightly
+        different circumstances.
 
-        results = []
-        objectResources = (yield self.objectResourcesWithUID(uid, ["inbox"]))
-        for objectResource in objectResources:
-            if allow_shared or objectResource._parentCollection.owned():
-                results.append(objectResource)
+        @param uid: the UID of the calendar object resources to find
+        @type uid: C{str}
+        """
 
-        returnValue(results)
+        if not hasattr(self, "_cachedCalendarResourcesForUID"):
+            self._cachedCalendarResourcesForUID = {}
+        if uid not in self._cachedCalendarResourcesForUID:
+            self._cachedCalendarResourcesForUID[uid] = (yield self.objectResourcesWithUID(uid, ["inbox"], allowShared=False))
+        returnValue(self._cachedCalendarResourcesForUID[uid])
 
 
     @inlineCallbacks
@@ -1576,10 +1581,6 @@
                 if calsize > config.MaxResourceSize:
                     raise ObjectResourceTooBigError()
 
-        # Possible timezone stripping
-        if config.EnableTimezonesByReference:
-            component.stripKnownTimezones()
-
         # Do validation on external requests
         if internal_state == ComponentUpdateState.NORMAL:
 
@@ -1597,6 +1598,10 @@
             # calendar data
             component.normalizeCalendarUserAddresses(normalizationLookup, self.directoryService().recordWithCalendarUserAddress)
 
+        # Possible timezone stripping
+        if config.EnableTimezonesByReference:
+            component.stripKnownTimezones()
+
         # Check location/resource organizer requirement
         self.validLocationResourceOrganizer(component, inserting, internal_state)
 
@@ -1953,7 +1958,7 @@
                 user_uuid = self._parentCollection.viewerHome().uid()
                 component = PerUserDataFilter(user_uuid).filter(component.duplicate())
 
-            scheduler = ImplicitScheduler()
+            scheduler = ImplicitScheduler(logItems=self._txn.logItems)
 
             # PUT
             do_implicit_action, is_scheduling_resource = (yield scheduler.testImplicitSchedulingPUT(
@@ -2610,7 +2615,7 @@
         if not isinbox and internal_state == ComponentRemoveState.NORMAL:
             # Get data we need for implicit scheduling
             calendar = (yield self.componentForUser())
-            scheduler = ImplicitScheduler()
+            scheduler = ImplicitScheduler(logItems=self._txn.logItems)
             do_implicit_action, _ignore = (yield scheduler.testImplicitSchedulingDELETE(
                 self.calendar(),
                 self,
@@ -2929,7 +2934,7 @@
 
         # Only allow organizers to manipulate managed attachments for now
         calendar = (yield self.componentForUser())
-        scheduler = ImplicitScheduler()
+        scheduler = ImplicitScheduler(logItems=self._txn.logItems)
         is_attendee = (yield scheduler.testAttendeeEvent(self.calendar(), self, calendar,))
         if is_attendee:
             raise InvalidAttachmentOperation("Attendees are not allowed to manipulate managed attachments")
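
getCalendarResourcesForUID now memoizes its result per UID on the calendar home, since scheduling can ask for the same UID several times within one transaction. A standalone sketch of the memoization shape (the real method is an inlineCallbacks coroutine querying the store, not a synchronous call):

    class HomeSketch(object):
        """Illustrative stand-in for a calendar home; not the real class."""
        def __init__(self):
            self.queries = 0
        def _queryStore(self, uid):
            # stands in for objectResourcesWithUID(uid, ["inbox"], allowShared=False)
            self.queries += 1
            return ["resource-for-" + uid]
        def getCalendarResourcesForUID(self, uid):
            if not hasattr(self, "_cachedCalendarResourcesForUID"):
                self._cachedCalendarResourcesForUID = {}
            if uid not in self._cachedCalendarResourcesForUID:
                self._cachedCalendarResourcesForUID[uid] = self._queryStore(uid)
            return self._cachedCalendarResourcesForUID[uid]

    home = HomeSketch()
    home.getCalendarResourcesForUID("uid-test")
    home.getCalendarResourcesForUID("uid-test")    # second call is served from the cache
    assert home.queries == 1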

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/test/common.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/test/common.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/test/common.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -84,73 +84,75 @@
 
 OTHER_HOME_UID = "home_splits"
 
-test_event_text = (
-    "BEGIN:VCALENDAR\r\n"
-      "VERSION:2.0\r\n"
-      "PRODID:-//Apple Inc.//iCal 4.0.1//EN\r\n"
-      "CALSCALE:GREGORIAN\r\n"
-      "BEGIN:VTIMEZONE\r\n"
-        "TZID:US/Pacific\r\n"
-        "BEGIN:DAYLIGHT\r\n"
-          "TZOFFSETFROM:-0800\r\n"
-          "RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU\r\n"
-          "DTSTART:20070311T020000\r\n"
-          "TZNAME:PDT\r\n"
-          "TZOFFSETTO:-0700\r\n"
-        "END:DAYLIGHT\r\n"
-        "BEGIN:STANDARD\r\n"
-          "TZOFFSETFROM:-0700\r\n"
-          "RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU\r\n"
-          "DTSTART:20071104T020000\r\n"
-          "TZNAME:PST\r\n"
-          "TZOFFSETTO:-0800\r\n"
-        "END:STANDARD\r\n"
-      "END:VTIMEZONE\r\n"
-      "BEGIN:VEVENT\r\n"
-        "CREATED:20100203T013849Z\r\n"
-        "UID:uid-test\r\n"
-        "DTEND;TZID=US/Pacific:20100207T173000\r\n"
-        "TRANSP:OPAQUE\r\n"
-        "SUMMARY:New Event\r\n"
-        "DTSTART;TZID=US/Pacific:20100207T170000\r\n"
-        "DTSTAMP:20100203T013909Z\r\n"
-        "SEQUENCE:3\r\n"
-        "X-APPLE-DROPBOX:/calendars/users/wsanchez/dropbox/uid-test.dropbox\r\n"
-        "BEGIN:VALARM\r\n"
-          "X-WR-ALARMUID:1377CCC7-F85C-4610-8583-9513D4B364E1\r\n"
-          "TRIGGER:-PT20M\r\n"
-          "ATTACH:Basso\r\n"
-          "ACTION:AUDIO\r\n"
-        "END:VALARM\r\n"
-      "END:VEVENT\r\n"
-    "END:VCALENDAR\r\n"
-)
+test_event_text = """BEGIN:VCALENDAR
+VERSION:2.0
+PRODID:-//Apple Inc.//iCal 4.0.1//EN
+CALSCALE:GREGORIAN
+BEGIN:VTIMEZONE
+TZID:US/Pacific
+BEGIN:DAYLIGHT
+TZOFFSETFROM:-0800
+RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
+DTSTART:20070311T020000
+TZNAME:PDT
+TZOFFSETTO:-0700
+END:DAYLIGHT
+BEGIN:STANDARD
+TZOFFSETFROM:-0700
+RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
+DTSTART:20071104T020000
+TZNAME:PST
+TZOFFSETTO:-0800
+END:STANDARD
+END:VTIMEZONE
+BEGIN:VEVENT
+CREATED:20100203T013849Z
+UID:uid-test
+DTEND;TZID=US/Pacific:20100207T173000
+TRANSP:OPAQUE
+SUMMARY:New Event
+DTSTART;TZID=US/Pacific:20100207T170000
+DTSTAMP:20100203T013909Z
+SEQUENCE:3
+X-APPLE-DROPBOX:/calendars/users/wsanchez/dropbox/uid-test.dropbox
+BEGIN:VALARM
+X-WR-ALARMUID:1377CCC7-F85C-4610-8583-9513D4B364E1
+TRIGGER:-PT20M
+ATTACH:Basso
+ACTION:AUDIO
+END:VALARM
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n")
 
 
 
-test_event_notCalDAV_text = (
-    "BEGIN:VCALENDAR\r\n"
-      "VERSION:2.0\r\n"
-      "PRODID:-//Apple Inc.//iCal 4.0.1//EN\r\n"
-      "CALSCALE:GREGORIAN\r\n"
-      "BEGIN:VEVENT\r\n"
-        "CREATED:20100203T013849Z\r\n"
-        "UID:test\r\n"
-        "DTEND;TZID=US/Pacific:20100207T173000\r\n" # TZID without VTIMEZONE
-        "TRANSP:OPAQUE\r\n"
-        "SUMMARY:New Event\r\n"
-        "DTSTART;TZID=US/Pacific:20100207T170000\r\n"
-        "DTSTAMP:20100203T013909Z\r\n"
-        "SEQUENCE:3\r\n"
-        "BEGIN:VALARM\r\n"
-          "X-WR-ALARMUID:1377CCC7-F85C-4610-8583-9513D4B364E1\r\n"
-          "TRIGGER:-PT20M\r\n"
-          "ATTACH:Basso\r\n"
-          "ACTION:AUDIO\r\n"
-        "END:VALARM\r\n"
-      "END:VEVENT\r\n"
-    "END:VCALENDAR\r\n"
-)
+test_event_notCalDAV_text = """BEGIN:VCALENDAR
+VERSION:2.0
+PRODID:-//Apple Inc.//iCal 4.0.1//EN
+CALSCALE:GREGORIAN
+BEGIN:VEVENT
+CREATED:20100203T013849Z
+UID:test-bad1
+DTEND:20100207T173000Z
+TRANSP:OPAQUE
+SUMMARY:New Event
+DTSTART:20100207T170000Z
+DTSTAMP:20100203T013909Z
+SEQUENCE:3
+END:VEVENT
+BEGIN:VEVENT
+CREATED:20100203T013849Z
+UID:test-bad2
+DTEND:20100207T173000Z
+TRANSP:OPAQUE
+SUMMARY:New Event
+DTSTART:20100207T170000Z
+DTSTAMP:20100203T013909Z
+SEQUENCE:3
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n")
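
The rewritten fixtures rely on the trailing .replace("\n", "\r\n") to turn the readable triple-quoted block back into CRLF-delimited iCalendar text, equivalent to the old concatenated "\r\n" literals. A tiny self-contained check of that equivalence:

    text = "BEGIN:VCALENDAR\nVERSION:2.0\nEND:VCALENDAR\n".replace("\n", "\r\n")
    assert text == "BEGIN:VCALENDAR\r\nVERSION:2.0\r\nEND:VCALENDAR\r\n"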
 
 
 
@@ -450,9 +452,7 @@
         yield notifications.writeNotificationObject("abc", inviteNotification,
             inviteNotification.toxml())
 
-        yield self.commit()
-
-        # Make sure notification fired after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -460,6 +460,7 @@
                 "/CalDAV/example.com/home1/notification/",
             ])
         )
+        yield self.commit()
 
         notifications = yield self.transactionUnderTest().notificationsWithUID(
             "home1"
@@ -469,9 +470,7 @@
         abc = yield notifications.notificationObjectWithUID("abc")
         self.assertEquals(abc, None)
 
-        yield self.commit()
-
-        # Make sure notification fired after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -479,6 +478,7 @@
                 "/CalDAV/example.com/home1/notification/",
             ])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -697,11 +697,10 @@
         self.assertNotIdentical((yield home.calendarWithName(name)), None)
         calendarProperties = (yield home.calendarWithName(name)).properties()
         self.assertEqual(len(calendarProperties), 0)
+        # notify is called prior to commit
+        self.assertTrue("/CalDAV/example.com/home1/" in self.notifierFactory.history)
         yield self.commit()
 
-        # Make sure notification fired after commit
-        self.assertTrue("/CalDAV/example.com/home1/" in self.notifierFactory.history)
-
         # Make sure it's available in a new transaction; i.e. test the commit.
         home = yield self.homeUnderTest()
         self.assertNotIdentical((yield home.calendarWithName(name)), None)
@@ -915,8 +914,7 @@
                 None
             )
 
-        # Make sure notifications are fired after commit
-        yield self.commit()
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -924,6 +922,7 @@
                 "/CalDAV/example.com/home1/calendar_1/",
             ])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -1471,9 +1470,7 @@
         self.assertEquals((yield calendarObject.componentForUser()), component)
         self.assertEquals((yield calendarObject.getMetadata()), metadata)
 
-        yield self.commit()
-
-        # Make sure notifications fire after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -1481,6 +1478,7 @@
                 "/CalDAV/example.com/home1/calendar_1/",
             ])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -1591,9 +1589,7 @@
         calendarObject = yield calendar1.calendarObjectWithName("1.ics")
         self.assertEquals((yield calendarObject.componentForUser()), component)
 
-        yield self.commit()
-
-        # Make sure notification fired after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -1601,6 +1597,7 @@
                 "/CalDAV/example.com/home1/calendar_1/",
             ])
         )
+        yield self.commit()
 
 
     def checkPropertiesMethod(self, thunk):

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/test/test_util.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/test/test_util.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/caldav/datastore/test/test_util.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -525,16 +525,25 @@
                 "different-name": self.sampleEvent("other-uid", "tgt other"),
             },
         )
+
         txn = self.transactionUnderTest()
-        c1 = yield txn.calendarHomeWithUID("conflict1")
         c2 = yield txn.calendarHomeWithUID("conflict2")
         otherCal = yield c2.createCalendarWithName("othercal")
-        otherCal.createCalendarObjectWithName(
+        yield otherCal.createCalendarObjectWithName(
             "some-name", Component.fromString(
                 self.sampleEvent("oc", "target calendar")[0]
             )
         )
+        yield self.commit()
+
+        txn = self.transactionUnderTest()
+        c1 = yield txn.calendarHomeWithUID("conflict1")
+        c2 = yield txn.calendarHomeWithUID("conflict2")
         yield migrateHome(c1, c2, merge=True)
+        yield self.commit()
+
+        txn = self.transactionUnderTest()
+        c2 = yield txn.calendarHomeWithUID("conflict2")
         targetCal = yield c2.calendarWithName("conflicted")
         yield self.checkSummary("same-name", "target", targetCal)
         yield self.checkSummary("different-name", "tgt other", targetCal)

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/carddav/datastore/test/common.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/carddav/datastore/test/common.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/carddav/datastore/test/common.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -371,11 +371,10 @@
         #self.assertIdentical((yield home.addressbookWithName(name)), None)
         yield home.removeAddressBookWithName(name)
         self.assertNotIdentical((yield home.addressbookWithName(name)), None)
+        # notify is called prior to commit
+        self.assertTrue("/CardDAV/example.com/home1/" in self.notifierFactory.history)
         yield self.commit()
 
-        # Make sure notification fired after commit
-        self.assertTrue("/CardDAV/example.com/home1/" in self.notifierFactory.history)
-
         # Make sure it's available in a new transaction; i.e. test the commit.
         home = yield self.homeUnderTest()
         self.assertNotIdentical((yield home.addressbookWithName(name)), None)
@@ -396,9 +395,7 @@
             ab = yield home.addressbookWithName(name)
             self.assertEquals((yield ab.listAddressBookObjects()), [])
 
-        yield self.commit()
-
-        # Make sure notification fired after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -407,7 +404,9 @@
             ])
         )
 
+        yield self.commit()
 
+
     @inlineCallbacks
     def test_removeAddressBookWithName_absent(self):
         """
@@ -530,8 +529,6 @@
                 (yield addressbook.addressbookObjectWithName(name)), None
             )
 
-        # Make sure notifications are fired after commit
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -692,9 +689,7 @@
         addressbookObject = yield addressbook1.addressbookObjectWithName(name)
         self.assertEquals((yield addressbookObject.component()), component)
 
-        yield self.commit()
-
-        # Make sure notifications fire after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -703,7 +698,9 @@
             ])
         )
 
+        yield self.commit()
 
+
     @inlineCallbacks
     def test_createAddressBookObjectWithName_exists(self):
         """
@@ -808,9 +805,7 @@
         addressbookObject = yield addressbook1.addressbookObjectWithName("1.vcf")
         self.assertEquals((yield addressbookObject.component()), component)
 
-        yield self.commit()
-
-        # Make sure notification fired after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -819,7 +814,9 @@
             ])
         )
 
+        yield self.commit()
 
+
     def checkPropertiesMethod(self, thunk):
         """
         Verify that the given object has a properties method that returns an

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/file.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/file.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/file.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -926,6 +926,7 @@
         return (self._notifierPrefix, self.uid(),)
 
 
+    @inlineCallbacks
     def notifyChanged(self):
         """
         Trigger a notification of a change
@@ -933,8 +934,14 @@
 
         # Only send one set of change notifications per transaction
         if self._notifiers and not self._transaction.isNotifiedAlready(self):
-            for notifier in self._notifiers.values():
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get("cache", None)
+            if notifier:
                 self._transaction.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get("push", None)
+            if notifier:
+                yield notifier.notify(self._transaction)
             self._transaction.notificationAddedForObject(self)
 
 
@@ -1272,6 +1279,7 @@
         return self.ownerHome().notifierID()
 
 
+    @inlineCallbacks
     def notifyChanged(self):
         """
         Trigger a notification of a change
@@ -1279,8 +1287,14 @@
 
         # Only send one set of change notifications per transaction
         if self._notifiers and not self._transaction.isNotifiedAlready(self):
-            for notifier in self._notifiers.values():
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get("cache", None)
+            if notifier:
                 self._transaction.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get("push", None)
+            if notifier:
+                yield notifier.notify(self._transaction)
             self._transaction.notificationAddedForObject(self)
 
 

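The notifyChanged() changes above (and the matching ones in sql.py below) split notification delivery by notifier kind: "cache" notifiers are still deferred to a post-commit hook, while "push" notifiers now enqueue their work immediately, inside the current transaction. A minimal, self-contained sketch of that dispatch shape (the transaction and resource classes here are simplified stand-ins, not the real store objects):

    from twisted.internet.defer import inlineCallbacks

    class FakeTransaction(object):
        """Simplified stand-in for a store transaction (illustration only)."""
        def __init__(self):
            self._postCommit = []
            self._notified = set()

        def postCommit(self, callback):
            # Real transactions run these callbacks after a successful commit.
            self._postCommit.append(callback)

        def isNotifiedAlready(self, obj):
            return obj in self._notified

        def notificationAddedForObject(self, obj):
            self._notified.add(obj)

    class NotifyingResource(object):
        """Sketch of the new per-kind notifier dispatch."""
        def __init__(self, transaction, notifiers):
            self._transaction = transaction
            self._notifiers = notifiers  # e.g. {"cache": ..., "push": ...}

        @inlineCallbacks
        def notifyChanged(self):
            # Only send one set of change notifications per transaction.
            if self._notifiers and not self._transaction.isNotifiedAlready(self):
                # cache notifiers run in post commit
                notifier = self._notifiers.get("cache")
                if notifier:
                    self._transaction.postCommit(notifier.notify)
                # push notifiers add their work items immediately
                notifier = self._notifiers.get("push")
                if notifier:
                    yield notifier.notify(self._transaction)
                self._transaction.notificationAddedForObject(self)

This is also why the unit tests above now check self.notifierFactory.history before calling self.commit(): push notifications are observable as soon as notifyChanged() runs.
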
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -29,9 +29,10 @@
 
 from pycalendar.datetime import PyCalendarDateTime
 
-from twext.enterprise.dal.syntax import \
-    Delete, utcNowSQL, Union, Insert, Len, Max, Parameter, SavepointAction, \
-    Select, Update, ColumnSyntax, TableSyntax, Upper, Count, ALL_COLUMNS, Sum
+from twext.enterprise.dal.syntax import (
+    Delete, utcNowSQL, Union, Insert, Len, Max, Parameter, SavepointAction,
+    Select, Update, ColumnSyntax, TableSyntax, Upper, Count, ALL_COLUMNS, Sum,
+    DatabaseLock, DatabaseUnlock)
 from twext.enterprise.ienterprise import AlreadyFinishedError
 from twext.enterprise.queue import LocalQueuer
 from twext.enterprise.util import parseSQLTimestamp
@@ -315,6 +316,7 @@
         self.label = label
         self.logFileName = logFileName
         self.statements = []
+        self.startTime = time.time()
 
 
     def startStatement(self, sql, args):
@@ -330,7 +332,7 @@
         """
         args = ["%s" % (arg,) for arg in args]
         args = [((arg[:10] + "...") if len(arg) > 40 else arg) for arg in args]
-        self.statements.append(["%s %s" % (sql, args,), 0, 0])
+        self.statements.append(["%s %s" % (sql, args,), 0, 0, 0])
         return len(self.statements) - 1, time.time()
 
 
@@ -344,8 +346,10 @@
         @type rows: C{int}
         """
         index, tstamp = context
+        t = time.time()
         self.statements[index][1] = len(rows) if rows else 0
-        self.statements[index][2] = time.time() - tstamp
+        self.statements[index][2] = t - tstamp
+        self.statements[index][3] = t
 
 
     def printReport(self):
@@ -353,19 +357,28 @@
         Print a report of all the SQL statements executed to date.
         """
 
+        total_statements = len(self.statements)
+        total_rows = sum([statement[1] for statement in self.statements])
+        total_time = sum([statement[2] for statement in self.statements]) * 1000.0
+
         toFile = StringIO()
         toFile.write("*** SQL Stats ***\n")
         toFile.write("\n")
         toFile.write("Label: %s\n" % (self.label,))
         toFile.write("Unique statements: %d\n" % (len(set([statement[0] for statement in self.statements]),),))
-        toFile.write("Total statements: %d\n" % (len(self.statements),))
-        toFile.write("Total rows: %d\n" % (sum([statement[1] for statement in self.statements]),))
-        toFile.write("Total time (ms): %.3f\n" % (sum([statement[2] for statement in self.statements]) * 1000.0,))
-        for sql, rows, t in self.statements:
+        toFile.write("Total statements: %d\n" % (total_statements,))
+        toFile.write("Total rows: %d\n" % (total_rows,))
+        toFile.write("Total time (ms): %.3f\n" % (total_time,))
+        t_last_end = self.startTime
+        for sql, rows, t_taken, t_end in self.statements:
             toFile.write("\n")
             toFile.write("SQL: %s\n" % (sql,))
             toFile.write("Rows: %s\n" % (rows,))
-            toFile.write("Time (ms): %.3f\n" % (t * 1000.0,))
+            toFile.write("Time (ms): %.3f\n" % (t_taken * 1000.0,))
+            toFile.write("Idle (ms): %.3f\n" % ((t_end - t_taken - t_last_end) * 1000.0,))
+            toFile.write("Elapsed (ms): %.3f\n" % ((t_end - self.startTime) * 1000.0,))
+            t_last_end = t_end
+        toFile.write("Commit (ms): %.3f\n" % ((time.time() - t_last_end) * 1000.0,))
         toFile.write("***\n\n")
 
         if self.logFileName:
@@ -373,8 +386,10 @@
         else:
             log.error(toFile.getvalue())
 
+        return (total_statements, total_rows, total_time,)
 
 
+
 class CommonStoreTransactionMonitor(object):
     """
     Object that monitors the state of a transaction over time and logs or times out
@@ -484,7 +499,9 @@
         self.iudCount = 0
         self.currentStatement = None
 
+        self.logItems = {}
 
+
     def enqueue(self, workItem, **kw):
         """
         Enqueue a L{twext.enterprise.queue.WorkItem} for later execution.
@@ -1033,7 +1050,7 @@
 
         # Do stats logging as a postCommit because there might be some pending preCommit SQL we want to log
         if self._stats:
-            self.postCommit(self._stats.printReport)
+            self.postCommit(self.statsReport)
         return self._sqlTxn.commit()
 
 
@@ -1044,6 +1061,16 @@
         return self._sqlTxn.abort()
 
 
+    def statsReport(self):
+        """
+        Print the stats report and record log items
+        """
+        sql_statements, sql_rows, sql_time = self._stats.printReport()
+        self.logItems["sql-s"] = str(sql_statements)
+        self.logItems["sql-r"] = str(sql_rows)
+        self.logItems["sql-t"] = "%.1f" % (sql_time,)
+
+
     def _oldEventsBase(self, limit):
         ch = schema.CALENDAR_HOME
         co = schema.CALENDAR_OBJECT
@@ -1376,11 +1403,11 @@
 
 
     def acquireUpgradeLock(self):
-        return self.execSQL("select pg_advisory_lock(1)")
+        return DatabaseLock().on(self)
 
 
     def releaseUpgradeLock(self):
-        return self.execSQL("select pg_advisory_unlock(1)")
+        return DatabaseUnlock().on(self)
 
 
 
@@ -1418,6 +1445,7 @@
         self._txn = transaction
         self._ownerUID = ownerUID
         self._resourceID = None
+        self._dataVersion = None
         self._childrenLoaded = False
         self._children = {}
         self._notifiers = None
@@ -1663,6 +1691,23 @@
             yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
 
 
+    @classproperty
+    def _dataVersionQuery(cls): #@NoSelf
+        ch = cls._homeSchema
+        return Select(
+            [ch.DATAVERSION], From=ch,
+            Where=ch.RESOURCE_ID == Parameter("resourceID")
+        )
+
+
+    @inlineCallbacks
+    def dataVersion(self):
+        if self._dataVersion is None:
+            self._dataVersion = (yield self._dataVersionQuery.on(
+                self._txn, resourceID=self._resourceID))[0][0]
+        returnValue(self._dataVersion)
+
+
     def name(self):
         """
         Implement L{IDataStoreObject.name} to return the uid.
@@ -2164,6 +2209,7 @@
         the resource has changed.  We ensure we only do this once per object
         per transaction.
         """
+
         if self._txn.isNotifiedAlready(self):
             returnValue(None)
         self._txn.notificationAddedForObject(self)
@@ -2174,8 +2220,14 @@
 
         # Send notifications
         if self._notifiers:
-            for notifier in self._notifiers.values():
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get("cache", None)
+            if notifier:
                 self._txn.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get("push", None)
+            if notifier:
+                yield notifier.notify(self._txn)
 
 
     @classproperty
@@ -2289,16 +2341,20 @@
         raise NotImplementedError()
 
 
-    @classproperty
-    def _objectNamesSinceRevisionQuery(cls): #@NoSelf
+    @classmethod
+    def _objectNamesSinceRevisionQuery(cls, deleted=True): #@NoSelf
         """
         DAL query for (resource, deleted-flag)
         """
         rev = cls._revisionsSchema
-        return Select([rev.RESOURCE_NAME, rev.DELETED],
-                      From=rev,
-                      Where=(rev.REVISION > Parameter("revision")).And(
-                          rev.RESOURCE_ID == Parameter("resourceID")))
+        where = (rev.REVISION > Parameter("revision")).And(rev.RESOURCE_ID == Parameter("resourceID"))
+        if not deleted:
+            where = where.And(rev.DELETED == False)
+        return Select(
+            [rev.RESOURCE_NAME, rev.DELETED],
+            From=rev,
+            Where=where,
+        )
 
 
     def resourceNamesSinceToken(self, token):
@@ -2323,10 +2379,10 @@
         """
 
         results = [
-            (name if name else "", deleted)
-            for name, deleted in
-            (yield self._objectNamesSinceRevisionQuery.on(
-                self._txn, revision=revision, resourceID=self._resourceID))
+            (name if name else "", deleted) for name, deleted in
+                (yield self._objectNamesSinceRevisionQuery(deleted=(revision != 0)).on(
+                    self._txn, revision=revision, resourceID=self._resourceID)
+                )
         ]
         results.sort(key=lambda x: x[1])
 
@@ -2989,7 +3045,9 @@
             queryCacher = self._txn._queryCacher
             if queryCacher:
                 cacheKey = queryCacher.keyForObjectWithName(shareeView._home._resourceID, shareeView._name)
-                queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+                yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+                cacheKey = queryCacher.keyForObjectWithResourceID(shareeView._home._resourceID, shareeView._resourceID)
+                yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
 
             shareeView._name = sharedname[0][0]
 
@@ -3047,7 +3105,9 @@
             queryCacher = self._txn._queryCacher
             if queryCacher:
                 cacheKey = queryCacher.keyForObjectWithName(shareeHome._resourceID, shareeChild._name)
-                queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+                yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+                cacheKey = queryCacher.keyForObjectWithResourceID(shareeHome._resourceID, shareeChild._resourceID)
+                yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
         else:
             deletedBindName = None
 
@@ -3313,10 +3373,9 @@
     def invalidateQueryCache(self):
         queryCacher = self._txn._queryCacher
         if queryCacher is not None:
-            cacheKey = queryCacher.keyForHomeChildMetaData(self._resourceID)
-            yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
-            cacheKey = queryCacher.keyForObjectWithName(self._home._resourceID, self._name)
-            yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForHomeChildMetaData(self._resourceID))
+            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithName(self._home._resourceID, self._name))
+            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID))
 
 
 
@@ -3493,6 +3552,7 @@
             if rows and queryCacher:
                 # Cache the result
                 queryCacher.setAfterCommit(home._txn, cacheKey, rows)
+                queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithResourceID(home._resourceID, rows[0][2]), rows)
 
         if not rows:
             returnValue(None)
@@ -3533,8 +3593,24 @@
         @return: an L{CommonHomeChild} or C{None} if no such child
             exists.
         """
-        rows = yield cls._bindForResourceIDAndHomeID.on(
-            home._txn, resourceID=resourceID, homeID=home._resourceID)
+
+        rows = None
+        queryCacher = home._txn._queryCacher
+
+        if queryCacher:
+            # Retrieve data from cache
+            cacheKey = queryCacher.keyForObjectWithResourceID(home._resourceID, resourceID)
+            rows = yield queryCacher.get(cacheKey)
+
+        if rows is None:
+            # No cached copy
+            rows = yield cls._bindForResourceIDAndHomeID.on(home._txn, resourceID=resourceID, homeID=home._resourceID)
+
+            if rows and queryCacher:
+                # Cache the result (under both the ID and name values)
+                queryCacher.setAfterCommit(home._txn, cacheKey, rows)
+                queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithName(home._resourceID, rows[0][3]), rows)
+
         if not rows:
             returnValue(None)
 
@@ -3715,6 +3791,8 @@
         if queryCacher:
             cacheKey = queryCacher.keyForObjectWithName(self._home._resourceID, oldName)
             yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
+            cacheKey = queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID)
+            yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
 
         yield self._renameQuery.on(self._txn, name=name,
                                    resourceID=self._resourceID,
@@ -3748,6 +3826,8 @@
         if queryCacher:
             cacheKey = queryCacher.keyForObjectWithName(self._home._resourceID, self._name)
             yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
+            cacheKey = queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID)
+            yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
 
         yield self._deletedSyncToken()
         yield self._deleteQuery.on(self._txn, NoSuchHomeChildError,
@@ -4299,8 +4379,14 @@
 
         # Send notifications
         if self._notifiers:
-            for notifier in self._notifiers.values():
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get("cache", None)
+            if notifier:
                 self._txn.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get("push", None)
+            if notifier:
+                yield notifier.notify(self._txn)
 
 
     @classproperty
@@ -4523,7 +4609,7 @@
     @inlineCallbacks
     def create(cls, parent, name, component, options=None):
 
-        child = (yield cls.objectWithName(parent, name, None))
+        child = (yield parent.objectResourceWithName(name))
         if child:
             raise ObjectResourceNameAlreadyExistsError(name)
 
@@ -5120,15 +5206,21 @@
         the resource has changed.  We ensure we only do this once per object
         per transaction.
         """
-        yield
         if self._txn.isNotifiedAlready(self):
             returnValue(None)
         self._txn.notificationAddedForObject(self)
 
         # Send notifications
         if self._notifiers:
-            for notifier in self._notifiers.values():
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get("cache", None)
+            if notifier:
                 self._txn.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get("push", None)
+            if notifier:
+                yield notifier.notify(self._txn)
+
         returnValue(None)
 
 

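Among the sql.py changes, the SQL statement logger now records a wall-clock end time per statement, so the report can show how long the transaction sat idle between statements (idle = this statement's start minus the previous statement's end), the elapsed time since the transaction started, and the final gap before commit; statsReport() then stores the totals in the transaction's logItems. A self-contained sketch of that bookkeeping (simplified names, not the real transaction code):

    import time

    class StatementTimer(object):
        """Per-statement timing: [sql, rows, time-taken, end-time]."""

        def __init__(self):
            self.startTime = time.time()
            self.statements = []

        def startStatement(self, sql):
            self.statements.append([sql, 0, 0, 0])
            return len(self.statements) - 1, time.time()

        def endStatement(self, context, rows):
            index, tstamp = context
            t = time.time()
            self.statements[index][1] = rows
            self.statements[index][2] = t - tstamp   # time taken
            self.statements[index][3] = t            # end time

        def report(self):
            t_last_end = self.startTime
            for sql, rows, t_taken, t_end in self.statements:
                idle = (t_end - t_taken - t_last_end) * 1000.0
                elapsed = (t_end - self.startTime) * 1000.0
                print("%s rows=%d time=%.3fms idle=%.3fms elapsed=%.3fms" % (
                    sql, rows, t_taken * 1000.0, idle, elapsed))
                t_last_end = t_end
            print("Commit (ms): %.3f" % ((time.time() - t_last_end) * 1000.0,))
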
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_19_to_20.sql
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_19_to_20.sql	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_19_to_20.sql	2013-10-31 22:23:31 UTC (rev 11861)
@@ -113,8 +113,12 @@
 -- Alter  ADDRESSBOOK_HOME --
 -----------------------------
 
+-- This is tricky: we have to create a new NOT NULL column and populate it, but we cannot
+-- declare it NOT NULL immediately without a default - which we do not want. So we create the
+-- column as nullable, do the updates, then add the constraint.
+
 alter table ADDRESSBOOK_HOME
-	add ("ADDRESSBOOK_PROPERTY_STORE_ID" integer not null);
+	add ("ADDRESSBOOK_PROPERTY_STORE_ID" integer);
 
 update ADDRESSBOOK_HOME
 	set	ADDRESSBOOK_PROPERTY_STORE_ID = (
@@ -133,8 +137,11 @@
 			ADDRESSBOOK_BIND.BIND_MODE = 0 and 	-- CALENDAR_BIND_MODE 'own'
 			ADDRESSBOOK_BIND.ADDRESSBOOK_RESOURCE_NAME = 'addressbook'
   	);
-	
 
+alter table ADDRESSBOOK_HOME
+	modify ("ADDRESSBOOK_PROPERTY_STORE_ID" not null);
+
+
 --------------------------------
 -- change  ADDRESSBOOK_OBJECT --
 --------------------------------

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_13_to_14.sql
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_13_to_14.sql	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_13_to_14.sql	2013-10-31 22:23:31 UTC (rev 11861)
@@ -26,6 +26,11 @@
  drop column SEEN_BY_OWNER;
 alter table CALENDAR_BIND
  drop column SEEN_BY_SHAREE;
+
+-- Don't allow nulls in the column we are about to constrain
+update CALENDAR_BIND
+	set CALENDAR_RESOURCE_NAME = 'Shared_' || CALENDAR_RESOURCE_ID || '_' || CALENDAR_HOME_RESOURCE_ID
+	where CALENDAR_RESOURCE_NAME is null;
 alter table CALENDAR_BIND
  alter column CALENDAR_RESOURCE_NAME 
   set not null;
@@ -34,6 +39,11 @@
  drop column SEEN_BY_OWNER;
 alter table ADDRESSBOOK_BIND
  drop column SEEN_BY_SHAREE;
+
+-- Don't allow nulls in the column we are about to constrain
+update ADDRESSBOOK_BIND
+	set ADDRESSBOOK_RESOURCE_NAME = 'Shared_' || ADDRESSBOOK_RESOURCE_ID || '_' || ADDRESSBOOK_HOME_RESOURCE_ID
+	where ADDRESSBOOK_RESOURCE_NAME is null;
 alter table ADDRESSBOOK_BIND
  alter column ADDRESSBOOK_RESOURCE_NAME
   set not null;

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/test/util.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/test/util.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/test/util.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -726,7 +726,7 @@
         return "/%s/%s/%s/" % (prefix, self.hostname, id)
 
 
-    def send(self, prefix, id):
+    def send(self, prefix, id, txn):
         self.history.append(self.pushKeyForId(prefix, id))
 
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/test/test_upgrade.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/test/test_upgrade.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/test/test_upgrade.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -25,8 +25,8 @@
 from twisted.trial.unittest import TestCase
 from txdav.common.datastore.sql_dump import dumpSchema
 from txdav.common.datastore.test.util import theStoreBuilder, StubNotifierFactory
-from txdav.common.datastore.upgrade.sql.upgrade import UpgradeDatabaseSchemaStep, \
-    UpgradeDatabaseAddressBookDataStep, UpgradeDatabaseCalendarDataStep
+from txdav.common.datastore.upgrade.sql.upgrade import (
+    UpgradeDatabaseSchemaStep, UpgradeDatabaseAddressBookDataStep, UpgradeDatabaseCalendarDataStep, NotAllowedToUpgrade)
 import re
 
 class SchemaUpgradeTests(TestCase):
@@ -215,12 +215,12 @@
         old_version = yield _loadVersion()
         try:
             yield upgrader.databaseUpgrade()
-        except RuntimeError:
+        except NotAllowedToUpgrade:
             pass
         except Exception:
-            self.fail("RuntimeError not raised")
+            self.fail("NotAllowedToUpgrade not raised")
         else:
-            self.fail("RuntimeError not raised")
+            self.fail("NotAllowedToUpgrade not raised")
         new_version = yield _loadVersion()
         yield _unloadOldSchema()
 

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrade.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrade.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrade.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -74,11 +74,15 @@
         yield sqlTxn.commit()
 
 
-    def stepWithFailure(self, failure):
-        return self.stepWithResult(None)
 
+class NotAllowedToUpgrade(Exception):
+    """
+    Exception indicating an upgrade is needed but we're not configured to
+    perform it.
+    """
 
 
+
 class UpgradeDatabaseCoreStep(object):
     """
     Base class for either schema or data upgrades on the database.
@@ -136,8 +140,7 @@
             self.log.error(msg)
             raise RuntimeError(msg)
         elif self.failIfUpgradeNeeded:
-                # TODO: change this exception to be upgrade-specific
-            raise RuntimeError("Database upgrade is needed but not allowed.")
+            raise NotAllowedToUpgrade()
         else:
             self.sqlStore.setUpgrading(True)
             yield self.upgradeVersion(actual_version, required_version, dialect)

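With the upgrade.py change above, a node configured with failIfUpgradeNeeded no longer raises a generic RuntimeError when the schema is behind; it raises the dedicated NotAllowedToUpgrade exception, which the updated test now catches explicitly. A rough sketch of how calling code can distinguish the cases (the runUpgradeStep wrapper is hypothetical; only the exception class mirrors the code above):

    from twisted.internet.defer import inlineCallbacks

    class NotAllowedToUpgrade(Exception):
        """
        Exception indicating an upgrade is needed but we're not configured to
        perform it.
        """

    @inlineCallbacks
    def runUpgradeStep(upgrader):
        # Hypothetical wrapper around the upgrade step's databaseUpgrade().
        try:
            yield upgrader.databaseUpgrade()
        except NotAllowedToUpgrade:
            # Schema is behind but this node must not upgrade it; stop cleanly
            # and let a node that is allowed to upgrade (or the admin) do it.
            pass
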
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -22,7 +22,8 @@
 from txdav.base.propertystore.base import PropertyName
 from txdav.common.datastore.sql_tables import _ABO_KIND_GROUP, schema
 from txdav.common.datastore.upgrade.sql.upgrades.util import updateAddressBookDataVersion, \
-    doToEachHomeNotAtVersion, removeProperty, cleanPropertyStore
+    doToEachHomeNotAtVersion, removeProperty, cleanPropertyStore, \
+    logUpgradeStatus
 from txdav.xml import element
 
 """
@@ -73,14 +74,20 @@
                 #update rest
                 yield abObject.setComponent(component)
 
+    logUpgradeStatus("Starting Addressbook Populate Members")
+
     # Do this to each calendar home not already at version 2
-    yield doToEachHomeNotAtVersion(sqlStore, schema.ADDRESSBOOK_HOME, UPGRADE_TO_VERSION, doIt)
+    yield doToEachHomeNotAtVersion(sqlStore, schema.ADDRESSBOOK_HOME, UPGRADE_TO_VERSION, doIt, "Populate Members")
 
 
 
 @inlineCallbacks
 def removeResourceType(sqlStore):
+    logUpgradeStatus("Starting Addressbook Remove Resource Type")
+
     sqlTxn = sqlStore.newTransaction()
     yield removeProperty(sqlTxn, PropertyName.fromElement(element.ResourceType))
     yield sqlTxn.commit()
     yield cleanPropertyStore()
+
+    logUpgradeStatus("End Addressbook Remove Resource Type")

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -16,12 +16,16 @@
 ##
 
 from twext.enterprise.dal.syntax import Update
-from txdav.xml.parser import WebDAVDocument
+
 from twisted.internet.defer import inlineCallbacks
+
 from twistedcaldav import caldavxml
+
 from txdav.common.datastore.sql_tables import schema
 from txdav.common.datastore.upgrade.sql.upgrades.util import rowsForProperty,\
-    removeProperty, updateCalendarDataVersion, doToEachHomeNotAtVersion
+    removeProperty, updateCalendarDataVersion, doToEachHomeNotAtVersion, \
+    logUpgradeStatus, logUpgradeError
+from txdav.xml.parser import WebDAVDocument
 
 """
 Calendar data upgrade from database version 1 to 2
@@ -50,9 +54,14 @@
     extracting the new format value from the XML property.
     """
 
+    logUpgradeStatus("Starting Move supported-component-set")
+
     sqlTxn = sqlStore.newTransaction()
     try:
+        calendar_rid = None
         rows = (yield rowsForProperty(sqlTxn, caldavxml.SupportedCalendarComponentSet))
+        total = len(rows)
+        count = 0
         for calendar_rid, value in rows:
             prop = WebDAVDocument.fromString(value).root_element
             supported_components = ",".join(sorted([comp.attributes["name"].upper() for comp in prop.children]))
@@ -63,11 +72,19 @@
                 },
                 Where=(meta.RESOURCE_ID == calendar_rid)
             ).on(sqlTxn)
+            count += 1
+            logUpgradeStatus("Move supported-component-set", count, total)
 
         yield removeProperty(sqlTxn, caldavxml.SupportedCalendarComponentSet)
         yield sqlTxn.commit()
+
+        logUpgradeStatus("End Move supported-component-set")
     except RuntimeError:
         yield sqlTxn.abort()
+        logUpgradeError(
+            "Move supported-component-set",
+            "Last calendar: {}".format(calendar_rid)
+        )
         raise
 
 
@@ -86,5 +103,7 @@
         home = yield txn.calendarHomeWithResourceID(homeResourceID)
         yield home.splitCalendars()
 
+    logUpgradeStatus("Starting Split Calendars")
+
     # Do this to each calendar home not already at version 2
-    yield doToEachHomeNotAtVersion(sqlStore, schema.CALENDAR_HOME, UPGRADE_TO_VERSION, doIt)
+    yield doToEachHomeNotAtVersion(sqlStore, schema.CALENDAR_HOME, UPGRADE_TO_VERSION, doIt, "Split Calendars")

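The calendar 1->2 upgrade above now reports progress as it walks the rows and, on failure, records which calendar it was working on. A small, self-contained sketch of that pattern (the callables are placeholders standing in for logUpgradeStatus / logUpgradeError and the real row handler):

    def processWithProgress(rows, handleRow, logStatus, logError):
        """Report a running count/total and, on error, the last row touched."""
        total = len(rows)
        count = 0
        calendar_rid = None
        try:
            for calendar_rid, value in rows:
                handleRow(calendar_rid, value)
                count += 1
                logStatus("Move supported-component-set", count, total)
        except RuntimeError:
            logError(
                "Move supported-component-set",
                "Last calendar: {}".format(calendar_rid),
            )
            raise
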
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_3_to_4.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_3_to_4.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_3_to_4.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -15,19 +15,17 @@
 # limitations under the License.
 ##
 
-from twext.enterprise.dal.syntax import Select, Delete, Parameter
-
 from twisted.internet.defer import inlineCallbacks
 
 from twistedcaldav import caldavxml, customxml
 
 from txdav.base.propertystore.base import PropertyName
-from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
-from txdav.common.datastore.upgrade.sql.upgrades.util import rowsForProperty, updateCalendarDataVersion, \
-    updateAllCalendarHomeDataVersions, removeProperty, cleanPropertyStore
-from txdav.xml.parser import WebDAVDocument
+from txdav.caldav.icalendarstore import InvalidDefaultCalendar
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.datastore.upgrade.sql.upgrades.util import updateCalendarDataVersion, \
+    removeProperty, cleanPropertyStore, logUpgradeStatus, doToEachHomeNotAtVersion
 from txdav.xml import element
-from twisted.python.failure import Failure
+from twistedcaldav.config import config
 
 """
 Data upgrade from database version 3 to 4
@@ -41,165 +39,111 @@
     """
     Do the required upgrade steps.
     """
-    yield moveDefaultCalendarProperties(sqlStore)
-    yield moveCalendarTranspProperties(sqlStore)
-    yield moveDefaultAlarmProperties(sqlStore)
-    yield removeResourceType(sqlStore)
+    yield updateCalendarHomes(sqlStore, config.UpgradeHomePrefix)
 
-    # Always bump the DB value
-    yield updateCalendarDataVersion(sqlStore, UPGRADE_TO_VERSION)
-    yield updateAllCalendarHomeDataVersions(sqlStore, UPGRADE_TO_VERSION)
+    # Don't do the remaining upgrade if we are only processing a subset of the homes
+    if not config.UpgradeHomePrefix:
+        yield removeResourceType(sqlStore)
 
+        # Always bump the DB value
+        yield updateCalendarDataVersion(sqlStore, UPGRADE_TO_VERSION)
 
 
+
 @inlineCallbacks
-def moveDefaultCalendarProperties(sqlStore):
+def updateCalendarHomes(sqlStore, prefix=None):
     """
-    Need to move all the CalDAV:default-calendar and CS:default-tasks properties in the
-    RESOURCE_PROPERTY table to the new CALENDAR_HOME_METADATA table columns, extracting
-    the new value from the XML property.
+    For each calendar home, update the associated properties on the home or its owned calendars.
     """
 
-    meta = schema.CALENDAR_HOME_METADATA
-    yield _processDefaultCalendarProperty(sqlStore, caldavxml.ScheduleDefaultCalendarURL, meta.DEFAULT_EVENTS)
-    yield _processDefaultCalendarProperty(sqlStore, customxml.ScheduleDefaultTasksURL, meta.DEFAULT_TASKS)
+    yield doToEachHomeNotAtVersion(sqlStore, schema.CALENDAR_HOME, UPGRADE_TO_VERSION, updateCalendarHome, "Update Calendar Home", filterOwnerUID=prefix)
 
 
 
 @inlineCallbacks
-def _processDefaultCalendarProperty(sqlStore, propname, colname):
+def updateCalendarHome(txn, homeResourceID):
     """
-    Move the specified property value to the matching CALENDAR_HOME_METADATA table column.
-
-    Since the number of calendar homes may well be large, we need to do this in batches.
+    For this calendar home, update the associated properties on the home or its owned calendars.
     """
 
-    cb = schema.CALENDAR_BIND
-    rp = schema.RESOURCE_PROPERTY
+    home = yield txn.calendarHomeWithResourceID(homeResourceID)
+    yield moveDefaultCalendarProperties(home)
+    yield moveCalendarTranspProperties(home)
+    yield moveDefaultAlarmProperties(home)
+    yield cleanPropertyStore()
 
-    try:
-        while True:
-            sqlTxn = sqlStore.newTransaction()
-            rows = (yield rowsForProperty(sqlTxn, propname, batch=BATCH_SIZE))
-            if len(rows) == 0:
-                yield sqlTxn.commit()
-                break
-            delete_ids = []
-            for inbox_rid, value in rows:
-                delete_ids.append(inbox_rid)
-                ids = yield Select(
-                    [cb.CALENDAR_HOME_RESOURCE_ID, ],
-                    From=cb,
-                    Where=cb.CALENDAR_RESOURCE_ID == inbox_rid,
-                ).on(sqlTxn)
-                if len(ids) > 0:
 
-                    calendarHome = (yield sqlTxn.calendarHomeWithResourceID(ids[0][0]))
-                    if calendarHome is not None:
 
-                        prop = WebDAVDocument.fromString(value).root_element
-                        defaultCalendar = str(prop.children[0])
-                        parts = defaultCalendar.split("/")
-                        if len(parts) == 5:
+@inlineCallbacks
+def moveDefaultCalendarProperties(home):
+    """
+    Need to move any of the CalDAV:default-calendar and CS:default-tasks properties in the
+    RESOURCE_PROPERTY table to the new CALENDAR_HOME_METADATA table columns, extracting
+    the new value from the XML property.
+    """
 
-                            calendarName = parts[-1]
-                            calendarHomeUID = parts[-2]
-                            expectedHome = (yield sqlTxn.calendarHomeWithUID(calendarHomeUID))
-                            if expectedHome is not None and expectedHome.id() == calendarHome.id():
+    yield _processDefaultCalendarProperty(home, caldavxml.ScheduleDefaultCalendarURL)
+    yield _processDefaultCalendarProperty(home, customxml.ScheduleDefaultTasksURL)
 
-                                calendar = (yield calendarHome.calendarWithName(calendarName))
-                                if calendar is not None:
-                                    yield calendarHome.setDefaultCalendar(
-                                        calendar, tasks=(propname == customxml.ScheduleDefaultTasksURL)
-                                    )
 
-            # Always delete the rows so that batch processing works correctly
-            yield Delete(
-                From=rp,
-                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(delete_ids)))).And
-                      (rp.NAME == PropertyName.fromElement(propname).toString()),
-            ).on(sqlTxn, ids=delete_ids)
 
-            yield sqlTxn.commit()
+@inlineCallbacks
+def _processDefaultCalendarProperty(home, propname):
+    """
+    Move the specified property value to the matching CALENDAR_HOME_METADATA table column.
+    """
 
-        yield cleanPropertyStore()
+    inbox = (yield home.calendarWithName("inbox"))
+    prop = inbox.properties().get(PropertyName.fromElement(propname))
+    if prop is not None:
+        defaultCalendar = str(prop.children[0])
+        parts = defaultCalendar.split("/")
+        if len(parts) == 5:
 
-    except RuntimeError:
-        f = Failure()
-        yield sqlTxn.abort()
-        f.raiseException()
+            calendarName = parts[-1]
+            calendarHomeUID = parts[-2]
+            if calendarHomeUID == home.uid():
 
+                calendar = (yield home.calendarWithName(calendarName))
+                if calendar is not None:
+                    try:
+                        yield home.setDefaultCalendar(
+                            calendar, tasks=(propname == customxml.ScheduleDefaultTasksURL)
+                        )
+                    except InvalidDefaultCalendar:
+                        # Ignore these - the server will recover
+                        pass
 
+        del inbox.properties()[PropertyName.fromElement(propname)]
 
+
+
 @inlineCallbacks
-def moveCalendarTranspProperties(sqlStore):
+def moveCalendarTranspProperties(home):
     """
     Need to move all the CalDAV:schedule-calendar-transp properties in the
     RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
     the new value from the XML property.
     """
 
-    cb = schema.CALENDAR_BIND
-    rp = schema.RESOURCE_PROPERTY
+    # Iterate over each calendar (both owned and shared)
+    calendars = (yield home.loadChildren())
+    for calendar in calendars:
+        if calendar.isInbox():
+            continue
+        prop = calendar.properties().get(PropertyName.fromElement(caldavxml.ScheduleCalendarTransp))
+        if prop is not None:
+            yield calendar.setUsedForFreeBusy(prop == caldavxml.ScheduleCalendarTransp(caldavxml.Opaque()))
+            del calendar.properties()[PropertyName.fromElement(caldavxml.ScheduleCalendarTransp)]
+    inbox = (yield home.calendarWithName("inbox"))
+    prop = inbox.properties().get(PropertyName.fromElement(caldavxml.CalendarFreeBusySet))
+    if prop is not None:
+        del inbox.properties()[PropertyName.fromElement(caldavxml.CalendarFreeBusySet)]
 
-    try:
-        calendars_for_id = {}
-        while True:
-            sqlTxn = sqlStore.newTransaction()
-            rows = (yield rowsForProperty(sqlTxn, caldavxml.ScheduleCalendarTransp, with_uid=True, batch=BATCH_SIZE))
-            if len(rows) == 0:
-                yield sqlTxn.commit()
-                break
-            delete_ids = []
-            for calendar_rid, value, viewer in rows:
-                delete_ids.append(calendar_rid)
-                if calendar_rid not in calendars_for_id:
-                    ids = yield Select(
-                        [cb.CALENDAR_HOME_RESOURCE_ID, cb.BIND_MODE, ],
-                        From=cb,
-                        Where=cb.CALENDAR_RESOURCE_ID == calendar_rid,
-                    ).on(sqlTxn)
-                    calendars_for_id[calendar_rid] = ids
 
-                if viewer:
-                    calendarHome = (yield sqlTxn.calendarHomeWithUID(viewer))
-                else:
-                    calendarHome = None
-                    for row in calendars_for_id[calendar_rid]:
-                        home_id, bind_mode = row
-                        if bind_mode == _BIND_MODE_OWN:
-                            calendarHome = (yield sqlTxn.calendarHomeWithResourceID(home_id))
-                            break
 
-                if calendarHome is not None:
-                    prop = WebDAVDocument.fromString(value).root_element
-                    calendar = (yield calendarHome.childWithID(calendar_rid))
-                    if calendar is not None:
-                        yield calendar.setUsedForFreeBusy(prop == caldavxml.ScheduleCalendarTransp(caldavxml.Opaque()))
-
-            # Always delete the rows so that batch processing works correctly
-            yield Delete(
-                From=rp,
-                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(delete_ids)))).And
-                      (rp.NAME == PropertyName.fromElement(caldavxml.ScheduleCalendarTransp).toString()),
-            ).on(sqlTxn, ids=delete_ids)
-
-            yield sqlTxn.commit()
-
-        sqlTxn = sqlStore.newTransaction()
-        yield removeProperty(sqlTxn, PropertyName.fromElement(caldavxml.CalendarFreeBusySet))
-        yield sqlTxn.commit()
-        yield cleanPropertyStore()
-
-    except RuntimeError:
-        f = Failure()
-        yield sqlTxn.abort()
-        f.raiseException()
-
-
-
 @inlineCallbacks
-def moveDefaultAlarmProperties(sqlStore):
+def moveDefaultAlarmProperties(home):
     """
     Need to move all the CalDAV:default-calendar and CS:default-tasks properties in the
     RESOURCE_PROPERTY table to the new CALENDAR_HOME_METADATA table columns, extracting
@@ -207,25 +151,25 @@
     """
 
     yield _processDefaultAlarmProperty(
-        sqlStore,
+        home,
         caldavxml.DefaultAlarmVEventDateTime,
         True,
         True,
     )
     yield _processDefaultAlarmProperty(
-        sqlStore,
+        home,
         caldavxml.DefaultAlarmVEventDate,
         True,
         False,
     )
     yield _processDefaultAlarmProperty(
-        sqlStore,
+        home,
         caldavxml.DefaultAlarmVToDoDateTime,
         False,
         True,
     )
     yield _processDefaultAlarmProperty(
-        sqlStore,
+        home,
         caldavxml.DefaultAlarmVToDoDate,
         False,
         False,
@@ -234,90 +178,40 @@
 
 
 @inlineCallbacks
-def _processDefaultAlarmProperty(sqlStore, propname, vevent, timed):
+def _processDefaultAlarmProperty(home, propname, vevent, timed):
     """
     Move the specified property value to the matching CALENDAR_HOME_METADATA or CALENDAR_BIND table column.
 
     Since the number of properties may well be large, we need to do this in batches.
     """
 
-    hm = schema.CALENDAR_HOME_METADATA
-    cb = schema.CALENDAR_BIND
-    rp = schema.RESOURCE_PROPERTY
+    # Check the home first
+    prop = home.properties().get(PropertyName.fromElement(propname))
+    if prop is not None:
+        alarm = str(prop.children[0]) if prop.children else None
+        yield home.setDefaultAlarm(alarm, vevent, timed)
+        del home.properties()[PropertyName.fromElement(propname)]
 
-    try:
-        calendars_for_id = {}
-        while True:
-            sqlTxn = sqlStore.newTransaction()
-            rows = (yield rowsForProperty(sqlTxn, propname, with_uid=True, batch=BATCH_SIZE))
-            if len(rows) == 0:
-                yield sqlTxn.commit()
-                break
-            delete_ids = []
-            for rid, value, viewer in rows:
-                delete_ids.append(rid)
+    # Now each child
+    calendars = (yield home.loadChildren())
+    for calendar in calendars:
+        if calendar.isInbox():
+            continue
+        prop = calendar.properties().get(PropertyName.fromElement(propname))
+        if prop is not None:
+            alarm = str(prop.children[0]) if prop.children else None
+            yield calendar.setDefaultAlarm(alarm, vevent, timed)
+            del calendar.properties()[PropertyName.fromElement(propname)]
 
-                prop = WebDAVDocument.fromString(value).root_element
-                alarm = str(prop.children[0]) if prop.children else None
 
-                # First check if the rid is a home - this is the most common case
-                ids = yield Select(
-                    [hm.RESOURCE_ID, ],
-                    From=hm,
-                    Where=hm.RESOURCE_ID == rid,
-                ).on(sqlTxn)
 
-                if len(ids) > 0:
-                    # Home object
-                    calendarHome = (yield sqlTxn.calendarHomeWithResourceID(ids[0][0]))
-                    if calendarHome is not None:
-                        yield calendarHome.setDefaultAlarm(alarm, vevent, timed)
-                else:
-                    # rid is a calendar - we need to find the per-user calendar for the resource viewer
-                    if rid not in calendars_for_id:
-                        ids = yield Select(
-                            [cb.CALENDAR_HOME_RESOURCE_ID, cb.BIND_MODE, ],
-                            From=cb,
-                            Where=cb.CALENDAR_RESOURCE_ID == rid,
-                        ).on(sqlTxn)
-                        calendars_for_id[rid] = ids
-
-                    if viewer:
-                        calendarHome = (yield sqlTxn.calendarHomeWithUID(viewer))
-                    else:
-                        calendarHome = None
-                        for row in calendars_for_id[rid]:
-                            home_id, bind_mode = row
-                            if bind_mode == _BIND_MODE_OWN:
-                                calendarHome = (yield sqlTxn.calendarHomeWithResourceID(home_id))
-                                break
-
-                    if calendarHome is not None:
-                        calendar = yield calendarHome.childWithID(rid)
-                        if calendar is not None:
-                            yield calendar.setDefaultAlarm(alarm, vevent, timed)
-
-            # Always delete the rows so that batch processing works correctly
-            yield Delete(
-                From=rp,
-                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(delete_ids)))).And
-                      (rp.NAME == PropertyName.fromElement(propname).toString()),
-            ).on(sqlTxn, ids=delete_ids)
-
-            yield sqlTxn.commit()
-
-        yield cleanPropertyStore()
-
-    except RuntimeError:
-        f = Failure()
-        yield sqlTxn.abort()
-        f.raiseException()
-
-
-
 @inlineCallbacks
 def removeResourceType(sqlStore):
+    logUpgradeStatus("Starting Calendar Remove Resource Type")
+
     sqlTxn = sqlStore.newTransaction()
     yield removeProperty(sqlTxn, PropertyName.fromElement(element.ResourceType))
     yield sqlTxn.commit()
     yield cleanPropertyStore()
+
+    logUpgradeStatus("End Calendar Remove Resource Type")

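The rewritten 3->4 upgrade processes one calendar home at a time via doToEachHomeNotAtVersion() (optionally restricted by config.UpgradeHomePrefix) and moves the old dead properties onto table columns through the normal store API rather than bulk SQL. The len(parts) == 5 check above relies on the default-calendar href splitting into exactly five slash-separated components, with the last two naming the home UID and the calendar. A small illustrative parser (the example href is hypothetical; only the splitting logic mirrors the code above):

    def parseDefaultCalendarHRef(href):
        """Return (homeUID, calendarName) for a well-formed href, else None."""
        parts = href.split("/")
        if len(parts) != 5:
            return None
        calendarName = parts[-1]
        calendarHomeUID = parts[-2]
        return calendarHomeUID, calendarName

    # Hypothetical example value:
    #   parseDefaultCalendarHRef("/calendars/__uids__/home1/calendar_1")
    #   -> ("home1", "calendar_1")
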
Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_4_to_5.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_4_to_5.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_4_to_5.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -15,21 +15,18 @@
 # limitations under the License.
 ##
 
-from twext.enterprise.dal.syntax import Select, Delete, Parameter
+from twext.web2.dav.resource import TwistedQuotaUsedProperty, TwistedGETContentMD5
 
 from twisted.internet.defer import inlineCallbacks
-from twisted.python.failure import Failure
 
 from twistedcaldav import caldavxml, customxml
+from twistedcaldav.config import config
 
 from txdav.base.propertystore.base import PropertyName
-from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
-from txdav.common.datastore.upgrade.sql.upgrades.util import rowsForProperty, updateCalendarDataVersion, \
-    updateAllCalendarHomeDataVersions, removeProperty, cleanPropertyStore
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.datastore.upgrade.sql.upgrades.util import updateCalendarDataVersion, \
+    removeProperty, cleanPropertyStore, logUpgradeStatus, doToEachHomeNotAtVersion
 from txdav.xml import element
-from txdav.xml.parser import WebDAVDocument
-from twext.web2.dav.resource import TwistedQuotaUsedProperty, \
-    TwistedGETContentMD5
 
 """
 Data upgrade from database version 4 to 5
@@ -43,136 +40,75 @@
     """
     Do the required upgrade steps.
     """
-    yield moveCalendarTimezoneProperties(sqlStore)
-    yield moveCalendarAvailabilityProperties(sqlStore)
-    yield removeOtherProperties(sqlStore)
+    yield updateCalendarHomes(sqlStore, config.UpgradeHomePrefix)
 
-    # Always bump the DB value
-    yield updateCalendarDataVersion(sqlStore, UPGRADE_TO_VERSION)
-    yield updateAllCalendarHomeDataVersions(sqlStore, UPGRADE_TO_VERSION)
+    # Don't do the remaining upgrade if we are only processing a subset of the homes
+    if not config.UpgradeHomePrefix:
+        yield removeOtherProperties(sqlStore)
 
+        # Always bump the DB value
+        yield updateCalendarDataVersion(sqlStore, UPGRADE_TO_VERSION)
 
 
+
 @inlineCallbacks
-def moveCalendarTimezoneProperties(sqlStore):
+def updateCalendarHomes(sqlStore, prefix=None):
     """
-    Need to move all the CalDAV:calendar-timezone properties in the
-    RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
-    the new value from the XML property.
+    For each calendar home, update the associated properties on the home or its owned calendars.
     """
 
-    cb = schema.CALENDAR_BIND
-    rp = schema.RESOURCE_PROPERTY
+    yield doToEachHomeNotAtVersion(sqlStore, schema.CALENDAR_HOME, UPGRADE_TO_VERSION, updateCalendarHome, "Update Calendar Home", filterOwnerUID=prefix)
 
-    try:
-        calendars_for_id = {}
-        while True:
-            sqlTxn = sqlStore.newTransaction()
-            rows = (yield rowsForProperty(sqlTxn, caldavxml.CalendarTimeZone, with_uid=True, batch=BATCH_SIZE))
-            if len(rows) == 0:
-                yield sqlTxn.commit()
-                break
-            delete_ids = []
-            for calendar_rid, value, viewer in rows:
-                delete_ids.append(calendar_rid)
-                if calendar_rid not in calendars_for_id:
-                    ids = yield Select(
-                        [cb.CALENDAR_HOME_RESOURCE_ID, cb.BIND_MODE, ],
-                        From=cb,
-                        Where=cb.CALENDAR_RESOURCE_ID == calendar_rid,
-                    ).on(sqlTxn)
-                    calendars_for_id[calendar_rid] = ids
 
-                if viewer:
-                    calendarHome = (yield sqlTxn.calendarHomeWithUID(viewer))
-                else:
-                    calendarHome = None
-                    for row in calendars_for_id[calendar_rid]:
-                        home_id, bind_mode = row
-                        if bind_mode == _BIND_MODE_OWN:
-                            calendarHome = (yield sqlTxn.calendarHomeWithResourceID(home_id))
-                            break
 
-                if calendarHome is not None:
-                    prop = WebDAVDocument.fromString(value).root_element
-                    calendar = (yield calendarHome.childWithID(calendar_rid))
-                    if calendar is not None:
-                        yield calendar.setTimezone(prop.calendar())
+@inlineCallbacks
+def updateCalendarHome(txn, homeResourceID):
+    """
+    For this calendar home, update the associated properties on the home or its owned calendars.
+    """
 
-            # Always delete the rows so that batch processing works correctly
-            yield Delete(
-                From=rp,
-                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(delete_ids)))).And
-                      (rp.NAME == PropertyName.fromElement(caldavxml.CalendarTimeZone).toString()),
-            ).on(sqlTxn, ids=delete_ids)
+    home = yield txn.calendarHomeWithResourceID(homeResourceID)
+    yield moveCalendarTimezoneProperties(home)
+    yield moveCalendarAvailabilityProperties(home)
+    yield cleanPropertyStore()
 
-            yield sqlTxn.commit()
 
-        yield cleanPropertyStore()
 
-    except RuntimeError:
-        f = Failure()
-        yield sqlTxn.abort()
-        f.raiseException()
+@inlineCallbacks
+def moveCalendarTimezoneProperties(home):
+    """
+    Need to move all the CalDAV:calendar-timezone properties in the
+    RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
+    the new value from the XML property.
+    """
 
+    # Iterate over each calendar (both owned and shared)
+    calendars = (yield home.loadChildren())
+    for calendar in calendars:
+        if calendar.isInbox():
+            continue
+        prop = calendar.properties().get(PropertyName.fromElement(caldavxml.CalendarTimeZone))
+        if prop is not None:
+            yield calendar.setTimezone(prop.calendar())
+            del calendar.properties()[PropertyName.fromElement(caldavxml.CalendarTimeZone)]
 
 
+
 @inlineCallbacks
-def moveCalendarAvailabilityProperties(sqlStore):
+def moveCalendarAvailabilityProperties(home):
     """
     Need to move all the CS:calendar-availability properties in the
     RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
     the new value from the XML property.
     """
+    inbox = (yield home.calendarWithName("inbox"))
+    prop = inbox.properties().get(PropertyName.fromElement(customxml.CalendarAvailability))
+    if prop is not None:
+        yield home.setAvailability(prop.calendar())
+        del inbox.properties()[PropertyName.fromElement(customxml.CalendarAvailability)]
 
-    cb = schema.CALENDAR_BIND
-    rp = schema.RESOURCE_PROPERTY
 
-    try:
-        while True:
-            sqlTxn = sqlStore.newTransaction()
-            rows = (yield rowsForProperty(sqlTxn, customxml.CalendarAvailability, batch=BATCH_SIZE))
-            if len(rows) == 0:
-                yield sqlTxn.commit()
-                break
 
-            # Map each calendar to a home id using a single query for efficiency
-            calendar_ids = [row[0] for row in rows]
-
-            home_map = yield Select(
-                [cb.CALENDAR_RESOURCE_ID, cb.CALENDAR_HOME_RESOURCE_ID, ],
-                From=cb,
-                Where=(cb.CALENDAR_RESOURCE_ID.In(Parameter("ids", len(calendar_ids)))).And(cb.BIND_MODE == _BIND_MODE_OWN),
-            ).on(sqlTxn, ids=calendar_ids)
-            calendar_to_home = dict(home_map)
-
-            # Move property to each home
-            for calendar_rid, value in rows:
-                if calendar_rid in calendar_to_home:
-                    calendarHome = (yield sqlTxn.calendarHomeWithResourceID(calendar_to_home[calendar_rid]))
-
-                    if calendarHome is not None:
-                        prop = WebDAVDocument.fromString(value).root_element
-                        yield calendarHome.setAvailability(prop.calendar())
-
-            # Always delete the rows so that batch processing works correctly
-            yield Delete(
-                From=rp,
-                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(calendar_ids)))).And
-                      (rp.NAME == PropertyName.fromElement(customxml.CalendarAvailability).toString()),
-            ).on(sqlTxn, ids=calendar_ids)
-
-            yield sqlTxn.commit()
-
-        yield cleanPropertyStore()
-
-    except RuntimeError:
-        f = Failure()
-        yield sqlTxn.abort()
-        f.raiseException()
-
-
-
 @inlineCallbacks
 def removeOtherProperties(sqlStore):
     """
@@ -190,6 +126,8 @@
     {http://twistedmatrix.com/xml_namespace/dav/}schedule-auto-respond
 
     """
+    logUpgradeStatus("Starting Calendar Remove Other Properties")
+
     sqlTxn = sqlStore.newTransaction()
 
     yield removeProperty(sqlTxn, PropertyName.fromElement(element.ACL))
@@ -205,3 +143,5 @@
 
     yield sqlTxn.commit()
     yield cleanPropertyStore()
+
+    logUpgradeStatus("End Calendar Remove Other Properties")

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_3_to_4.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_3_to_4.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_3_to_4.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -13,23 +13,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ##
+
+from twext.enterprise.dal.syntax import Update, Insert
+
+from twistedcaldav import caldavxml
 from twistedcaldav.caldavxml import ScheduleDefaultCalendarURL, \
-    CalendarFreeBusySet, Opaque, ScheduleCalendarTransp
+    CalendarFreeBusySet, Opaque, ScheduleCalendarTransp, Transparent
+
 from txdav.base.propertystore.base import PropertyName
 from txdav.caldav.datastore.test.util import CommonStoreTests
+from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE, schema
+from txdav.common.datastore.upgrade.sql.upgrades.calendar_upgrade_from_3_to_4 import updateCalendarHomes, \
+    doUpgrade
+from txdav.xml import element
 from txdav.xml.element import HRef
-from twext.enterprise.dal.syntax import Update, Insert
-from txdav.common.datastore.upgrade.sql.upgrades.calendar_upgrade_from_3_to_4 import moveDefaultCalendarProperties, \
-    moveCalendarTranspProperties, removeResourceType, moveDefaultAlarmProperties
-from txdav.xml import element
-from twistedcaldav import caldavxml
-from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE, schema
+from twistedcaldav.config import config
 
 """
 Tests for L{txdav.common.datastore.upgrade.sql.upgrade}.
 """
 
-from twisted.internet.defer import inlineCallbacks
+from twisted.internet.defer import inlineCallbacks, returnValue
 
 class Upgrade_from_3_to_4(CommonStoreTests):
     """
@@ -37,7 +41,7 @@
     """
 
     @inlineCallbacks
-    def test_defaultCalendarUpgrade(self):
+    def _defaultCalendarUpgrade_setup(self):
 
         # Set dead property on inbox
         for user in ("user01", "user02",):
@@ -52,39 +56,132 @@
                 Where=chm.RESOURCE_ID == home._resourceID,
             ).on(self.transactionUnderTest())
 
-        # Force data version to previous
-        ch = home._homeSchema
-        yield Update(
-            {ch.DATAVERSION: 3},
-            Where=ch.RESOURCE_ID == home._resourceID,
-        ).on(self.transactionUnderTest())
+            # Force data version to previous
+            ch = home._homeSchema
+            yield Update(
+                {ch.DATAVERSION: 3},
+                Where=ch.RESOURCE_ID == home._resourceID,
+            ).on(self.transactionUnderTest())
 
         yield self.commit()
 
-        # Trigger upgrade
-        yield moveDefaultCalendarProperties(self._sqlCalendarStore)
 
+    @inlineCallbacks
+    def _defaultCalendarUpgrade_check(self, changed_users, unchanged_users):
+
         # Test results
-        for user in ("user01", "user02",):
+        for user in changed_users:
             home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 4)
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
             self.assertTrue(home.isDefaultCalendar(calendar))
             inbox = (yield self.calendarUnderTest(name="inbox", home=user))
             self.assertTrue(PropertyName.fromElement(ScheduleDefaultCalendarURL) not in inbox.properties())
 
+        for user in unchanged_users:
+            home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 3)
+            calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+            self.assertFalse(home.isDefaultCalendar(calendar))
+            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
+            self.assertTrue(PropertyName.fromElement(ScheduleDefaultCalendarURL) in inbox.properties())
 
+
     @inlineCallbacks
-    def test_calendarTranspUpgrade(self):
+    def test_defaultCalendarUpgrade(self):
+        yield self._defaultCalendarUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._defaultCalendarUpgrade_check(("user01", "user02",), ())
 
+
+    @inlineCallbacks
+    def test_partialDefaultCalendarUpgrade(self):
+        yield self._defaultCalendarUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._defaultCalendarUpgrade_check(("user01",), ("user02",))
+
+
+    @inlineCallbacks
+    def _invalidDefaultCalendarUpgrade_setup(self):
+
         # Set dead property on inbox
         for user in ("user01", "user02",):
             inbox = (yield self.calendarUnderTest(name="inbox", home=user))
+            inbox.properties()[PropertyName.fromElement(ScheduleDefaultCalendarURL)] = ScheduleDefaultCalendarURL(HRef.fromString("/calendars/__uids__/%s/tasks_1" % (user,)))
+
+            # Force current default to null
+            home = (yield self.homeUnderTest(name=user))
+            chm = home._homeMetaDataSchema
+            yield Update(
+                {chm.DEFAULT_EVENTS: None},
+                Where=chm.RESOURCE_ID == home._resourceID,
+            ).on(self.transactionUnderTest())
+
+            # Create tasks only calendar
+            tasks = (yield home.createCalendarWithName("tasks_1"))
+            yield tasks.setSupportedComponents("VTODO")
+
+            # Force data version to previous
+            ch = home._homeSchema
+            yield Update(
+                {ch.DATAVERSION: 3},
+                Where=ch.RESOURCE_ID == home._resourceID,
+            ).on(self.transactionUnderTest())
+
+        yield self.commit()
+
+
+    @inlineCallbacks
+    def _invalidDefaultCalendarUpgrade_check(self, changed_users, unchanged_users):
+
+        # Test results
+        for user in changed_users:
+            home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 4)
+            calendar = (yield self.calendarUnderTest(name="tasks_1", home=user))
+            self.assertFalse(home.isDefaultCalendar(calendar))
+            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
+            self.assertTrue(PropertyName.fromElement(ScheduleDefaultCalendarURL) not in inbox.properties())
+
+        for user in unchanged_users:
+            home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 3)
+            calendar = (yield self.calendarUnderTest(name="tasks_1", home=user))
+            self.assertFalse(home.isDefaultCalendar(calendar))
+            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
+            self.assertTrue(PropertyName.fromElement(ScheduleDefaultCalendarURL) in inbox.properties())
+
+
+    @inlineCallbacks
+    def test_invalidDefaultCalendarUpgrade(self):
+        yield self._invalidDefaultCalendarUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._invalidDefaultCalendarUpgrade_check(("user01", "user02",), ())
+
+
+    @inlineCallbacks
+    def test_partialInvalidDefaultCalendarUpgrade(self):
+        yield self._invalidDefaultCalendarUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._invalidDefaultCalendarUpgrade_check(("user01",), ("user02",))
+
+
+    @inlineCallbacks
+    def _calendarTranspUpgrade_setup(self):
+
+        # Set dead property on inbox
+        for user in ("user01", "user02",):
+            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
             inbox.properties()[PropertyName.fromElement(CalendarFreeBusySet)] = CalendarFreeBusySet(HRef.fromString("/calendars/__uids__/%s/calendar_1" % (user,)))
 
             # Force current to transparent
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
             yield calendar.setUsedForFreeBusy(False)
-            calendar.properties()[PropertyName.fromElement(ScheduleCalendarTransp)] = ScheduleCalendarTransp(Opaque())
+            calendar.properties()[PropertyName.fromElement(ScheduleCalendarTransp)] = ScheduleCalendarTransp(Opaque() if user == "user01" else Transparent())
 
             # Force data version to previous
             home = (yield self.homeUnderTest(name=user))
@@ -118,21 +215,55 @@
         ).on(txn)
         yield self.commit()
 
-        # Trigger upgrade
-        yield moveCalendarTranspProperties(self._sqlCalendarStore)
 
+    @inlineCallbacks
+    def _calendarTranspUpgrade_check(self, changed_users, unchanged_users):
+
         # Test results
-        for user in ("user01", "user02",):
+        for user in changed_users:
             home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 4)
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
-            self.assertTrue(calendar.isUsedForFreeBusy())
+            if user == "user01":
+                self.assertTrue(calendar.isUsedForFreeBusy())
+            else:
+                self.assertFalse(calendar.isUsedForFreeBusy())
+            self.assertTrue(PropertyName.fromElement(caldavxml.ScheduleCalendarTransp) not in calendar.properties())
             inbox = (yield self.calendarUnderTest(name="inbox", home=user))
             self.assertTrue(PropertyName.fromElement(CalendarFreeBusySet) not in inbox.properties())
 
+        for user in unchanged_users:
+            home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 3)
+            calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+            self.assertFalse(calendar.isUsedForFreeBusy())
+            self.assertTrue(PropertyName.fromElement(caldavxml.ScheduleCalendarTransp) in calendar.properties())
+            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
+            self.assertTrue(PropertyName.fromElement(CalendarFreeBusySet) in inbox.properties())
 
+
     @inlineCallbacks
-    def test_defaultAlarmUpgrade(self):
+    def test_calendarTranspUpgrade(self):
+        yield self._calendarTranspUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._calendarTranspUpgrade_check(("user01", "user02",), ())
 
+
+    @inlineCallbacks
+    def test_partialCalendarTranspUpgrade(self):
+        yield self._calendarTranspUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._calendarTranspUpgrade_check(("user01",), ("user02",))
+
+
+    @inlineCallbacks
+    def _defaultAlarmUpgrade_setup(self):
+
         alarmhome1 = """BEGIN:VALARM
 ACTION:AUDIO
 TRIGGER;RELATED=START:-PT1M
@@ -236,13 +367,28 @@
         shared = yield self.calendarUnderTest(name=shared_name, home="user02")
         for _ignore_vevent, _ignore_timed, alarm, prop in detailsshared:
             shared.properties()[PropertyName.fromElement(prop)] = prop(alarm)
+
+        for user in ("user01", "user02",):
+            # Force data version to previous
+            home = (yield self.homeUnderTest(name=user))
+            ch = home._homeSchema
+            yield Update(
+                {ch.DATAVERSION: 3},
+                Where=ch.RESOURCE_ID == home._resourceID,
+            ).on(self.transactionUnderTest())
+
         yield self.commit()
 
-        # Trigger upgrade
-        yield moveDefaultAlarmProperties(self._sqlCalendarStore)
+        returnValue((detailshome, detailscalendar, detailsshared, shared_name,))
 
+
+    @inlineCallbacks
+    def _defaultAlarmUpgrade_check(self, changed_users, unchanged_users, detailshome, detailscalendar, detailsshared, shared_name):
+
         # Check each type of collection
         home = yield self.homeUnderTest(name="user01")
+        version = (yield home.dataVersion())
+        self.assertEqual(version, 4)
         for vevent, timed, alarm, prop in detailshome:
             alarm_result = (yield home.getDefaultAlarm(vevent, timed))
             self.assertEquals(alarm_result, alarm)
@@ -252,18 +398,67 @@
         for vevent, timed, alarm, prop in detailscalendar:
             alarm_result = (yield calendar.getDefaultAlarm(vevent, timed))
             self.assertEquals(alarm_result, alarm)
-            self.assertTrue(PropertyName.fromElement(prop) not in home.properties())
+            self.assertTrue(PropertyName.fromElement(prop) not in calendar.properties())
 
-        shared = yield self.calendarUnderTest(name=shared_name, home="user02")
-        for vevent, timed, alarm, prop in detailsshared:
-            alarm_result = (yield shared.getDefaultAlarm(vevent, timed))
-            self.assertEquals(alarm_result, alarm)
-            self.assertTrue(PropertyName.fromElement(prop) not in home.properties())
+        if "user02" in changed_users:
+            home = (yield self.homeUnderTest(name="user02"))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 4)
+            shared = yield self.calendarUnderTest(name=shared_name, home="user02")
+            for vevent, timed, alarm, prop in detailsshared:
+                alarm_result = (yield shared.getDefaultAlarm(vevent, timed))
+                self.assertEquals(alarm_result, alarm)
+                self.assertTrue(PropertyName.fromElement(prop) not in shared.properties())
+        else:
+            home = (yield self.homeUnderTest(name="user02"))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 3)
+            shared = yield self.calendarUnderTest(name=shared_name, home="user02")
+            for vevent, timed, alarm, prop in detailsshared:
+                alarm_result = (yield shared.getDefaultAlarm(vevent, timed))
+                self.assertEquals(alarm_result, None)
+                self.assertTrue(PropertyName.fromElement(prop) in shared.properties())
 
 
     @inlineCallbacks
-    def test_resourceTypeUpgrade(self):
+    def test_defaultAlarmUpgrade(self):
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._defaultAlarmUpgrade_check(("user01", "user02",), (), detailshome, detailscalendar, detailsshared, shared_name)
 
+
+    @inlineCallbacks
+    def test_partialDefaultAlarmUpgrade(self):
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._defaultAlarmUpgrade_check(("user01",), ("user02",), detailshome, detailscalendar, detailsshared, shared_name)
+
+
+    @inlineCallbacks
+    def test_combinedUpgrade(self):
+        yield self._defaultCalendarUpgrade_setup()
+        yield self._calendarTranspUpgrade_setup()
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._defaultCalendarUpgrade_check(("user01", "user02",), ())
+        yield self._calendarTranspUpgrade_check(("user01", "user02",), ())
+        yield self._defaultAlarmUpgrade_check(("user01", "user02",), (), detailshome, detailscalendar, detailsshared, shared_name)
+
+
+    @inlineCallbacks
+    def test_partialCombinedUpgrade(self):
+        yield self._defaultCalendarUpgrade_setup()
+        yield self._calendarTranspUpgrade_setup()
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._defaultCalendarUpgrade_check(("user01",), ("user02",))
+        yield self._calendarTranspUpgrade_check(("user01",), ("user02",))
+        yield self._defaultAlarmUpgrade_check(("user01",), ("user02",), detailshome, detailscalendar, detailsshared, shared_name)
+
+
+    @inlineCallbacks
+    def _resourceTypeUpgrade_setup(self):
+
         # Set dead property on calendar
         for user in ("user01", "user02",):
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
@@ -273,12 +468,60 @@
         for user in ("user01", "user02",):
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
             self.assertTrue(PropertyName.fromElement(element.ResourceType) in calendar.properties())
+
+        yield self.transactionUnderTest().updateCalendarserverValue("CALENDAR-DATAVERSION", "3")
+
         yield self.commit()
 
-        # Trigger upgrade
-        yield removeResourceType(self._sqlCalendarStore)
 
+    @inlineCallbacks
+    def _resourceTypeUpgrade_check(self, full=True):
+
         # Test results
-        for user in ("user01", "user02",):
-            calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
-            self.assertTrue(PropertyName.fromElement(element.ResourceType) not in calendar.properties())
+        if full:
+            for user in ("user01", "user02",):
+                calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+                self.assertTrue(PropertyName.fromElement(element.ResourceType) not in calendar.properties())
+            version = yield self.transactionUnderTest().calendarserverValue("CALENDAR-DATAVERSION")
+            self.assertEqual(int(version), 4)
+        else:
+            for user in ("user01", "user02",):
+                calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+                self.assertTrue(PropertyName.fromElement(element.ResourceType) in calendar.properties())
+            version = yield self.transactionUnderTest().calendarserverValue("CALENDAR-DATAVERSION")
+            self.assertEqual(int(version), 3)
+
+
+    @inlineCallbacks
+    def test_resourceTypeUpgrade(self):
+        yield self._resourceTypeUpgrade_setup()
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._resourceTypeUpgrade_check()
+
+
+    @inlineCallbacks
+    def test_fullUpgrade(self):
+        self.patch(config, "UpgradeHomePrefix", "")
+        yield self._defaultCalendarUpgrade_setup()
+        yield self._calendarTranspUpgrade_setup()
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield self._resourceTypeUpgrade_setup()
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._defaultCalendarUpgrade_check(("user01", "user02",), ())
+        yield self._calendarTranspUpgrade_check(("user01", "user02",), ())
+        yield self._defaultAlarmUpgrade_check(("user01", "user02",), (), detailshome, detailscalendar, detailsshared, shared_name)
+        yield self._resourceTypeUpgrade_check()
+
+
+    @inlineCallbacks
+    def test_partialFullUpgrade(self):
+        self.patch(config, "UpgradeHomePrefix", "user01")
+        yield self._defaultCalendarUpgrade_setup()
+        yield self._calendarTranspUpgrade_setup()
+        yield self._resourceTypeUpgrade_setup()
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._defaultCalendarUpgrade_check(("user01",), ("user02",))
+        yield self._calendarTranspUpgrade_check(("user01",), ("user02",))
+        yield self._defaultAlarmUpgrade_check(("user01",), ("user02",), detailshome, detailscalendar, detailsshared, shared_name)
+        yield self._resourceTypeUpgrade_check(False)
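
test_fullUpgrade and test_partialFullUpgrade patch config.UpgradeHomePrefix and then
call doUpgrade(); the partial-run assertions (home data version and
CALENDAR-DATAVERSION both left at 3, resourcetype properties untouched) imply that
doUpgrade() restricts the per-home moves to the configured prefix and skips the
store-wide steps and the version bump on a filtered run. A hedged sketch of that
control flow, assuming updateCalendarHomes() and removeResourceType() live in the
same upgrade module:

    from twisted.internet.defer import inlineCallbacks

    from twistedcaldav.config import config


    @inlineCallbacks
    def doUpgrade(sqlStore):
        """
        Sketch of the 3 -> 4 entry point: per-home moves first (possibly
        prefix-filtered), then the store-wide cleanup and version bump only
        when every home is being upgraded.
        """
        yield updateCalendarHomes(sqlStore, config.UpgradeHomePrefix)

        # A prefix-filtered (partial) run must not strip the shared resourcetype
        # properties or bump CALENDAR-DATAVERSION, which is exactly what
        # _resourceTypeUpgrade_check(False) asserts above.
        if not config.UpgradeHomePrefix:
            yield removeResourceType(sqlStore)
            txn = sqlStore.newTransaction()
            yield txn.updateCalendarserverValue("CALENDAR-DATAVERSION", "4")
            yield txn.commit()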

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_4_to_5.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_4_to_5.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_4_to_5.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -13,21 +13,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ##
-from twistedcaldav import caldavxml, customxml
-from txdav.common.datastore.upgrade.sql.upgrades.calendar_upgrade_from_4_to_5 import moveCalendarTimezoneProperties, \
-    removeOtherProperties, moveCalendarAvailabilityProperties
-from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE, schema
-from txdav.xml import element
 
 """
 Tests for L{txdav.common.datastore.upgrade.sql.upgrade}.
 """
 
 from twext.enterprise.dal.syntax import Update, Insert
-from twisted.internet.defer import inlineCallbacks
+
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+from twistedcaldav import caldavxml, customxml
+from twistedcaldav.config import config
 from twistedcaldav.ical import Component
+
 from txdav.base.propertystore.base import PropertyName
 from txdav.caldav.datastore.test.util import CommonStoreTests
+from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE, schema
+from txdav.common.datastore.upgrade.sql.upgrades.calendar_upgrade_from_4_to_5 import updateCalendarHomes, doUpgrade
+from txdav.xml import element
 
 class Upgrade_from_4_to_5(CommonStoreTests):
     """
@@ -35,7 +38,7 @@
     """
 
     @inlineCallbacks
-    def test_calendarTimezoneUpgrade(self):
+    def _calendarTimezoneUpgrade_setup(self):
 
         tz1 = Component.fromString("""BEGIN:VCALENDAR
 VERSION:2.0
@@ -137,19 +140,47 @@
         ).on(txn)
         yield self.commit()
 
-        # Trigger upgrade
-        yield moveCalendarTimezoneProperties(self._sqlCalendarStore)
+        returnValue(user_details)
 
+
+    @inlineCallbacks
+    def _calendarTimezoneUpgrade_check(self, changed_users, unchanged_users, user_details):
+
         # Test results
         for user, calname, tz in user_details:
-            calendar = (yield self.calendarUnderTest(name=calname, home=user))
-            self.assertEqual(calendar.getTimezone(), tz)
-            self.assertTrue(PropertyName.fromElement(caldavxml.CalendarTimeZone) not in calendar.properties())
+            if user in changed_users:
+                home = (yield self.homeUnderTest(name=user))
+                version = (yield home.dataVersion())
+                self.assertEqual(version, 5)
+                calendar = (yield self.calendarUnderTest(name=calname, home=user))
+                self.assertEqual(calendar.getTimezone(), tz)
+                self.assertTrue(PropertyName.fromElement(caldavxml.CalendarTimeZone) not in calendar.properties())
+            else:
+                home = (yield self.homeUnderTest(name=user))
+                version = (yield home.dataVersion())
+                self.assertEqual(version, 4)
+                calendar = (yield self.calendarUnderTest(name=calname, home=user))
+                self.assertEqual(calendar.getTimezone(), None)
+                self.assertTrue(PropertyName.fromElement(caldavxml.CalendarTimeZone) in calendar.properties())
 
 
     @inlineCallbacks
-    def test_calendarAvailabilityUpgrade(self):
+    def test_calendarTimezoneUpgrade(self):
+        user_details = yield self._calendarTimezoneUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._calendarTimezoneUpgrade_check(("user01", "user02", "user03",), (), user_details)
 
+
+    @inlineCallbacks
+    def test_partialCalendarTimezoneUpgrade(self):
+        user_details = yield self._calendarTimezoneUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._calendarTimezoneUpgrade_check(("user01",), ("user02", "user03",), user_details)
+
+
+    @inlineCallbacks
+    def _calendarAvailabilityUpgrade_setup(self):
+
         av1 = Component.fromString("""BEGIN:VCALENDAR
 VERSION:2.0
 CALSCALE:GREGORIAN
@@ -220,20 +251,65 @@
             self.assertEqual(PropertyName.fromElement(customxml.CalendarAvailability) in calendar.properties(), av is not None)
         yield self.commit()
 
-        # Trigger upgrade
-        yield moveCalendarAvailabilityProperties(self._sqlCalendarStore)
+        returnValue(user_details)
 
+
+    @inlineCallbacks
+    def _calendarAvailabilityUpgrade_check(self, changed_users, unchanged_users, user_details):
+
         # Test results
         for user, av in user_details:
-            home = (yield self.homeUnderTest(name=user))
-            calendar = (yield self.calendarUnderTest(name="inbox", home=user))
-            self.assertEqual(home.getAvailability(), av)
-            self.assertTrue(PropertyName.fromElement(customxml.CalendarAvailability) not in calendar.properties())
+            if user in changed_users:
+                home = (yield self.homeUnderTest(name=user))
+                version = (yield home.dataVersion())
+                self.assertEqual(version, 5)
+                calendar = (yield self.calendarUnderTest(name="inbox", home=user))
+                self.assertEqual(home.getAvailability(), av)
+                self.assertTrue(PropertyName.fromElement(customxml.CalendarAvailability) not in calendar.properties())
+            else:
+                home = (yield self.homeUnderTest(name=user))
+                version = (yield home.dataVersion())
+                self.assertEqual(version, 4)
+                calendar = (yield self.calendarUnderTest(name="inbox", home=user))
+                self.assertEqual(home.getAvailability(), None)
+                self.assertTrue(PropertyName.fromElement(customxml.CalendarAvailability) in calendar.properties())
 
 
     @inlineCallbacks
-    def test_removeOtherPropertiesUpgrade(self):
+    def test_calendarAvailabilityUpgrade(self):
+        user_details = yield self._calendarAvailabilityUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._calendarAvailabilityUpgrade_check(("user01", "user02", "user03",), (), user_details)
 
+
+    @inlineCallbacks
+    def test_partialCalendarAvailabilityUpgrade(self):
+        user_details = yield self._calendarAvailabilityUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._calendarAvailabilityUpgrade_check(("user01",), ("user02", "user03",), user_details)
+
+
+    @inlineCallbacks
+    def test_combinedUpgrade(self):
+        user_details1 = yield self._calendarTimezoneUpgrade_setup()
+        user_details2 = yield self._calendarAvailabilityUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._calendarTimezoneUpgrade_check(("user01", "user02", "user03",), (), user_details1)
+        yield self._calendarAvailabilityUpgrade_check(("user01", "user02", "user03",), (), user_details2)
+
+
+    @inlineCallbacks
+    def test_partialCombinedUpgrade(self):
+        user_details1 = yield self._calendarTimezoneUpgrade_setup()
+        user_details2 = yield self._calendarAvailabilityUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._calendarTimezoneUpgrade_check(("user01",), ("user02", "user03",), user_details1)
+        yield self._calendarAvailabilityUpgrade_check(("user01",), ("user02", "user03",), user_details2)
+
+
+    @inlineCallbacks
+    def _removeOtherPropertiesUpgrade_setup(self):
+
         # Set dead property on calendar
         for user in ("user01", "user02",):
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
@@ -243,12 +319,55 @@
         for user in ("user01", "user02",):
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
             self.assertTrue(PropertyName.fromElement(element.ResourceID) in calendar.properties())
+
+        yield self.transactionUnderTest().updateCalendarserverValue("CALENDAR-DATAVERSION", "4")
+
         yield self.commit()
 
-        # Trigger upgrade
-        yield removeOtherProperties(self._sqlCalendarStore)
 
+    @inlineCallbacks
+    def _removeOtherPropertiesUpgrade_check(self, full=True):
+
         # Test results
         for user in ("user01", "user02",):
-            calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
-            self.assertTrue(PropertyName.fromElement(element.ResourceID) not in calendar.properties())
+            if full:
+                calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+                self.assertTrue(PropertyName.fromElement(element.ResourceID) not in calendar.properties())
+                version = yield self.transactionUnderTest().calendarserverValue("CALENDAR-DATAVERSION")
+                self.assertEqual(int(version), 5)
+            else:
+                calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+                self.assertTrue(PropertyName.fromElement(element.ResourceID) in calendar.properties())
+                version = yield self.transactionUnderTest().calendarserverValue("CALENDAR-DATAVERSION")
+                self.assertEqual(int(version), 4)
+
+
+    @inlineCallbacks
+    def test_removeOtherPropertiesUpgrade(self):
+        yield self._removeOtherPropertiesUpgrade_setup()
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._removeOtherPropertiesUpgrade_check()
+
+
+    @inlineCallbacks
+    def test_fullUpgrade(self):
+        self.patch(config, "UpgradeHomePrefix", "")
+        user_details1 = yield self._calendarTimezoneUpgrade_setup()
+        user_details2 = yield self._calendarAvailabilityUpgrade_setup()
+        yield self._removeOtherPropertiesUpgrade_setup()
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._calendarTimezoneUpgrade_check(("user01", "user02", "user03",), (), user_details1)
+        yield self._calendarAvailabilityUpgrade_check(("user01", "user02", "user03",), (), user_details2)
+        yield self._removeOtherPropertiesUpgrade_check()
+
+
+    @inlineCallbacks
+    def test_partialFullUpgrade(self):
+        self.patch(config, "UpgradeHomePrefix", "user01")
+        user_details1 = yield self._calendarTimezoneUpgrade_setup()
+        user_details2 = yield self._calendarAvailabilityUpgrade_setup()
+        yield self._removeOtherPropertiesUpgrade_setup()
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._calendarTimezoneUpgrade_check(("user01",), ("user02", "user03",), user_details1)
+        yield self._calendarAvailabilityUpgrade_check(("user01",), ("user02", "user03",), user_details2)
+        yield self._removeOtherPropertiesUpgrade_check(False)
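
Because the setup/check pairs take explicit changed/unchanged user lists, they also
compose for scenarios not covered by this change. A purely hypothetical example,
illustrating that a prefix-filtered pass can later be completed by an unfiltered one:

    @inlineCallbacks
    def test_resumedPartialUpgrade(self):
        # Hypothetical illustration only: the second, unfiltered pass picks up
        # user02 and user03 because doToEachHomeNotAtVersion() only selects
        # homes whose DATAVERSION is still below the target.
        user_details = yield self._calendarTimezoneUpgrade_setup()
        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
        yield updateCalendarHomes(self._sqlCalendarStore)
        yield self._calendarTimezoneUpgrade_check(("user01", "user02", "user03",), (), user_details)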

Modified: CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/util.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/util.py	2013-10-31 21:40:44 UTC (rev 11860)
+++ CalendarServer/branches/users/gaya/sharedgroupfixes/txdav/common/datastore/upgrade/sql/upgrades/util.py	2013-10-31 22:23:31 UTC (rev 11861)
@@ -14,7 +14,7 @@
 # limitations under the License.
 ##
 
-from twext.enterprise.dal.syntax import Select, Delete, Update
+from twext.enterprise.dal.syntax import Select, Delete, Update, Count
 from twext.python.log import Logger
 from twisted.internet.defer import inlineCallbacks, returnValue
 from txdav.base.propertystore.base import PropertyName
@@ -44,6 +44,21 @@
 
 
 @inlineCallbacks
+def countProperty(txn, propelement):
+    pname = PropertyName.fromElement(propelement)
+
+    rp = schema.RESOURCE_PROPERTY
+    count = (yield Select(
+        [Count(rp.RESOURCE_ID), ],
+        From=rp,
+        Where=rp.NAME == pname.toString(),
+    ).on(txn))[0][0]
+
+    returnValue(count)
+
+
+
+@inlineCallbacks
 def cleanPropertyStore():
     """
     We have manually manipulated the SQL property store by-passing the underlying implementation's caching
@@ -114,27 +129,43 @@
 
 
 @inlineCallbacks
-def doToEachHomeNotAtVersion(store, homeSchema, version, doIt):
+def doToEachHomeNotAtVersion(store, homeSchema, version, doIt, logStr, filterOwnerUID=None):
     """
     Do something to each home whose version column indicates it is older
-    than the specified version. Do this in batches as there may be a lot of work to do.
+    than the specified version. Do this in batches as there may be a lot of work to do. Also,
+    allow filtering by owner UID prefix to support a partial/parallel mode of operation.
     """
 
+    txn = store.newTransaction("updateDataVersion")
+    where = homeSchema.DATAVERSION < version
+    if filterOwnerUID:
+        where = where.And(homeSchema.OWNER_UID.StartsWith(filterOwnerUID))
+    total = (yield Select(
+        [Count(homeSchema.RESOURCE_ID), ],
+        From=homeSchema,
+        Where=where,
+    ).on(txn))[0][0]
+    yield txn.commit()
+    count = 0
+
     while True:
 
+        logUpgradeStatus(logStr, count, total)
+
         # Get the next home with an old version
         txn = store.newTransaction("updateDataVersion")
         try:
             rows = yield Select(
                 [homeSchema.RESOURCE_ID, homeSchema.OWNER_UID, ],
                 From=homeSchema,
-                Where=homeSchema.DATAVERSION < version,
+                Where=where,
                 OrderBy=homeSchema.OWNER_UID,
                 Limit=1,
             ).on(txn)
 
             if len(rows) == 0:
                 yield txn.commit()
+                logUpgradeStatus("End {}".format(logStr), count, total)
                 returnValue(None)
 
             # Apply to the home
@@ -149,6 +180,26 @@
             yield txn.commit()
         except RuntimeError, e:
             f = Failure()
-            log.error("Failed to upgrade %s to %s: %s" % (homeSchema, version, e))
+            logUpgradeError(
+                logStr,
+                "Failed to upgrade {} to {}: {}".format(homeSchema, version, e)
+            )
             yield txn.abort()
             f.raiseException()
+
+        count += 1
+
+
+
+def logUpgradeStatus(title, count=None, total=None):
+    if total is None:
+        log.info("Database upgrade {title}", title=title)
+    else:
+        divisor = 1000 if total > 1000 else 100
+        if (divmod(count, divisor)[1] == 0) or (count == total):
+            log.info("Database upgrade {title}: {count} of {total}", title=title, count=count, total=total)
+
+
+
+def logUpgradeError(title, details):
+    log.error("Database upgrade {title} failed: {details}", title=title, details=details)