[CalendarServer-changes] [11912] CalendarServer/branches/users/cdaboo/json

source_changes at macosforge.org source_changes at macosforge.org
Wed Mar 12 11:20:49 PDT 2014


Revision: 11912
          http://trac.calendarserver.org/changeset/11912
Author:   cdaboo at apple.com
Date:     2013-11-07 19:33:05 -0800 (Thu, 07 Nov 2013)
Log Message:
-----------
Merge from trunk.

Modified Paths:
--------------
    CalendarServer/branches/users/cdaboo/json/HACKING
    CalendarServer/branches/users/cdaboo/json/bin/_calendarserver_preamble.py
    CalendarServer/branches/users/cdaboo/json/calendarserver/accesslog.py
    CalendarServer/branches/users/cdaboo/json/calendarserver/push/amppush.py
    CalendarServer/branches/users/cdaboo/json/calendarserver/push/notifier.py
    CalendarServer/branches/users/cdaboo/json/calendarserver/push/test/test_notifier.py
    CalendarServer/branches/users/cdaboo/json/calendarserver/tap/caldav.py
    CalendarServer/branches/users/cdaboo/json/calendarserver/tap/util.py
    CalendarServer/branches/users/cdaboo/json/calendarserver/tools/shell/directory.py
    CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/deprovision/caldavd.plist
    CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/gateway/caldavd.plist
    CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/principals/caldavd.plist
    CalendarServer/branches/users/cdaboo/json/calendarserver/tools/upgrade.py
    CalendarServer/branches/users/cdaboo/json/conf/auth/accounts-test.xml
    CalendarServer/branches/users/cdaboo/json/conf/auth/augments.dtd
    CalendarServer/branches/users/cdaboo/json/conf/localservers-test.xml
    CalendarServer/branches/users/cdaboo/json/conf/localservers.xml
    CalendarServer/branches/users/cdaboo/json/conf/remoteservers-test.xml
    CalendarServer/branches/users/cdaboo/json/conf/remoteservers.xml
    CalendarServer/branches/users/cdaboo/json/conf/resources/caldavd-resources.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/config.dist.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/config.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/population.py
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/sim.py
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/test_sim.py
    CalendarServer/branches/users/cdaboo/json/contrib/performance/sqlusage/requests/httpTests.py
    CalendarServer/branches/users/cdaboo/json/contrib/performance/sqlusage/sqlusage.py
    CalendarServer/branches/users/cdaboo/json/contrib/tools/protocolanalysis.py
    CalendarServer/branches/users/cdaboo/json/contrib/tools/request_monitor.py
    CalendarServer/branches/users/cdaboo/json/setup.py
    CalendarServer/branches/users/cdaboo/json/support/version.py
    CalendarServer/branches/users/cdaboo/json/twext/enterprise/dal/syntax.py
    CalendarServer/branches/users/cdaboo/json/twext/enterprise/dal/test/test_sqlsyntax.py
    CalendarServer/branches/users/cdaboo/json/twext/enterprise/fixtures.py
    CalendarServer/branches/users/cdaboo/json/twext/enterprise/queue.py
    CalendarServer/branches/users/cdaboo/json/twext/enterprise/test/test_queue.py
    CalendarServer/branches/users/cdaboo/json/twext/patches.py
    CalendarServer/branches/users/cdaboo/json/twext/protocols/test/test_memcache.py
    CalendarServer/branches/users/cdaboo/json/twext/who/aggregate.py
    CalendarServer/branches/users/cdaboo/json/twext/who/directory.py
    CalendarServer/branches/users/cdaboo/json/twext/who/expression.py
    CalendarServer/branches/users/cdaboo/json/twext/who/idirectory.py
    CalendarServer/branches/users/cdaboo/json/twext/who/index.py
    CalendarServer/branches/users/cdaboo/json/twext/who/test/test_aggregate.py
    CalendarServer/branches/users/cdaboo/json/twext/who/test/test_directory.py
    CalendarServer/branches/users/cdaboo/json/twext/who/test/test_util.py
    CalendarServer/branches/users/cdaboo/json/twext/who/test/test_xml.py
    CalendarServer/branches/users/cdaboo/json/twext/who/util.py
    CalendarServer/branches/users/cdaboo/json/twext/who/xml.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/config.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/augment.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/common.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/directory-principal-resource.html
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/directory.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/idirectory.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/ldapdirectory.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/principal.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/resource.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/augments-test-default.xml
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/augments-test.xml
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/resources/caldavd.plist
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_augment.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_directory.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_ldapdirectory.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/xmlaugmentsparser.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/extensions.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/ical.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/resource.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/scheduling_store/caldav/resource.py
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/stdconfig.py
    CalendarServer/branches/users/cdaboo/json/txdav/base/datastore/subpostgres.py
    CalendarServer/branches/users/cdaboo/json/txdav/base/datastore/util.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/file.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/schedule.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/addressmapping.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/caldav/delivery.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/caldav/scheduler.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/cuaddress.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/implicit.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/delivery.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/localservers.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/remoteservers.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/resource.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/scheduler.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/test/test_delivery.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/test/test_localservers.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/itip.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/scheduler.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/utils.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/sql.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/common.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/test_implicit.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/test_util.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/util.py
    CalendarServer/branches/users/cdaboo/json/txdav/caldav/icalendardirectoryservice.py
    CalendarServer/branches/users/cdaboo/json/txdav/carddav/datastore/test/common.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/file.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/current.sql
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_19_to_20.sql
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_25_to_26.sql
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_13_to_14.sql
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_25_to_26.sql
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_tables.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/test/util.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/test/test_upgrade.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrade.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_3_to_4.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_4_to_5.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_3_to_4.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_4_to_5.py
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/util.py

Added Paths:
-----------
    CalendarServer/branches/users/cdaboo/json/conf/auth/accounts-test-pod.xml
    CalendarServer/branches/users/cdaboo/json/conf/auth/augments-test-pod.xml
    CalendarServer/branches/users/cdaboo/json/conf/auth/proxies-test-pod.xml
    CalendarServer/branches/users/cdaboo/json/conf/auth/resources-test-pod.xml
    CalendarServer/branches/users/cdaboo/json/conf/caldavd-test-podA.plist
    CalendarServer/branches/users/cdaboo/json/conf/caldavd-test-podB.plist
    CalendarServer/branches/users/cdaboo/json/conf/localservers.dtd
    CalendarServer/branches/users/cdaboo/json/conf/remoteservers.dtd
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/clients.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/events-only.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-accepts.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only-recurring.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only.plist
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/old/oracle-dialect/v26.sql
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/old/postgres-dialect/v26.sql
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_26_to_27.sql
    CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_26_to_27.sql

Removed Paths:
-------------
    CalendarServer/branches/users/cdaboo/json/bin/calendarserver_make_partition
    CalendarServer/branches/users/cdaboo/json/conf/caldavd-partitioning-primary.plist
    CalendarServer/branches/users/cdaboo/json/conf/caldavd-partitioning-secondary.plist
    CalendarServer/branches/users/cdaboo/json/conf/servers.dtd
    CalendarServer/branches/users/cdaboo/json/conf/servertoserver.dtd
    CalendarServer/branches/users/cdaboo/json/conf/sudoers.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/events-only.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-accepts.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only-recurring.plist
    CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only.plist
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/sudoers.plist
    CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/sudoers2.plist

Property Changed:
----------------
    CalendarServer/branches/users/cdaboo/json/


Property changes on: CalendarServer/branches/users/cdaboo/json
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11612
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:11622-11773
   + /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:11622-11901

Modified: CalendarServer/branches/users/cdaboo/json/HACKING
===================================================================
--- CalendarServer/branches/users/cdaboo/json/HACKING	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/HACKING	2013-11-08 03:33:05 UTC (rev 11912)
@@ -144,10 +144,6 @@
 
 PEP-8 items we do not follow:
 
- * Lines need not be limited to 79 spaces, but longer lines are
-   undesirable.  If you can easily do so, try to keep lines under 80
-   columns.
-
  * PEP-8 recommends using a backslash to break long lines up:
 
    ::
@@ -181,14 +177,6 @@
 
    Because that's just silly.
 
- * Lining up assignments is OK, within reason:
-
-   ::
-
-     cars       =  4
-     motorbikes =  8
-     bicycles   = 18
-
 Additions:
 
  * Close parentheses and brackets such as ``()``, ``[]`` and ``{}`` at the
@@ -248,9 +236,8 @@
 
      process = subprocess.Popen(...)
 
-   This makes code shorter and removes the runtime indirection (which
-   can be relevant in tight loops). It also makes it easier to replace
-   one implementation with another.
+   This makes code shorter and makes it easier to replace one implementation
+   with another.
 
  * All files should have an ``__all__`` specification.  Put them at the
    top of the file, before imports (PEP-8 puts them at the top, but
@@ -259,8 +246,8 @@
 
  * It is more important that symbol names are meaningful than it is
    that they be concise.  ``x`` is rarely an appropriate name for a
-   variable.  ``transmogrifierStatus`` is more useful to the reader
-   than ``trmgStat``; avoid contractions.
+   variable.  Avoid contractions: ``transmogrifierStatus`` is more useful
+   to the reader than ``trmgStat``.
 
  * A deferred that will be immediately returned may be called ``d``:
 
@@ -271,9 +258,7 @@
      d.addErrback(onError)
      return d
 
- * We prefer ``inlineCallbacks`` over ``deferredGenerator``.
-   ``inlineCallbacks`` are more readable, and we do not support Python
-   versions old enough that ``deferredGenerator`` would be necessary.
+ * Do not use ``deferredGenerator``.  Use ``inlineCallbacks`` instead.
 
  * That said, avoid using ``inlineCallbacks`` when chaining deferreds
    is straightforward, as they are more expensive.  Use
@@ -306,17 +291,29 @@
    Use of underscores is reserved for implied dispatching and the like
    (eg. ``http_FOO()``).  See the Twisted Coding Standard for details.
 
- * Always use a tuple when using ``%``-formatting, even when only one
-   value is being provided:
+ * Do not use ``%``-formatting:
 
    ::
 
      error = "Unexpected value: %s" % (value,)
 
-   Do not use the non-tuple form:
+   Use PEP-3101 formatting instead:
 
    ::
 
+     error = "Unexpected value: {value}".format(value=value)
+
+ * If you must use ``%``-formatting for some reason, always use a tuple as
+   the format argument, even when only one value is being provided:
+
+   ::
+
+     error = "Unexpected value: %s" % (value,)
+
+   Never use the non-tuple form:
+
+   ::
+
      error = "Unexpected value: %s" % value
 
    Which is allowed in Python, but results in a programming error if
@@ -329,8 +326,9 @@
      numbers = (1,2,3,) # No
      numbers = (1,2,3)  # Yes
 
-   It's desirable on multiple lines, though, as that makes re-ordering
-   items easy, and avoids a diff on the last line when adding another:
+   The trailing comma is desirable on multiple lines, though, as that makes
+   re-ordering items easy, and avoids a diff on the last line when adding
+   another:
 
    ::
 
@@ -368,11 +366,11 @@
 ==============
 
  * If a callable is going to return a Deferred some of the time, it
-   should probably return a deferred all of the time.  Return
-   ``succeed(value)`` instead of ``value`` if necessary.  This avoids
-   forcing the caller to check as to whether the value is a deferred
-   or not (eg. by using ``maybeDeferred()``), which is both annoying
-   to code and potentially expensive at runtime.
+   should return a deferred all of the time.  Return ``succeed(value)``
+   instead of ``value`` if necessary.  This avoids forcing the caller
+   to check as to whether the value is a deferred or not (eg. by using
+   ``maybeDeferred()``), which is both annoying to code and potentially
+   expensive at runtime.
 
  * Be proactive about closing files and file-like objects.
 

Modified: CalendarServer/branches/users/cdaboo/json/bin/_calendarserver_preamble.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/bin/_calendarserver_preamble.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/bin/_calendarserver_preamble.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -60,7 +60,6 @@
     noConfigOption = [
         "calendarserver_bootstrap_database",
         "calendarserver_load_augmentdb",
-        "calendarserver_make_partition",
         "calendarserver_manage_augments",
         "calendarserver_manage_postgres",
         "calendarserver_manage_timezones",

Deleted: CalendarServer/branches/users/cdaboo/json/bin/calendarserver_make_partition
===================================================================
--- CalendarServer/branches/users/cdaboo/json/bin/calendarserver_make_partition	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/bin/calendarserver_make_partition	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,32 +0,0 @@
-#!/usr/bin/env python
-##
-# Copyright (c) 2010-2013 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-import sys
-
-#PYTHONPATH
-
-if __name__ == "__main__":
-    if "PYTHONPATH" in globals():
-        sys.path.insert(0, PYTHONPATH)
-    else:
-        try:
-            import _calendarserver_preamble
-        except ImportError:
-            sys.exc_clear()
-
-    from calendarserver.tools.makepartition import main
-    main()

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/accesslog.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/accesslog.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/accesslog.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -173,7 +173,7 @@
                     formatArgs["t"] = (nowtime - request.timeStamps[0][1]) * 1000
 
                 if hasattr(request, "extendedLogItems"):
-                    for k, v in request.extendedLogItems.iteritems():
+                    for k, v in sorted(request.extendedLogItems.iteritems(), key=lambda x: x[0]):
                         k = str(k).replace('"', "%22")
                         v = str(v).replace('"', "%22")
                         if " " in v:

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/push/amppush.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/push/amppush.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/push/amppush.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -48,7 +48,8 @@
 # AMP Commands sent to client (and forwarded to Master)
 
 class NotificationForID(amp.Command):
-    arguments = [('id', amp.String()), ('dataChangedTimestamp', amp.Integer())]
+    arguments = [('id', amp.String()),
+                 ('dataChangedTimestamp', amp.Integer(optional=True))]
     response = [('status', amp.String())]
 
 

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/push/notifier.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/push/notifier.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/push/notifier.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -84,10 +84,13 @@
 
 
     @inlineCallbacks
-    def notify(self):
+    def notify(self, txn):
         """
         Send the notification. For a home object we just push using the home id. For a home
         child we push both the owner home id and the owned home child id.
+
+        @param txn: The transaction to create the work item with
+        @type txn: L{CommonStoreTransaction}
         """
         # Push ids from the store objects are a tuple of (prefix, name,) and we need to compose that
         # into a single token.
@@ -100,7 +103,7 @@
         for prefix, id in ids:
             if self._notify:
                 self.log.debug("Notifications are enabled: %s %s/%s" % (self._storeObject, prefix, id,))
-                yield self._notifierFactory.send(prefix, id)
+                yield self._notifierFactory.send(prefix, id, txn)
             else:
                 self.log.debug("Skipping notification for: %s %s/%s" % (self._storeObject, prefix, id,))
 
@@ -147,11 +150,12 @@
 
 
     @inlineCallbacks
-    def send(self, prefix, id):
-        txn = self.store.newTransaction()
+    def send(self, prefix, id, txn):
+        """
+        Enqueue a push notification work item on the provided transaction.
+        """
         notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=self.coalesceSeconds)
         yield txn.enqueue(PushNotificationWork, pushID=self.pushKeyForId(prefix, id), notBefore=notBefore)
-        yield txn.commit()
 
 
     def newNotifier(self, storeObject):

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/push/test/test_notifier.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/push/test/test_notifier.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/push/test/test_notifier.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -169,8 +169,8 @@
 
         home = yield self.homeUnderTest()
         yield home.notifyChanged()
+        self.assertEquals(self.notifierFactory.history, ["/CalDAV/example.com/home1/"])
         yield self.commit()
-        self.assertEquals(self.notifierFactory.history, ["/CalDAV/example.com/home1/"])
 
 
     @inlineCallbacks
@@ -178,11 +178,11 @@
 
         calendar = yield self.calendarUnderTest()
         yield calendar.notifyChanged()
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set(["/CalDAV/example.com/home1/", "/CalDAV/example.com/home1/calendar_1/"])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -191,7 +191,6 @@
         calendar = yield self.calendarUnderTest()
         home2 = yield self.homeUnderTest(name="home2")
         yield calendar.shareWith(home2, _BIND_MODE_WRITE)
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -200,11 +199,11 @@
                 "/CalDAV/example.com/home2/"
             ])
         )
+        yield self.commit()
 
         calendar = yield self.calendarUnderTest()
         home2 = yield self.homeUnderTest(name="home2")
         yield calendar.unshareWith(home2)
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -213,6 +212,7 @@
                 "/CalDAV/example.com/home2/"
             ])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -226,11 +226,11 @@
 
         shared = yield self.calendarUnderTest(home="home2", name=shareName)
         yield shared.notifyChanged()
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set(["/CalDAV/example.com/home1/", "/CalDAV/example.com/home1/calendar_1/"])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -238,8 +238,8 @@
 
         notifications = yield self.transactionUnderTest().notificationsWithUID("home1")
         yield notifications.notifyChanged()
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set(["/CalDAV/example.com/home1/", "/CalDAV/example.com/home1/notification/"])
         )
+        yield self.commit()

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/tap/caldav.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/tap/caldav.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -401,6 +401,14 @@
         print("Reading configuration from file: %s" % (self["config"],))
 
         config.load(self["config"])
+
+        for path in config.getProvider().importedFiles:
+            print("Imported configuration from file: '%s'" % (path,))
+        for path in config.getProvider().includedFiles:
+            print("Adding configuration from file: '%s'" % (path,))
+        for path in config.getProvider().missingFiles:
+            print("Missing configuration file: '%s'" % (path,))
+
         config.updateDefaults(self.overrides)
 
 
@@ -1212,6 +1220,28 @@
             else:
                 groupCacher = None
 
+            # Optionally enable Manhole access
+            if config.Manhole.Enabled:
+                try:
+                    from twisted.conch.manhole_tap import makeService as manholeMakeService
+                    portString = "tcp:%d:interface=127.0.0.1" % (config.Manhole.StartingPortNumber,)
+                    manholeService = manholeMakeService({
+                        "sshPort" : None,
+                        "telnetPort" : portString,
+                        "namespace" : {
+                            "config" : config,
+                            "service" : result,
+                            "store" : store,
+                            "directory" : directory,
+                            },
+                        "passwd" : config.Manhole.PasswordFilePath,
+                    })
+                    manholeService.setServiceParent(result)
+                    # Using print() because logging isn't ready at this point
+                    print("Manhole access enabled: %s" % (portString,))
+                except ImportError:
+                    print("Manhole access could not be enabled because manhole_tap could not be imported")
+
             def decorateTransaction(txn):
                 txn._pushDistributor = pushDistributor
                 txn._rootResource = result.rootResource
@@ -1407,7 +1437,9 @@
 
                 # Conditionally stop after upgrade at this point
                 pps.addStep(
-                    QuitAfterUpgradeStep(config.StopAfterUpgradeTriggerFile)
+                    QuitAfterUpgradeStep(
+                        config.StopAfterUpgradeTriggerFile or config.UpgradeHomePrefix
+                    )
                 )
 
                 pps.addStep(

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/tap/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/tap/util.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/tap/util.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -95,6 +95,7 @@
 from txdav.common.datastore.sql import CommonDataStore as CommonSQLDataStore
 from txdav.common.datastore.file import CommonDataStore as CommonFileDataStore
 from txdav.common.datastore.sql import current_sql_schema
+from txdav.common.datastore.upgrade.sql.upgrade import NotAllowedToUpgrade
 from twext.python.filepath import CachingFilePath
 from urllib import quote
 from twisted.python.usage import UsageError
@@ -635,11 +636,23 @@
             addSystemEventTrigger("after", "startup", timezoneStdService.onStartup)
 
     #
-    # iSchedule service
+    # iSchedule service for podding
     #
+    if config.Servers.Enabled:
+        log.info("Setting up iSchedule podding inbox resource: {cls}", cls=iScheduleResourceClass)
+
+        ischedule = iScheduleResourceClass(
+            root,
+            newStore,
+            podding=True
+        )
+        root.putChild(config.Servers.InboxName, ischedule)
+
+    #
+    # iSchedule service (not used for podding)
+    #
     if config.Scheduling.iSchedule.Enabled:
-        log.info("Setting up iSchedule inbox resource: {cls}",
-                      cls=iScheduleResourceClass)
+        log.info("Setting up iSchedule inbox resource: {cls}", cls=iScheduleResourceClass)
 
         ischedule = iScheduleResourceClass(
             root,
@@ -650,8 +663,7 @@
         # Do DomainKey resources
         DKIMUtils.validConfiguration(config)
         if config.Scheduling.iSchedule.DKIM.Enabled:
-            log.info("Setting up domainkey resource: {res}",
-                res=DomainKeyResource)
+            log.info("Setting up domainkey resource: {res}", res=DomainKeyResource)
             domain = config.Scheduling.iSchedule.DKIM.Domain if config.Scheduling.iSchedule.DKIM.Domain else config.ServerHostName
             dk = DomainKeyResource(
                 domain,
@@ -1077,7 +1089,8 @@
 
 
     def defaultStepWithFailure(self, failure):
-        log.failure("Step failure", failure=failure)
+        if failure.type != NotAllowedToUpgrade:
+            log.failure("Step failure", failure=failure)
         return failure
 
     # def protectStep(self, callback):

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/tools/shell/directory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/tools/shell/directory.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/tools/shell/directory.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -41,6 +41,7 @@
     returnValue(sorted(records, key=operator.attrgetter("fullName")))
 
 
+
 @inlineCallbacks
 def recordInfo(directory, record):
     """
@@ -58,8 +59,9 @@
     add("Proxy access"     , (yield recordProxyAccessInfo(directory, record)))
 
     returnValue("\n".join(info))
-        
 
+
+
 def recordBasicInfo(directory, record):
     """
     Basic information for a record.
@@ -87,15 +89,15 @@
     for cua in record.calendarUserAddresses:
         add("Calendar User Address", cua)
 
-    add("Server ID"           , record.serverID              )
-    add("Partition ID"        , record.partitionID           )
-    add("Enabled"             , record.enabled               )
-    add("Enabled for Calendar", record.enabledForCalendaring )
+    add("Server ID"           , record.serverID)
+    add("Enabled"             , record.enabled)
+    add("Enabled for Calendar", record.enabledForCalendaring)
     add("Enabled for Contacts", record.enabledForAddressBooks)
 
     return succeed(table.toString())
 
 
+
 def recordGroupMembershipInfo(directory, record):
     """
     Group membership info for a record.
@@ -109,7 +111,7 @@
         return succeed(None)
 
     rows = sorted(rows,
-        key = lambda row: (row[1], row[2])
+        key=lambda row: (row[1], row[2])
     )
 
     table = Table()
@@ -120,6 +122,7 @@
     return succeed(table.toString())
 
 
+
 @inlineCallbacks
 def recordProxyAccessInfo(directory, record):
     """
@@ -154,7 +157,7 @@
         returnValue(None)
 
     rows = sorted(rows,
-        key = lambda row: (row[1], row[2], row[4])
+        key=lambda row: (row[1], row[2], row[4])
     )
 
     table = Table()
@@ -165,6 +168,7 @@
     returnValue(table.toString())
 
 
+
 def summarizeRecords(directory, records):
     table = Table()
 

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/deprovision/caldavd.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/deprovision/caldavd.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/deprovision/caldavd.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -191,7 +191,6 @@
         Augments for the directory service records to add calendar specific attributes.
 
         A variety of augment services are available for use.
-        When using a partitioned server, a service that can be accessed from each host will be needed.
       -->
 
     <!-- XML File Augment Service -->

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/gateway/caldavd.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/gateway/caldavd.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/gateway/caldavd.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -203,7 +203,6 @@
         Augments for the directory service records to add calendar specific attributes.
 
         A variety of augment services are available for use.
-        When using a partitioned server, a service that can be accessed from each host will be needed.
       -->
 
     <!-- XML File Augment Service -->

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/principals/caldavd.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/principals/caldavd.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/tools/test/principals/caldavd.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -195,7 +195,6 @@
         Augments for the directory service records to add calendar specific attributes.
 
         A variety of augment services are available for use.
-        When using a partitioned server, a service that can be accessed from each host will be needed.
       -->
 
     <!-- XML File Augment Service -->

Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/tools/upgrade.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/tools/upgrade.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/tools/upgrade.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -82,6 +82,7 @@
 
     optParameters = [
         ['config', 'f', DEFAULT_CONFIG_FILE, "Specify caldavd.plist configuration path."],
+        ['prefix', 'x', "", "Only upgrade homes with the specified GUID prefix - partial upgrade only."],
     ]
 
     def __init__(self):
@@ -142,11 +143,17 @@
         """
         Immediately stop.  The upgrade will have been run before this.
         """
-        # If we get this far the database is OK
-        if self.options["status"]:
-            self.output.write("Database OK.\n")
+        if self.store is None:
+            if self.options["status"]:
+                self.output.write("Upgrade needed.\n")
+            else:
+                self.output.write("Upgrade failed.\n")
         else:
-            self.output.write("Upgrade complete, shutting down.\n")
+            # If we get this far the database is OK
+            if self.options["status"]:
+                self.output.write("Database OK.\n")
+            else:
+                self.output.write("Upgrade complete, shutting down.\n")
         UpgraderService.started = True
 
         from twisted.internet import reactor
@@ -191,9 +198,11 @@
             data.MergeUpgrades = True
         config.addPostUpdateHooks([setMerge])
 
+
     def makeService(store):
         return UpgraderService(store, options, output, reactor, config)
 
+
     def onlyUpgradeEvents(eventDict):
         text = formatEvent(eventDict)
         output.write(logDateString() + " " + text + "\n")
@@ -203,14 +212,19 @@
         log.publisher.levels.setLogLevelForNamespace(None, LogLevel.debug)
         addObserver(onlyUpgradeEvents)
 
+
     def customServiceMaker():
         customService = CalDAVServiceMaker()
         customService.doPostImport = options["postprocess"]
         return customService
 
+
     def _patchConfig(config):
         config.FailIfUpgradeNeeded = options["status"]
+        if options["prefix"]:
+            config.UpgradeHomePrefix = options["prefix"]
 
+
     def _onShutdown():
         if not UpgraderService.started:
             print("Failed to start service.")

Copied: CalendarServer/branches/users/cdaboo/json/conf/auth/accounts-test-pod.xml (from rev 11901, CalendarServer/trunk/conf/auth/accounts-test-pod.xml)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/auth/accounts-test-pod.xml	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/conf/auth/accounts-test-pod.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<accounts realm="Test Realm">
+  <user>
+    <uid>admin</uid>
+    <guid>admin</guid>
+    <password>admin</password>
+    <name>Super User</name>
+    <first-name>Super</first-name>
+    <last-name>User</last-name>
+  </user>
+  <user repeat="101">
+    <uid>user%02d</uid>
+    <uid>User %02d</uid>
+    <guid>user%02d</guid>
+    <password>user%02d</password>
+    <name>User %02d</name>
+    <first-name>User</first-name>
+    <last-name>%02d</last-name>
+    <email-address>user%02d at example.com</email-address>
+  </user>
+  <user repeat="101">
+    <uid>puser%02d</uid>
+    <uid>Puser %02d</uid>
+    <guid>puser%02d</guid>
+    <password>puser%02d</password>
+    <name>Puser %02d</name>
+    <first-name>Puser</first-name>
+    <last-name>%02d</last-name>
+    <email-address>puser%02d at example.com</email-address>
+  </user>
+</accounts>

Modified: CalendarServer/branches/users/cdaboo/json/conf/auth/accounts-test.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/auth/accounts-test.xml	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/auth/accounts-test.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -89,7 +89,7 @@
     <first-name>ま</first-name>
     <last-name>だ</last-name>
   </user>
-  <user repeat="99">
+  <user repeat="101">
     <uid>user%02d</uid>
     <uid>User %02d</uid>
     <guid>user%02d</guid>

Copied: CalendarServer/branches/users/cdaboo/json/conf/auth/augments-test-pod.xml (from rev 11901, CalendarServer/trunk/conf/auth/augments-test-pod.xml)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/auth/augments-test-pod.xml	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/conf/auth/augments-test-pod.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE augments SYSTEM "augments.dtd">
+
+<augments>
+  <record>
+    <uid>Default</uid>
+    <enable>true</enable>
+    <server-id>A</server-id>
+    <enable-calendar>true</enable-calendar>
+    <enable-addressbook>true</enable-addressbook>
+  </record>
+  <record repeat="101">
+    <uid>puser%02d</uid>
+    <enable>true</enable>
+    <server-id>B</server-id>
+    <enable-calendar>true</enable-calendar>
+    <enable-addressbook>true</enable-addressbook>
+  </record>
+</augments>

Modified: CalendarServer/branches/users/cdaboo/json/conf/auth/augments.dtd
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/auth/augments.dtd	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/auth/augments.dtd	2013-11-08 03:33:05 UTC (rev 11912)
@@ -19,7 +19,7 @@
   <!ELEMENT record (
   		uid,
   		enable,
-  		(server-id, partition-id?)?,
+  		server-id?,
   		enable-calendar?,
   		enable-addressbook?,
   		enable-login?,
@@ -32,7 +32,6 @@
   <!ELEMENT uid                (#PCDATA)>
   <!ELEMENT enable             (#PCDATA)>
   <!ELEMENT server-id          (#PCDATA)>
-  <!ELEMENT partition-id       (#PCDATA)>
   <!ELEMENT enable-calendar    (#PCDATA)>
   <!ELEMENT enable-addressbook (#PCDATA)>
   <!ELEMENT enable-login       (#PCDATA)>

Copied: CalendarServer/branches/users/cdaboo/json/conf/auth/proxies-test-pod.xml (from rev 11901, CalendarServer/trunk/conf/auth/proxies-test-pod.xml)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/auth/proxies-test-pod.xml	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/conf/auth/proxies-test-pod.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Copyright (c) 2009-2013 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ -->
+
+<!DOCTYPE proxies SYSTEM "proxies.dtd">
+
+<proxies>
+</proxies>

Copied: CalendarServer/branches/users/cdaboo/json/conf/auth/resources-test-pod.xml (from rev 11901, CalendarServer/trunk/conf/auth/resources-test-pod.xml)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/auth/resources-test-pod.xml	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/conf/auth/resources-test-pod.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<accounts realm="Test Realm">
+</accounts>

Deleted: CalendarServer/branches/users/cdaboo/json/conf/caldavd-partitioning-primary.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/caldavd-partitioning-primary.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/caldavd-partitioning-primary.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,85 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-    Copyright (c) 2006-2013 Apple Inc. All rights reserved.
-
-    Licensed under the Apache License, Version 2.0 (the "License");
-    you may not use this file except in compliance with the License.
-    You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-  <dict>
-
-    <!--  Servers -->
-    <key>Servers</key>
-    <dict>
-	    <key>Enabled</key>
-	    <true/>
-	    <key>ConfigFile</key>
-	    <string>localservers.xml</string>
-	    <key>MaxClients</key>
-	    <integer>5</integer>
-	</dict>
-    <key>ServerPartitionID</key>
-    <string>00001</string>
-
-    <!-- PostgreSQL ProxyDB Service -->
-    <key>ProxyDBService</key>
-    <dict>
-      <key>type</key>
-      <string>twistedcaldav.directory.calendaruserproxy.ProxyPostgreSQLDB</string>
-      
-      <key>params</key>
-      <dict>
-        <key>host</key>
-        <string>localhost</string>
-        <key>database</key>
-        <string>proxies</string>
-      </dict>
-    </dict>
-
-    <!-- Support for Memcached -->
-    <key>Memcached</key>
-    <dict>
-	  <key>Pools</key>
-		<dict>
-		  <key>CommonToAllNodes</key>
-		  <dict>
-		    <key>ClientEnabled</key>
-		    <true/>
-		    <key>ServerEnabled</key>
-		    <true/>
-		    <key>BindAddress</key>
-		    <string>localhost</string>
-		    <key>Port</key>
-		    <integer>11311</integer>
-		    <key>HandleCacheTypes</key>
-		    <array>
-		      <string>ProxyDB</string>
-		      <string>PrincipalToken</string>
-		      <string>DIGESTCREDENTIALS</string>
-		    </array>
-		  </dict>
-		</dict>
-      <key>MaxClients</key>
-      <integer>5</integer>
-      <key>memcached</key>
-      <string>../memcached/_root/bin/memcached</string> <!-- Find in PATH -->
-      <key>Options</key>
-      <array>
-        <!--<string>-vv</string>-->
-      </array>
-    </dict>
-
-  </dict>
-</plist>

Deleted: CalendarServer/branches/users/cdaboo/json/conf/caldavd-partitioning-secondary.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/caldavd-partitioning-secondary.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/caldavd-partitioning-secondary.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,85 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-    Copyright (c) 2006-2013 Apple Inc. All rights reserved.
-
-    Licensed under the Apache License, Version 2.0 (the "License");
-    you may not use this file except in compliance with the License.
-    You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-  <dict>
-
-    <!--  Servers -->
-    <key>Servers</key>
-    <dict>
-	    <key>Enabled</key>
-	    <true/>
-	    <key>ConfigFile</key>
-	    <string>localservers.xml</string>
-	    <key>MaxClients</key>
-	    <integer>5</integer>
-	</dict>
-    <key>ServerPartitionID</key>
-    <string>00002</string>
-
-    <!-- PostgreSQL ProxyDB Service -->
-    <key>ProxyDBService</key>
-    <dict>
-      <key>type</key>
-      <string>twistedcaldav.directory.calendaruserproxy.ProxyPostgreSQLDB</string>
-      
-      <key>params</key>
-      <dict>
-        <key>host</key>
-        <string>localhost</string>
-        <key>database</key>
-        <string>proxies</string>
-      </dict>
-    </dict>
-
-    <!-- Support for Memcached -->
-    <key>Memcached</key>
-    <dict>
-	  <key>Pools</key>
-		<dict>
-		  <key>CommonToAllNodes</key>
-		  <dict>
-		    <key>ClientEnabled</key>
-		    <true/>
-		    <key>ServerEnabled</key>
-		    <false/>
-		    <key>BindAddress</key>
-		    <string>localhost</string>
-		    <key>Port</key>
-		    <integer>11311</integer>
-		    <key>HandleCacheTypes</key>
-		    <array>
-		      <string>ProxyDB</string>
-		      <string>PrincipalToken</string>
-		      <string>DIGESTCREDENTIALS</string>
-		    </array>
-		  </dict>
-		</dict>
-      <key>MaxClients</key>
-      <integer>5</integer>
-      <key>memcached</key>
-      <string>../memcached/_root/bin/memcached</string> <!-- Find in PATH -->
-      <key>Options</key>
-      <array>
-        <!--<string>-vv</string>-->
-      </array>
-    </dict>
-
-  </dict>
-</plist>

Copied: CalendarServer/branches/users/cdaboo/json/conf/caldavd-test-podA.plist (from rev 11901, CalendarServer/trunk/conf/caldavd-test-podA.plist)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/caldavd-test-podA.plist	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/conf/caldavd-test-podA.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,158 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+    Copyright (c) 2006-2009 Apple Inc. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+  <dict>
+
+    <!-- Import a parent config before this one -->
+    <key>ImportConfig</key>
+    <string>./conf/caldavd-test.plist</string>
+
+    <!-- HTTP port [0 = disable HTTP] -->
+    <key>HTTPPort</key>
+    <integer>8008</integer>
+
+    <!-- SSL port [0 = disable HTTPS] -->
+    <!-- (Must also configure SSLCertificate and SSLPrivateKey below) -->
+    <key>SSLPort</key>
+    <integer>8443</integer>
+
+    <!-- List of port numbers to bind to for HTTP [empty = same as "Port"] -->
+    <key>BindHTTPPorts</key>
+    <array>
+    </array>
+
+    <!-- List of port numbers to bind to for SSL [empty = same as "SSLPort"] -->
+    <key>BindSSLPorts</key>
+    <array>
+    </array>
+
+    <!-- Server root -->
+    <key>ServerRoot</key>
+    <string>./data/podA</string>
+
+    <!-- Configuration root -->
+    <key>ConfigRoot</key>
+    <string>./conf</string>
+
+    <!-- XML File Directory Service -->
+    <key>DirectoryService</key>
+    <dict>
+      <key>type</key>
+      <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
+      
+      <key>params</key>
+      <dict>
+        <key>xmlFile</key>
+        <string>./conf/auth/accounts-test-pod.xml</string>
+      </dict>
+    </dict>
+    
+    <!-- Resource and Location Service -->
+    <key>ResourceService</key>
+    <dict>
+      <key>Enabled</key>
+      <true/>
+      <key>type</key>
+      <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
+      
+      <key>params</key>
+      <dict>
+        <key>xmlFile</key>
+        <string>./conf/auth/resources-test-pod.xml</string>
+      </dict>
+    </dict>
+
+    <!-- XML File Augment Service -->
+    <key>AugmentService</key>
+    <dict>
+      <key>type</key>
+      <string>twistedcaldav.directory.augment.AugmentXMLDB</string>
+      
+      <key>params</key>
+      <dict>
+        <key>xmlFiles</key>
+        <array>
+	      <string>./conf/auth/augments-test-pod.xml</string>
+        </array>
+      </dict>
+    </dict>
+
+    <key>ProxyLoadFromFile</key>
+    <string>./conf/auth/proxies-test-pod.xml</string>
+
+    <!--  Servers -->
+    <key>Servers</key>
+    <dict>
+	    <key>Enabled</key>
+	    <true/>
+	    <key>ConfigFile</key>
+	    <string>./conf/localservers-test.xml</string>
+	    <key>MaxClients</key>
+	    <integer>5</integer>
+	    <key>InboxName</key>
+	    <string>podding</string>
+	</dict>
+
+    <!-- Support for Memcached -->
+    <key>Memcached</key>
+    <dict>
+	  <key>Pools</key>
+		<dict>
+		  <key>Default</key>
+		  <dict>
+		    <key>ClientEnabled</key>
+		    <true/>
+		    <key>ServerEnabled</key>
+		    <true/>
+		    <key>BindAddress</key>
+		    <string>localhost</string>
+		    <key>Port</key>
+		    <integer>11211</integer>
+		  </dict>
+		  <key>ProxyDB</key>
+		  <dict>
+		    <key>ClientEnabled</key>
+		    <true/>
+		    <key>ServerEnabled</key>
+		    <true/>
+		    <key>BindAddress</key>
+		    <string>localhost</string>
+		    <key>Port</key>
+		    <integer>11311</integer>
+		    <key>HandleCacheTypes</key>
+		    <array>
+		      <string>ProxyDB</string>
+		      <string>PrincipalToken</string>
+		      <string>DIGESTCREDENTIALS</string>
+		    </array>
+		  </dict>
+		</dict>
+      <key>MaxClients</key>
+      <integer>5</integer>
+      <key>memcached</key>
+      <string>../memcached/_root/bin/memcached</string> <!-- Find in PATH -->
+      <key>Options</key>
+      <array>
+        <!--<string>-vv</string>-->
+      </array>
+    </dict>
+
+  </dict>
+</plist>

Copied: CalendarServer/branches/users/cdaboo/json/conf/caldavd-test-podB.plist (from rev 11901, CalendarServer/trunk/conf/caldavd-test-podB.plist)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/caldavd-test-podB.plist	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/conf/caldavd-test-podB.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,158 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+    Copyright (c) 2006-2009 Apple Inc. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+  <dict>
+
+    <!-- Import a parent config before this one -->
+    <key>ImportConfig</key>
+    <string>./conf/caldavd-test.plist</string>
+
+    <!-- HTTP port [0 = disable HTTP] -->
+    <key>HTTPPort</key>
+    <integer>8108</integer>
+
+    <!-- SSL port [0 = disable HTTPS] -->
+    <!-- (Must also configure SSLCertificate and SSLPrivateKey below) -->
+    <key>SSLPort</key>
+    <integer>8543</integer>
+
+    <!-- List of port numbers to bind to for HTTP [empty = same as "Port"] -->
+    <key>BindHTTPPorts</key>
+    <array>
+    </array>
+
+    <!-- List of port numbers to bind to for SSL [empty = same as "SSLPort"] -->
+    <key>BindSSLPorts</key>
+    <array>
+    </array>
+
+    <!-- Server root -->
+    <key>ServerRoot</key>
+    <string>./data/podB</string>
+
+    <!-- Configuration root -->
+    <key>ConfigRoot</key>
+    <string>./conf</string>
+
+    <!-- XML File Directory Service -->
+    <key>DirectoryService</key>
+    <dict>
+      <key>type</key>
+      <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
+      
+      <key>params</key>
+      <dict>
+        <key>xmlFile</key>
+        <string>./conf/auth/accounts-test-pod.xml</string>
+      </dict>
+    </dict>
+    
+    <!-- Resource and Location Service -->
+    <key>ResourceService</key>
+    <dict>
+      <key>Enabled</key>
+      <true/>
+      <key>type</key>
+      <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
+      
+      <key>params</key>
+      <dict>
+        <key>xmlFile</key>
+        <string>./conf/auth/resources-test-pod.xml</string>
+      </dict>
+    </dict>
+
+    <!-- XML File Augment Service -->
+    <key>AugmentService</key>
+    <dict>
+      <key>type</key>
+      <string>twistedcaldav.directory.augment.AugmentXMLDB</string>
+      
+      <key>params</key>
+      <dict>
+        <key>xmlFiles</key>
+        <array>
+	      <string>./conf/auth/augments-test-pod.xml</string>
+        </array>
+      </dict>
+    </dict>
+
+    <key>ProxyLoadFromFile</key>
+    <string>./conf/auth/proxies-test-pod.xml</string>
+
+    <!--  Servers -->
+    <key>Servers</key>
+    <dict>
+	    <key>Enabled</key>
+	    <true/>
+	    <key>ConfigFile</key>
+	    <string>./conf/localservers-test.xml</string>
+	    <key>MaxClients</key>
+	    <integer>5</integer>
+	    <key>InboxName</key>
+	    <string>podding</string>
+	</dict>
+
+    <!-- Support for Memcached -->
+    <key>Memcached</key>
+    <dict>
+	  <key>Pools</key>
+		<dict>
+		  <key>Default</key>
+		  <dict>
+		    <key>ClientEnabled</key>
+		    <true/>
+		    <key>ServerEnabled</key>
+		    <true/>
+		    <key>BindAddress</key>
+		    <string>localhost</string>
+		    <key>Port</key>
+		    <integer>11411</integer>
+		  </dict>
+		  <key>ProxyDB</key>
+		  <dict>
+		    <key>ClientEnabled</key>
+		    <true/>
+		    <key>ServerEnabled</key>
+		    <true/>
+		    <key>BindAddress</key>
+		    <string>localhost</string>
+		    <key>Port</key>
+		    <integer>11311</integer>
+		    <key>HandleCacheTypes</key>
+		    <array>
+		      <string>ProxyDB</string>
+		      <string>PrincipalToken</string>
+		      <string>DIGESTCREDENTIALS</string>
+		    </array>
+		  </dict>
+		</dict>
+      <key>MaxClients</key>
+      <integer>5</integer>
+      <key>memcached</key>
+      <string>../memcached/_root/bin/memcached</string> <!-- Find in PATH -->
+      <key>Options</key>
+      <array>
+        <!--<string>-vv</string>-->
+      </array>
+    </dict>
+
+  </dict>
+</plist>

Modified: CalendarServer/branches/users/cdaboo/json/conf/localservers-test.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/localservers-test.xml	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/localservers-test.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -16,21 +16,19 @@
 limitations under the License.
  -->
 
-<!DOCTYPE servers SYSTEM "servers.dtd">
+<!DOCTYPE servers SYSTEM "localservers.dtd">
 
 <servers>
   <server>
-    <id>00001</id>
+    <id>A</id>
     <uri>http://localhost:8008</uri>
-    <partitions>
-    	<partition>
-    		<id>00001</id>
-    		<uri>http://localhost:8008</uri>
-    	</partition>
-    	<partition>
-    		<id>00002</id>
-    		<uri>http://localhost:8108</uri>
-    	</partition>
-    </partitions>
+    <allowed-from>localhost</allowed-from>
+    <shared-secret>A</shared-secret>
   </server>
+  <server>
+    <id>B</id>
+    <uri>http://localhost:8108</uri>
+    <allowed-from>localhost</allowed-from>
+    <shared-secret>B</shared-secret>
+  </server>
 </servers>

Copied: CalendarServer/branches/users/cdaboo/json/conf/localservers.dtd (from rev 11901, CalendarServer/trunk/conf/localservers.dtd)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/localservers.dtd	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/conf/localservers.dtd	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,25 @@
+<!--
+Copyright (c) 2011-2013 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!ELEMENT servers (server*) >
+
+	<!ELEMENT server (id, uri, allowed-from*, shared-secret?) >
+		<!ATTLIST server implicit (yes|no) "yes">
+
+		<!ELEMENT id  (#PCDATA) >
+		<!ELEMENT uri (#PCDATA) >
+		<!ELEMENT allowed-from (#PCDATA) >
+		<!ELEMENT shared-secret (#PCDATA) >

Modified: CalendarServer/branches/users/cdaboo/json/conf/localservers.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/localservers.xml	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/localservers.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -16,7 +16,7 @@
 limitations under the License.
  -->
 
-<!DOCTYPE servers SYSTEM "servers.dtd">
+<!DOCTYPE servers SYSTEM "localservers.dtd">
 
 <servers>
   <!--
@@ -30,16 +30,6 @@
     <allowed-from>127.0.0.1</allowed-from>
     <allowed-from>example.local</allowed-from>
     <shared-secret>ABC</shared-secret>
-    <partitions>
-    	<partition>
-    		<id>00001</id>
-    		<url>https://machine1.example.com:8443</url>
-    	</partition>
-    	<partition>
-    		<id>00002</id>
-    		<url>https://machine2.example.com:8443</url>
-    	</partition>
-    </partitions>
   </server>
   -->
 </servers>

Modified: CalendarServer/branches/users/cdaboo/json/conf/remoteservers-test.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/remoteservers-test.xml	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/remoteservers-test.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -16,7 +16,7 @@
 limitations under the License.
  -->
 
-<!DOCTYPE servers SYSTEM "servertoserver.dtd">
+<!DOCTYPE servers SYSTEM "remoteservers.dtd">
 
 <servers>
   <server>

Copied: CalendarServer/branches/users/cdaboo/json/conf/remoteservers.dtd (from rev 11901, CalendarServer/trunk/conf/remoteservers.dtd)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/remoteservers.dtd	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/conf/remoteservers.dtd	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,33 @@
+<!--
+Copyright (c) 2006-2013 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!ELEMENT servers (server*) >
+
+	<!ELEMENT server (uri, authentication?, allow-requests-from, allow-requests-to, domains?, hosts?) >
+
+		<!ELEMENT uri (#PCDATA) >
+		<!ELEMENT authentication (user, password) >
+		    <!ATTLIST authentication type (basic) "">
+		    <!ELEMENT user (#PCDATA) >
+		    <!ELEMENT password (#PCDATA) >
+
+		<!ELEMENT allow-requests-from EMPTY >
+		<!ELEMENT allow-requests-to EMPTY >
+		<!ELEMENT domains (domain*) >
+			<!ELEMENT domain (#PCDATA) >
+		<!ELEMENT hosts (host*) >
+			<!ELEMENT host (#PCDATA) >
+			
\ No newline at end of file

Modified: CalendarServer/branches/users/cdaboo/json/conf/remoteservers.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/remoteservers.xml	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/remoteservers.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -16,7 +16,7 @@
 limitations under the License.
  -->
 
-<!DOCTYPE servers SYSTEM "servertoserver.dtd">
+<!DOCTYPE servers SYSTEM "remoteservers.dtd">
 
 <servers>
   <!--

Modified: CalendarServer/branches/users/cdaboo/json/conf/resources/caldavd-resources.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/resources/caldavd-resources.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/resources/caldavd-resources.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -174,7 +174,6 @@
         Augments for the directory service records to add calendar specific attributes.
 
         A variety of augment services are available for use.
-        When using a partitioned server, a service that can be accessed from each host will be needed.
       -->
 
     <!-- XML File Augment Service -->

Deleted: CalendarServer/branches/users/cdaboo/json/conf/servers.dtd
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/servers.dtd	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/servers.dtd	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,28 +0,0 @@
-<!--
-Copyright (c) 2011-2013 Apple Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!ELEMENT servers (server*) >
-
-	<!ELEMENT server (id, uri, allowed-from*, shared-secret?, partitions?) >
-		<!ATTLIST server implicit (yes|no) "yes">
-
-		<!ELEMENT id  (#PCDATA) >
-		<!ELEMENT uri (#PCDATA) >
-		<!ELEMENT allowed-from (#PCDATA) >
-		<!ELEMENT shared-secret (#PCDATA) >
-
-		<!ELEMENT partitions (partition*) >
-			<!ELEMENT partition (id, uri) >

Deleted: CalendarServer/branches/users/cdaboo/json/conf/servertoserver.dtd
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/servertoserver.dtd	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/servertoserver.dtd	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,33 +0,0 @@
-<!--
-Copyright (c) 2006-2013 Apple Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!ELEMENT servers (server*) >
-
-	<!ELEMENT server (uri, authentication?, allow-requests-from, allow-requests-to, domains?, hosts?) >
-
-		<!ELEMENT uri (#PCDATA) >
-		<!ELEMENT authentication (user, password) >
-		    <!ATTLIST authentication type (basic) "">
-		    <!ELEMENT user (#PCDATA) >
-		    <!ELEMENT password (#PCDATA) >
-
-		<!ELEMENT allow-requests-from EMPTY >
-		<!ELEMENT allow-requests-to EMPTY >
-		<!ELEMENT domains (domain*) >
-			<!ELEMENT domain (#PCDATA) >
-		<!ELEMENT hosts (host*) >
-			<!ELEMENT host (#PCDATA) >
-			
\ No newline at end of file

Deleted: CalendarServer/branches/users/cdaboo/json/conf/sudoers.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/sudoers.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/conf/sudoers.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-<key>users</key>
-<array>
-<!-- Sudo user definitions -->
-<!-- With the exception of username and password none of the following
-     elements are used in the current implementation. -->
-<!--
-  <dict>
-    <key>authorize-as</key>
-    <dict>
-      <key>allow</key>
-      <true/>
-      <key>principals</key>
-      <array>
-	<string>all</string>
-        <string>/principals/user/wsanchez</string>
-      </array>
-    </dict>
-    <key>authorize-from</key>
-    <array>
-      <string>127.0.0.1</string>
-    </array>
-
-    <key>username</key>
-    <string></string>
-
-    <key>password</key>
-    <string></string>
-  </dict>
--->
-  <dict>
-    <key>username</key>
-    <string>superuser</string>
-    <key>password</key>
-    <string>superuser</string>
-  </dict>
-</array>
-</dict>
-</plist>

Copied: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/clients.plist (from rev 11901, CalendarServer/trunk/contrib/performance/loadtest/clients.plist)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/clients.plist	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/clients.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,445 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+    Copyright (c) 2011-2013 Apple Inc. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+	<dict>
+		<!-- Define the kinds of software and user behavior the load simulation
+			will simulate. -->
+		<key>clients</key>
+
+		<!-- Have as many different kinds of software and user behavior configurations
+			as you want. Each is a dict -->
+		<array>
+
+			<dict>
+
+				<!-- Here is a OS X client simulator. -->
+				<key>software</key>
+				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
+
+				<!-- Arguments to use to initialize the OS_X_10_7 instance. -->
+				<key>params</key>
+				<dict>
+					<!-- Name that appears in logs. -->
+					<key>title</key>
+					<string>10.7</string>
+	
+					<!-- OS_X_10_7 can poll the calendar home at some interval. This is
+						in seconds. -->
+					<key>calendarHomePollInterval</key>
+					<integer>30</integer>
+
+					<!-- If the server advertises xmpp push, OS_X_10_7 can wait for notifications
+						about calendar home changes instead of polling for them periodically. If
+						this option is true, then look for the server advertisement for xmpp push
+						and use it if possible. Still fall back to polling if there is no xmpp push
+						advertised. -->
+					<key>supportPush</key>
+					<false />
+
+					<key>supportAmpPush</key>
+					<true/>
+					<key>ampPushHost</key>
+					<string>localhost</string>
+					<key>ampPushPort</key>
+					<integer>62311</integer>
+				</dict>
+
+				<!-- The profiles define certain types of user behavior on top of the
+					client software being simulated. -->
+				<key>profiles</key>
+				<array>
+
+					<!-- First an event-creating profile, which will periodically create
+						new events at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Eventer</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define the interval (in seconds) at which this profile will use
+								its client to create a new event. -->
+							<key>interval</key>
+							<integer>60</integer>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events
+								will be selected. This is an example of a "Distribution" parameter. The value
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps
+									in the near future, limited to certain days of the week and certain hours
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<true/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Half of all events will be non-recurring -->
+										<key>none</key>
+										<integer>50</integer>
+										
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>10</integer>
+										<key>weekly</key>
+										<integer>20</integer>
+										
+										<!-- Monthly, yearly, daily & weekly limit not so common -->
+										<key>monthly</key>
+										<integer>2</integer>
+										<key>yearly</key>
+										<integer>1</integer>
+										<key>dailylimit</key>
+										<integer>2</integer>
+										<key>weeklylimit</key>
+										<integer>5</integer>
+										
+										<!-- Work days pretty common -->
+										<key>workdays</key>
+										<integer>10</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile invites some number of new attendees to new events. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define the frequency at which new invitations will be sent out. -->
+							<key>sendInvitationDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.NormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mu gives the mean of the normal distribution (in seconds). -->
+									<key>mu</key>
+									<integer>60</integer>
+
+									<!-- and sigma gives its standard deviation. -->
+									<key>sigma</key>
+									<integer>5</integer>
+								</dict>
+							</dict>
+
+							<!-- Define the distribution of who will be invited to an event.
+							
+								When inviteeClumping is turned on each invitee is based on a sample of
+								users "close to" the organizer based on account index. If the clumping
+								is too "tight" for the requested number of attendees, then invites for
+								those larger numbers will simply fail (the sim will report that situation).
+								
+								When inviteeClumping is off invitees will be sampled across an entire
+								range of account indexes. In this case the distribution ought to be a
+								UniformIntegerDistribution with min=0 and max set to the number of accounts.
+							-->
+							<key>inviteeDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.UniformIntegerDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- The minimum value (inclusive) of the uniform distribution. -->
+									<key>min</key>
+									<integer>0</integer>
+									<!-- The maximum value (exclusive) of the uniform distribution. -->
+									<key>max</key>
+									<integer>99</integer>
+								</dict>
+							</dict>
+
+							<key>inviteeClumping</key>
+							<true/>
+
+							<!-- Define the distribution of how many attendees will be invited to an event.
+							
+								LogNormal is the best fit to observed data.
+
+
+								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
+								mode should typically be 1, and mean whatever matches the user behavior.
+								Our typical mean is 6. 							
+							     -->
+							<key>inviteeCountDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.LogNormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>1</integer>
+									<!-- mean - average-->
+									<key>median</key>
+									<integer>6</integer>
+									<!-- maximum -->
+									<key>maximum</key>
+									<real>60</real>
+								</dict>
+							</dict>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events
+								will be selected. This is an example of a "Distribution" parameter. The value
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps
+									in the near future, limited to certain days of the week and certain hours
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<true/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Half of all events will be non-recurring -->
+										<key>none</key>
+										<integer>50</integer>
+										
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>10</integer>
+										<key>weekly</key>
+										<integer>20</integer>
+										
+										<!-- Monthly, yearly, daily & weekly limit not so common -->
+										<key>monthly</key>
+										<integer>2</integer>
+										<key>yearly</key>
+										<integer>1</integer>
+										<key>dailylimit</key>
+										<integer>2</integer>
+										<key>weeklylimit</key>
+										<integer>5</integer>
+										
+										<!-- Work days pretty common -->
+										<key>workdays</key>
+										<integer>10</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile accepts invitations to events, handles cancels, and
+					     handles replies received. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Accepter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define how long to wait after seeing a new invitation before
+								accepting it.
+
+								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
+								(i.e., half of the users have accepted by that time).
+							-->
+							<key>acceptDelayDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.LogNormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>300</integer>
+									<!-- median - 50% done-->
+									<key>median</key>
+									<integer>1800</integer>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- A task-creating profile, which will periodically create
+						new tasks at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Tasker</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define the interval (in seconds) at which this profile will use
+								its client to create a new task. -->
+							<key>interval</key>
+							<integer>300</integer>
+
+							<!-- Define how due times (DUE) for the randomly generated tasks
+								will be selected. This is an example of a "Distribution" parameter. The value
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>taskDueDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps
+									in the near future, limited to certain days of the week and certain hours
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+				</array>
+
+				<!-- Determine the frequency at which this client configuration will
+					appear in the clients which are created by the load tester. -->
+				<key>weight</key>
+				<integer>1</integer>
+			</dict>
+		</array>
+	</dict>
+</plist>

Modified: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/config.dist.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/config.dist.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/config.dist.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -50,10 +50,19 @@
 			<integer>8080</integer>
 		</dict>
 
-		<!--  Define whether client data should be saved and re-used. -->
+		<!--  Define whether server supports stats socket. -->
+		<key>serverStats</key>
+		<dict>
+			<key>enabled</key>
+			<true/>
+			<key>Port</key>
+			<integer>8100</integer>
+		</dict>
+
+		<!--  Define whether client data should be re-used. It will always be saved to the specified path.-->
 		<key>clientDataSerialization</key>
 		<dict>
-			<key>Enabled</key>
+			<key>UseOldData</key>
 			<true/>
 			<key>Path</key>
 			<string>/tmp/sim</string>
@@ -119,471 +128,6 @@
 
 		</dict>
 
-		<!-- Define the kinds of software and user behavior the load simulation 
-			will simulate. -->
-		<key>clients</key>
-
-		<!-- Have as many different kinds of software and user behavior configurations 
-			as you want. Each is a dict -->
-		<array>
-
-			<dict>
-
-				<!-- Here is a OS X client simulator. -->
-				<key>software</key>
-				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
-
-				<!-- Arguments to use to initialize the OS_X_10_7 instance. -->
-				<key>params</key>
-				<dict>
-					<!-- Name that appears in logs. -->
-					<key>title</key>
-					<string>10.7</string>
-
-					<!-- OS_X_10_7 can poll the calendar home at some interval. This is 
-						in seconds. -->
-					<key>calendarHomePollInterval</key>
-					<integer>30</integer>
-
-					<!-- If the server advertises xmpp push, OS_X_10_7 can wait for notifications 
-						about calendar home changes instead of polling for them periodically. If 
-						this option is true, then look for the server advertisement for xmpp push 
-						and use it if possible. Still fall back to polling if there is no xmpp push 
-						advertised. -->
-					<key>supportPush</key>
-					<false />
-				</dict>
-
-				<!-- The profiles define certain types of user behavior on top of the 
-					client software being simulated. -->
-				<key>profiles</key>
-				<array>
-
-					<!-- First an event-creating profile, which will periodically create 
-						new events at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Eventer</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new event. -->
-							<key>interval</key>
-							<integer>60</integer>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<true/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile invites new attendees to existing events. 
-					     This profile should no longer be used - use RealisticInviter instead. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Inviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.NormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mu gives the mean of the normal distribution (in seconds). -->
-									<key>mu</key>
-									<integer>60</integer>
-
-									<!-- and sigma gives its standard deviation. -->
-									<key>sigma</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event. Each 
-								set of credentials loaded by the load tester has an index; samples from this 
-								distribution will be added to that index to arrive at the index of some other 
-								credentials, which will be the target of the invitation. -->
-							<key>inviteeDistanceDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>-100</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>101</integer>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile invites some number of new attendees to new events. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.NormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mu gives the mean of the normal distribution (in seconds). -->
-									<key>mu</key>
-									<integer>60</integer>
-
-									<!-- and sigma gives its standard deviation. -->
-									<key>sigma</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event.
-							
-								When inviteeClumping is turned on each invitee is based on a sample of
-								users "close to" the organizer based on account index. If the clumping
-								is too "tight" for the requested number of attendees, then invites for
-								those larger numbers will simply fail (the sim will report that situation).
-								
-								When inviteeClumping is off invitees will be sampled across an entire
-								range of account indexes. In this case the distribution ought to be a
-								UniformIntegerDistribution with min=0 and max set to the number of accounts.
-							-->
-							<key>inviteeDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>-100</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>101</integer>
-								</dict>
-							</dict>
-
-							<key>inviteeClumping</key>
-							<true/>
-
-							<!-- Define the distribution of how many attendees will be invited to an event.
-							
-								LogNormal is the best fit to observed data.
-
-
-								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
-								mode should typically be 1, and mean whatever matches the user behavior.
-								Our typical mean is 6. 							
-							     -->
-							<key>inviteeCountDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>1</integer>
-									<!-- mean - average-->
-									<key>median</key>
-									<integer>6</integer>
-									<!-- maximum -->
-									<key>maximum</key>
-									<real>100</real>
-								</dict>
-							</dict>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<true/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile accepts invitations to events, handles cancels, and
-					     handles replies received. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Accepter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define how long to wait after seeing a new invitation before
-								accepting it.
-
-								For LogNormal "mode" is the peak, "median" is the 50% cummulative value
-								(i.e., half of the user have accepted by that time).								
-							-->
-							<key>acceptDelayDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>300</integer>
-									<!-- median - 50% done-->
-									<key>median</key>
-									<integer>1800</integer>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- A task-creating profile, which will periodically create 
-						new tasks at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Tasker</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new task. -->
-							<key>interval</key>
-							<integer>300</integer>
-
-							<!-- Define how due times (DUE) for the randomly generated tasks 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>taskDueDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-				</array>
-
-				<!-- Determine the frequency at which this client configuration will 
-					appear in the clients which are created by the load tester. -->
-				<key>weight</key>
-				<integer>1</integer>
-			</dict>
-		</array>
-
 		<!-- Define some log observers to report on the load test. -->
 		<key>observers</key>
 		<array>

Modified: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/config.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/config.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/config.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -37,10 +37,19 @@
 			<integer>8080</integer>
 		</dict>
 
-		<!--  Define whether client data should be saved and re-used. -->
+		<!--  Define whether server supports stats socket. -->
+		<key>serverStats</key>
+		<dict>
+			<key>enabled</key>
+			<true/>
+			<key>Port</key>
+			<integer>8100</integer>
+		</dict>
+
+		<!--  Define whether client data should be re-used. It will always be saved to the specified path.-->
 		<key>clientDataSerialization</key>
 		<dict>
-			<key>Enabled</key>
+			<key>UseOldData</key>
 			<true/>
 			<key>Path</key>
 			<string>/tmp/sim</string>
@@ -106,429 +115,6 @@
 
 		</dict>
 
-		<!-- Define the kinds of software and user behavior the load simulation
-			will simulate. -->
-		<key>clients</key>
-
-		<!-- Have as many different kinds of software and user behavior configurations
-			as you want. Each is a dict -->
-		<array>
-
-			<dict>
-
-				<!-- Here is a OS X client simulator. -->
-				<key>software</key>
-				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
-
-				<!-- Arguments to use to initialize the OS_X_10_7 instance. -->
-				<key>params</key>
-				<dict>
-					<!-- Name that appears in logs. -->
-					<key>title</key>
-					<string>10.7</string>
-	
-					<!-- OS_X_10_7 can poll the calendar home at some interval. This is
-						in seconds. -->
-					<key>calendarHomePollInterval</key>
-					<integer>30</integer>
-
-					<!-- If the server advertises xmpp push, OS_X_10_7 can wait for notifications
-						about calendar home changes instead of polling for them periodically. If
-						this option is true, then look for the server advertisement for xmpp push
-						and use it if possible. Still fall back to polling if there is no xmpp push
-						advertised. -->
-					<key>supportPush</key>
-					<false />
-
-					<key>supportAmpPush</key>
-					<true/>
-					<key>ampPushHost</key>
-					<string>localhost</string>
-					<key>ampPushPort</key>
-					<integer>62311</integer>
-				</dict>
-
-				<!-- The profiles define certain types of user behavior on top of the
-					client software being simulated. -->
-				<key>profiles</key>
-				<array>
-
-					<!-- First an event-creating profile, which will periodically create
-						new events at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Eventer</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the interval (in seconds) at which this profile will use
-								its client to create a new event. -->
-							<key>interval</key>
-							<integer>60</integer>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events
-								will be selected. This is an example of a "Distribution" parameter. The value
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps
-									in the near future, limited to certain days of the week and certain hours
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<true/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile invites some number of new attendees to new events. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.NormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mu gives the mean of the normal distribution (in seconds). -->
-									<key>mu</key>
-									<integer>60</integer>
-
-									<!-- and sigma gives its standard deviation. -->
-									<key>sigma</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event.
-							
-								When inviteeClumping is turned on each invitee is based on a sample of
-								users "close to" the organizer based on account index. If the clumping
-								is too "tight" for the requested number of attendees, then invites for
-								those larger numbers will simply fail (the sim will report that situation).
-								
-								When inviteeClumping is off invitees will be sampled across an entire
-								range of account indexes. In this case the distribution ought to be a
-								UniformIntegerDistribution with min=0 and max set to the number of accounts.
-							-->
-							<key>inviteeDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>0</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>99</integer>
-								</dict>
-							</dict>
-
-							<key>inviteeClumping</key>
-							<true/>
-
-							<!-- Define the distribution of how many attendees will be invited to an event.
-							
-								LogNormal is the best fit to observed data.
-
-
-								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
-								mode should typically be 1, and mean whatever matches the user behavior.
-								Our typical mean is 6. 							
-							     -->
-							<key>inviteeCountDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>1</integer>
-									<!-- mean - average-->
-									<key>median</key>
-									<integer>6</integer>
-									<!-- maximum -->
-									<key>maximum</key>
-									<real>60</real>
-								</dict>
-							</dict>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events
-								will be selected. This is an example of a "Distribution" parameter. The value
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps
-									in the near future, limited to certain days of the week and certain hours
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<true/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile accepts invitations to events, handles cancels, and
-					     handles replies received. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Accepter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define how long to wait after seeing a new invitation before
-								accepting it.
-
-								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
-								(i.e., half of the users have accepted by that time).
-							-->
-							<key>acceptDelayDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>300</integer>
-									<!-- median - 50% done-->
-									<key>median</key>
-									<integer>1800</integer>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- A task-creating profile, which will periodically create
-						new tasks at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Tasker</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the interval (in seconds) at which this profile will use
-								its client to create a new task. -->
-							<key>interval</key>
-							<integer>300</integer>
-
-							<!-- Define how due times (DUE) for the randomly generated tasks
-								will be selected. This is an example of a "Distribution" parameter. The values
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>taskDueDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps
-									in the near future, limited to certain days of the week and certain hours
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-				</array>
-
-				<!-- Determine the frequency at which this client configuration will
-					appear in the clients which are created by the load tester. -->
-				<key>weight</key>
-				<integer>1</integer>
-			</dict>
-		</array>
-
 		<!-- Define some log observers to report on the load test. -->
 		<key>observers</key>
 		<array>

Modified: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/population.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/population.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/population.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -396,6 +396,7 @@
         self._failed_clients = []
         self._failed_sim = collections.defaultdict(int)
         self._startTime = datetime.now()
+        self._expired_data = None
 
         # Load parameters from config
         if "thresholdsPath" in params:
@@ -423,6 +424,13 @@
             self._fail_cut_off = params["failCutoff"]
 
 
+    def observe(self, event):
+        if event.get('type') == 'sim-expired':
+            self.simExpired(event)
+        else:
+            super(ReportStatistics, self).observe(event)
+
+
     def countUsers(self):
         return len(self._users)
 
@@ -454,6 +462,10 @@
         self._failed_sim[event['reason']] += 1
 
 
+    def simExpired(self, event):
+        self._expired_data = event['reason']
+
+
     def printMiscellaneous(self, output, items):
         maxColumnWidth = str(len(max(items.iterkeys(), key=len)))
         fmt = "%" + maxColumnWidth + "s : %-s\n"
@@ -480,7 +492,7 @@
             if result is not None:
                 differences.append(result)
 
-        return mean(differences) if differences else "None"
+        return ("%-8.4f" % mean(differences)) if differences else "None"
 
 
     def qos_value(self, method, value):
@@ -518,7 +530,7 @@
             'Start time': self._startTime.strftime('%m/%d %H:%M:%S'),
             'Run time': "%02d:%02d:%02d" % (runHours, runMinutes, runSeconds),
             'CPU Time': "user %-5.2f sys %-5.2f total %02d:%02d:%02d" % (cpuUser, cpuSys, cpuHours, cpuMinutes, cpuSeconds,),
-            'QoS': "%-8.4f" % (self.qos(),),
+            'QoS': self.qos(),
         }
         if self.countClientFailures() > 0:
             items['Failed clients'] = self.countClientFailures()
@@ -527,8 +539,22 @@
         if self.countSimFailures() > 0:
             for reason, count in self._failed_sim.items():
                 items['Failed operation'] = "%s : %d times" % (reason, count,)
+        output.write("* Client\n")
         self.printMiscellaneous(output, items)
         output.write("\n")
+
+        if self._expired_data is not None:
+            items = {
+                "Req/sec" : "%.1f" % (self._expired_data[0],),
+                "Response": "%.1f (ms)" % (self._expired_data[1],),
+                "Slots": "%.2f" % (self._expired_data[2],),
+                "CPU": "%.1f%%" % (self._expired_data[3],),
+            }
+            output.write("* Server (Last 5 minutes)\n")
+            self.printMiscellaneous(output, items)
+            output.write("\n")
+        output.write("* Details\n")
+
         self.printHeader(output, [
                 (label, width)
                 for (label, width, _ignore_fmt)

Modified: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/sim.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/sim.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/sim.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -23,11 +23,15 @@
 from plistlib import readPlist
 from random import Random
 from sys import argv, stdout
+from urlparse import urlsplit
 from xml.parsers.expat import ExpatError
+import json
+import shutil
+import socket
 
 from twisted.python import context
 from twisted.python.filepath import FilePath
-from twisted.python.log import startLogging, addObserver, removeObserver
+from twisted.python.log import startLogging, addObserver, removeObserver, msg
 from twisted.python.usage import UsageError, Options
 from twisted.python.reflect import namedAny
 
@@ -56,6 +60,11 @@
 
 
 
+def safeDivision(value, total, factor=1):
+    return value * factor / total if total else 0
+
+
+
 def generateRecords(count, uidPattern="user%d", passwordPattern="user%d",
     namePattern="User %d", emailPattern="user%d at example.com"):
     for i in xrange(count):
@@ -121,6 +130,7 @@
     """
     config = None
     _defaultConfig = FilePath(__file__).sibling("config.plist")
+    _defaultClients = FilePath(__file__).sibling("clients.plist")
 
     optParameters = [
         ("runtime", "t", None,
@@ -129,6 +139,9 @@
         ("config", None, _defaultConfig,
          "Configuration plist file name from which to read simulation parameters.",
          FilePath),
+        ("clients", None, _defaultClients,
+         "Configuration plist file name from which to read client parameters.",
+         FilePath),
         ]
 
 
@@ -181,7 +194,23 @@
         finally:
             configFile.close()
 
+        try:
+            clientFile = self['clients'].open()
+        except IOError, e:
+            raise UsageError("--clients %s: %s" % (
+                    self['clients'].path, e.strerror))
+        try:
+            try:
+                client_config = readPlist(clientFile)
+                self.config["clients"] = client_config["clients"]
+                if "arrivalInterval" in client_config:
+                    self.config["arrival"]["params"]["interval"] = client_config["arrivalInterval"]
+            except ExpatError, e:
+                raise UsageError("--clients %s: %s" % (self['clients'].path, e))
+        finally:
+            clientFile.close()
 
+
 Arrival = namedtuple('Arrival', 'factory parameters')
 
 
@@ -200,7 +229,7 @@
         user information about the accounts on the server being put
         under load.
     """
-    def __init__(self, server, principalPathTemplate, webadminPort, serializationPath, arrival, parameters, observers=None,
+    def __init__(self, server, principalPathTemplate, webadminPort, serverStats, serializationPath, arrival, parameters, observers=None,
                  records=None, reactor=None, runtime=None, workers=None,
                  configTemplate=None, workerID=None, workerCount=1):
         if reactor is None:
@@ -208,6 +237,7 @@
         self.server = server
         self.principalPathTemplate = principalPathTemplate
         self.webadminPort = webadminPort
+        self.serverStats = serverStats
         self.serializationPath = serializationPath
         self.arrival = arrival
         self.parameters = parameters
@@ -260,15 +290,17 @@
                 principalPathTemplate = config['principalPathTemplate']
 
             if 'clientDataSerialization' in config:
-                if config['clientDataSerialization']['Enabled']:
-                    serializationPath = config['clientDataSerialization']['Path']
-                    if not isdir(serializationPath):
-                        try:
-                            mkdir(serializationPath)
-                        except OSError:
-                            print("Unable to create client data serialization directory: %s" % (serializationPath))
-                            print("Please consult the clientDataSerialization stanza of contrib/performance/loadtest/config.plist")
-                            raise
+                serializationPath = config['clientDataSerialization']['Path']
+                if not config['clientDataSerialization']['UseOldData']:
+                    shutil.rmtree(serializationPath)
+                serializationPath = config['clientDataSerialization']['Path']
+                if not isdir(serializationPath):
+                    try:
+                        mkdir(serializationPath)
+                    except OSError:
+                        print("Unable to create client data serialization directory: %s" % (serializationPath))
+                        print("Please consult the clientDataSerialization stanza of contrib/performance/loadtest/config.plist")
+                        raise
 
             if 'arrival' in config:
                 arrival = Arrival(
@@ -310,6 +342,12 @@
             if config['webadmin']['enabled']:
                 webadminPort = config['webadmin']['HTTPPort']
 
+        serverStats = None
+        if 'serverStats' in config:
+            if config['serverStats']['enabled']:
+                serverStats = config['serverStats']
+                serverStats['server'] = config['server'] if 'server' in config else ''
+
         observers = []
         if 'observers' in config:
             for observer in config['observers']:
@@ -324,11 +362,23 @@
             records.extend(namedAny(loader)(**params))
             output.write("Loaded {0} accounts.\n".format(len(records)))
 
-        return cls(server, principalPathTemplate, webadminPort, serializationPath,
-                   arrival, parameters, observers=observers,
-                   records=records, runtime=runtime, reactor=reactor,
-                   workers=workers, configTemplate=configTemplate,
-                   workerID=workerID, workerCount=workerCount)
+        return cls(
+            server,
+            principalPathTemplate,
+            webadminPort,
+            serverStats,
+            serializationPath,
+            arrival,
+            parameters,
+            observers=observers,
+            records=records,
+            runtime=runtime,
+            reactor=reactor,
+            workers=workers,
+            configTemplate=configTemplate,
+            workerID=workerID,
+            workerCount=workerCount,
+        )
 
 
     @classmethod
@@ -409,7 +459,7 @@
     def run(self, output=stdout):
         self.attachServices(output)
         if self.runtime is not None:
-            self.reactor.callLater(self.runtime, self.reactor.stop)
+            self.reactor.callLater(self.runtime, self.stopAndReport)
         if self.webadminPort:
             self.reactor.listenTCP(self.webadminPort, server.Site(LoadSimAdminResource(self)))
         self.reactor.run()
@@ -417,16 +467,65 @@
 
     def stop(self):
         if self.ms.running:
+            self.updateStats()
             self.ms.stopService()
-            self.reactor.callLater(5, self.reactor.stop)
+            self.reactor.callLater(5, self.stopAndReport)
 
 
     def shutdown(self):
         if self.ms.running:
+            self.updateStats()
             return self.ms.stopService()
 
 
+    def updateStats(self):
+        """
+        Capture server stats and stop.
+        """
 
+        if self.serverStats is not None:
+            _ignore_scheme, hostname, _ignore_path, _ignore_query, _ignore_fragment = urlsplit(self.serverStats["server"])
+            data = self.readStatsSock((hostname.split(":")[0], self.serverStats["Port"],), True)
+            if "Failed" not in data:
+                data = data["5 Minutes"]
+                result = (
+                    safeDivision(float(data["requests"]), 5 * 60),
+                    safeDivision(data["t"], data["requests"]),
+                    safeDivision(float(data["slots"]), data["requests"]),
+                    safeDivision(data["cpu"], data["requests"]),
+                )
+                msg(type="sim-expired", reason=result)
+
+
+    def stopAndReport(self):
+        """
+        Runtime has expired - capture server stats and stop.
+        """
+
+        self.updateStats()
+        self.reactor.stop()
+
+
+    def readStatsSock(self, sockname, useTCP):
+        try:
+            s = socket.socket(socket.AF_INET if useTCP else socket.AF_UNIX, socket.SOCK_STREAM)
+            s.connect(sockname)
+            data = ""
+            while True:
+                d = s.recv(1024)
+                if d:
+                    data += d
+                else:
+                    break
+            s.close()
+            data = json.loads(data)
+        except socket.error:
+            data = {"Failed": "Unable to read statistics from server: %s" % (sockname,)}
+        data["Server"] = sockname
+        return data
+
+
+
 def attachService(reactor, loadsim, service):
     """
     Attach a given L{IService} provider to the given L{IReactorCore}; cause it
@@ -557,7 +656,6 @@
 
 
     def errReceived(self, error):
-        from twisted.python.log import msg
         msg("stderr received from " + str(self.transport.pid))
         msg("    " + repr(error))
 

Deleted: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/events-only.plist
===================================================================
--- CalendarServer/trunk/contrib/performance/loadtest/standard-configs/events-only.plist	2013-11-07 17:02:26 UTC (rev 11901)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/events-only.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,440 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-    Copyright (c) 2011-2012 Apple Inc. All rights reserved.
-
-    Licensed under the Apache License, Version 2.0 (the "License");
-    you may not use this file except in compliance with the License.
-    You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-	<dict>
-		<!-- Define the kinds of software and user behavior the load simulation
-			will simulate. -->
-		<key>clients</key>
-
-		<!-- Have as many different kinds of software and user behavior configurations
-			as you want. Each is a dict -->
-		<array>
-
-			<dict>
-
-				<!-- Here is a Lion iCal simulator. -->
-				<key>software</key>
-				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
-
-				<!-- Arguments to use to initialize the client instance. -->
-				<key>params</key>
-				<dict>
-					<!-- Name that appears in logs. -->
-					<key>title</key>
-					<string>10.7</string>
-
-					<!-- Client can poll the calendar home at some interval. This is 
-						in seconds. -->
-					<key>calendarHomePollInterval</key>
-					<integer>300000</integer>
-
-					<!-- If the server advertises xmpp push, OS X 10.6 can wait for notifications 
-						about calendar home changes instead of polling for them periodically. If 
-						this option is true, then look for the server advertisement for xmpp push 
-						and use it if possible. Still fall back to polling if there is no xmpp push 
-						advertised. -->
-					<key>supportPush</key>
-					<false />
-					<key>supportAmpPush</key>
-					<false />
-				</dict>
-
-				<!-- The profiles define certain types of user behavior on top of the 
-					client software being simulated. -->
-				<key>profiles</key>
-				<array>
-
-					<!-- First an event-creating profile, which will periodically create 
-						new events at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Eventer</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new event. -->
-							<key>interval</key>
-							<integer>20</integer>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<false/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile invites some number of new attendees to new events. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.NormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mu gives the mean of the normal distribution (in seconds). -->
-									<key>mu</key>
-									<integer>10</integer>
-
-									<!-- and sigma gives its standard deviation. -->
-									<key>sigma</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event.
-							
-								When inviteeClumping is turned on each invitee is based on a sample of
-								users "close to" the organizer based on account index. If the clumping
-								is too "tight" for the requested number of attendees, then invites for
-								those larger numbers will simply fail (the sim will report that situation).
-								
-								When inviteeClumping is off invitees will be sampled across an entire
-								range of account indexes. In this case the distribution ought to be a
-								UniformIntegerDistribution with min=0 and max set to the number of accounts.
-							-->
-							<key>inviteeDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>0</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>99</integer>
-								</dict>
-							</dict>
-
-							<key>inviteeClumping</key>
-							<true/>
-
-							<!-- Define the distribution of how many attendees will be invited to an event.
-							
-								LogNormal is the best fit to observed data.
-
-
-								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
-								mode should typically be 1, and mean whatever matches the user behavior.
-								Our typical mean is 6. 							
-							     -->
-							<key>inviteeCountDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>1</integer>
-									<!-- mean - average-->
-									<key>median</key>
-									<integer>6</integer>
-									<!-- maximum -->
-									<key>maximum</key>
-									<real>100</real>
-								</dict>
-							</dict>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<true/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile accepts invitations to events, handles cancels, and
-					     handles replies received. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Accepter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define how long to wait after seeing a new invitation before
-								accepting it.
-
-								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
-								(i.e., half of the users have accepted by that time).
-							-->
-							<key>acceptDelayDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>300</integer>
-									<!-- median - 50% done-->
-									<key>median</key>
-									<integer>1800</integer>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- A task-creating profile, which will periodically create 
-						new tasks at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Tasker</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new task. -->
-							<key>interval</key>
-							<integer>300</integer>
-
-							<!-- Define how due times (DUE) for the randomly generated tasks 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>taskDueDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-				</array>
-
-				<!-- Determine the frequency at which this client configuration will 
-					appear in the clients which are created by the load tester. -->
-				<key>weight</key>
-				<integer>1</integer>
-			</dict>
-		</array>
-	</dict>
-</plist>

Copied: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/events-only.plist (from rev 11901, CalendarServer/trunk/contrib/performance/loadtest/standard-configs/events-only.plist)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/events-only.plist	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/events-only.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,440 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+    Copyright (c) 2011-2012 Apple Inc. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+	<dict>
+		<!-- Define the kinds of software and user behavior the load simulation
+			will simulate. -->
+		<key>clients</key>
+
+		<!-- Have as many different kinds of software and user behavior configurations
+			as you want. Each is a dict -->
+		<array>
+
+			<dict>
+
+				<!-- Here is a Lion iCal simulator. -->
+				<key>software</key>
+				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
+
+				<!-- Arguments to use to initialize the client instance. -->
+				<key>params</key>
+				<dict>
+					<!-- Name that appears in logs. -->
+					<key>title</key>
+					<string>10.7</string>
+
+					<!-- Client can poll the calendar home at some interval. This is 
+						in seconds. -->
+					<key>calendarHomePollInterval</key>
+					<integer>300000</integer>
+
+					<!-- If the server advertises xmpp push, OS X 10.6 can wait for notifications 
+						about calendar home changes instead of polling for them periodically. If 
+						this option is true, then look for the server advertisement for xmpp push 
+						and use it if possible. Still fall back to polling if there is no xmpp push 
+						advertised. -->
+					<key>supportPush</key>
+					<false />
+					<key>supportAmpPush</key>
+					<false />
+				</dict>
+
+				<!-- The profiles define certain types of user behavior on top of the 
+					client software being simulated. -->
+				<key>profiles</key>
+				<array>
+
+					<!-- First an event-creating profile, which will periodically create 
+						new events at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Eventer</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define the interval (in seconds) at which this profile will use 
+								its client to create a new event. -->
+							<key>interval</key>
+							<integer>20</integer>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events 
+								will be selected. This is an example of a "Distribution" parameter. The value 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<false/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Half of all events will be non-recurring -->
+										<key>none</key>
+										<integer>50</integer>
+										
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>10</integer>
+										<key>weekly</key>
+										<integer>20</integer>
+										
+										<!-- Monthly, yearly, daily & weekly limit not so common -->
+										<key>monthly</key>
+										<integer>2</integer>
+										<key>yearly</key>
+										<integer>1</integer>
+										<key>dailylimit</key>
+										<integer>2</integer>
+										<key>weeklylimit</key>
+										<integer>5</integer>
+										
+										<!-- Work days pretty common -->
+										<key>workdays</key>
+										<integer>10</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile invites some number of new attendees to new events. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define the frequency at which new invitations will be sent out. -->
+							<key>sendInvitationDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.NormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mu gives the mean of the normal distribution (in seconds). -->
+									<key>mu</key>
+									<integer>10</integer>
+
+									<!-- and sigma gives its standard deviation. -->
+									<key>sigma</key>
+									<integer>5</integer>
+								</dict>
+							</dict>
+
+							<!-- Define the distribution of who will be invited to an event.
+							
+								When inviteeClumping is turned on each invitee is based on a sample of
+								users "close to" the organizer based on account index. If the clumping
+								is too "tight" for the requested number of attendees, then invites for
+								those larger numbers will simply fail (the sim will report that situation).
+								
+								When inviteeClumping is off invitees will be sampled across an entire
+								range of account indexes. In this case the distribution ought to be a
+								UniformIntegerDistribution with min=0 and max set to the number of accounts.
+							-->
+							<key>inviteeDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.UniformIntegerDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- The minimum value (inclusive) of the uniform distribution. -->
+									<key>min</key>
+									<integer>0</integer>
+									<!-- The maximum value (exclusive) of the uniform distribution. -->
+									<key>max</key>
+									<integer>99</integer>
+								</dict>
+							</dict>
+
+							<key>inviteeClumping</key>
+							<true/>
+
+							<!-- Define the distribution of how many attendees will be invited to an event.
+							
+								LogNormal is the best fit to observed data.
+
+
+								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
+								mode should typically be 1, and mean whatever matches the user behavior.
+								Our typical mean is 6. 							
+							     -->
+							<key>inviteeCountDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.LogNormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>1</integer>
+									<!-- mean - average-->
+									<key>median</key>
+									<integer>6</integer>
+									<!-- maximum -->
+									<key>maximum</key>
+									<real>100</real>
+								</dict>
+							</dict>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events 
+								will be selected. This is an example of a "Distribution" parameter. The value 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<true/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Half of all events will be non-recurring -->
+										<key>none</key>
+										<integer>50</integer>
+										
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>10</integer>
+										<key>weekly</key>
+										<integer>20</integer>
+										
+										<!-- Monthly, yearly, daily & weekly limit not so common -->
+										<key>monthly</key>
+										<integer>2</integer>
+										<key>yearly</key>
+										<integer>1</integer>
+										<key>dailylimit</key>
+										<integer>2</integer>
+										<key>weeklylimit</key>
+										<integer>5</integer>
+										
+										<!-- Work days pretty common -->
+										<key>workdays</key>
+										<integer>10</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile accepts invitations to events, handles cancels, and
+					     handles replies received. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Accepter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define how long to wait after seeing a new invitation before
+								accepting it.
+
+								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
+								(i.e., half of the users have accepted by that time).								
+							-->
+							<key>acceptDelayDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.LogNormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>300</integer>
+									<!-- median - 50% done-->
+									<key>median</key>
+									<integer>1800</integer>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- A task-creating profile, which will periodically create 
+						new tasks at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Tasker</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define the interval (in seconds) at which this profile will use 
+								its client to create a new task. -->
+							<key>interval</key>
+							<integer>300</integer>
+
+							<!-- Define how due times (DUE) for the randomly generated tasks 
+								will be selected. This is an example of a "Distribution" parameter. The value 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>taskDueDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+				</array>
+
+				<!-- Determine the frequency at which this client configuration will 
+					appear in the clients which are created by the load tester. -->
+				<key>weight</key>
+				<integer>1</integer>
+			</dict>
+		</array>
+	</dict>
+</plist>

Deleted: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-accepts.plist
===================================================================
--- CalendarServer/trunk/contrib/performance/loadtest/standard-configs/invites-accepts.plist	2013-11-07 17:02:26 UTC (rev 11901)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-accepts.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,419 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-    Copyright (c) 2011-2012 Apple Inc. All rights reserved.
-
-    Licensed under the Apache License, Version 2.0 (the "License");
-    you may not use this file except in compliance with the License.
-    You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-	<dict>
-		<!-- Define the kinds of software and user behavior the load simulation
-			will simulate. -->
-		<key>clients</key>
-
-		<!-- Have as many different kinds of software and user behavior configurations
-			as you want. Each is a dict -->
-		<array>
-
-			<dict>
-
-				<!-- Here is a Lion iCal simulator. -->
-				<key>software</key>
-				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
-
-				<!-- Arguments to use to initialize the client instance. -->
-				<key>params</key>
-				<dict>
-					<!-- Name that appears in logs. -->
-					<key>title</key>
-					<string>10.7</string>
-
-					<!-- Client can poll the calendar home at some interval. This is 
-						in seconds. -->
-					<key>calendarHomePollInterval</key>
-					<integer>300000</integer>
-
-					<!-- If the server advertises xmpp push, OS X 10.6 can wait for notifications 
-						about calendar home changes instead of polling for them periodically. If 
-						this option is true, then look for the server advertisement for xmpp push 
-						and use it if possible. Still fall back to polling if there is no xmpp push 
-						advertised. -->
-					<key>supportPush</key>
-					<false />
-					<key>supportAmpPush</key>
-					<true />
-				</dict>
-
-				<!-- The profiles define certain types of user behavior on top of the 
-					client software being simulated. -->
-				<key>profiles</key>
-				<array>
-
-					<!-- First an event-creating profile, which will periodically create 
-						new events at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Eventer</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new event. -->
-							<key>interval</key>
-							<integer>20</integer>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<false/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile invites some number of new attendees to new events. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.FixedDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- interval (in seconds). -->
-									<key>value</key>
-									<integer>150</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event.
-							
-								When inviteeClumping is turned on each invitee is based on a sample of
-								users "close to" the organizer based on account index. If the clumping
-								is too "tight" for the requested number of attendees, then invites for
-								those larger numbers will simply fail (the sim will report that situation).
-								
-								When inviteeClumping is off invitees will be sampled across an entire
-								range of account indexes. In this case the distribution ought to be a
-								UniformIntegerDistribution with min=0 and max set to the number of accounts.
-							-->
-							<key>inviteeDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>0</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>99</integer>
-								</dict>
-							</dict>
-
-							<key>inviteeClumping</key>
-							<true/>
-
-							<!-- Define the distribution of how many attendees will be invited to an event.
-							
-								LogNormal is the best fit to observed data.
-
-
-								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
-								mode should typically be 1, and mean whatever matches the user behavior.
-								Our typical mean is 6. 							
-							     -->
-							<key>inviteeCountDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.FixedDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- Number of attendees. -->
-									<key>value</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<false/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>100</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile accepts invitations to events, handles cancels, and
-					     handles replies received. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Accepter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define how long to wait after seeing a new invitation before
-								accepting it.
-
-								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
-								(i.e., half of the users have accepted by that time).								
-							-->
-							<key>acceptDelayDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformDiscreteDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- Set of values to use - will be chosen in random order. -->
-									<key>values</key>
-									<array>
-										<integer>0</integer>
-										<integer>5</integer>
-										<integer>10</integer>
-										<integer>15</integer>
-										<integer>20</integer>
-										<integer>25</integer>
-										<integer>30</integer>
-									</array>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- A task-creating profile, which will periodically create 
-						new tasks at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Tasker</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new task. -->
-							<key>interval</key>
-							<integer>300</integer>
-
-							<!-- Define how due times (DUE) for the randomly generated tasks 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>taskDueDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-				</array>
-
-				<!-- Determine the frequency at which this client configuration will 
-					appear in the clients which are created by the load tester. -->
-				<key>weight</key>
-				<integer>1</integer>
-			</dict>
-		</array>
-
-		<!-- Determine the interval between client creation. -->
-		<key>arrivalInterval</key>
-		<integer>5</integer>
-	</dict>
-</plist>

Copied: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-accepts.plist (from rev 11901, CalendarServer/trunk/contrib/performance/loadtest/standard-configs/invites-accepts.plist)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-accepts.plist	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-accepts.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,419 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+    Copyright (c) 2011-2012 Apple Inc. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+	<dict>
+		<!-- Define the kinds of software and user behavior the load simulation
+			will simulate. -->
+		<key>clients</key>
+
+		<!-- Have as many different kinds of software and user behavior configurations
+			as you want. Each is a dict -->
+		<array>
+
+			<dict>
+
+				<!-- Here is a Lion iCal simulator. -->
+				<key>software</key>
+				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
+
+				<!-- Arguments to use to initialize the client instance. -->
+				<key>params</key>
+				<dict>
+					<!-- Name that appears in logs. -->
+					<key>title</key>
+					<string>10.7</string>
+
+					<!-- Client can poll the calendar home at some interval. This is 
+						in seconds. -->
+					<key>calendarHomePollInterval</key>
+					<integer>300000</integer>
+
+					<!-- If the server advertises xmpp push, OS X 10.6 can wait for notifications 
+						about calendar home changes instead of polling for them periodically. If 
+						this option is true, then look for the server advertisement for xmpp push 
+						and use it if possible. Still fall back to polling if there is no xmpp push 
+						advertised. -->
+					<key>supportPush</key>
+					<false />
+					<key>supportAmpPush</key>
+					<true />
+				</dict>
+
+				<!-- The profiles define certain types of user behavior on top of the 
+					client software being simulated. -->
+				<key>profiles</key>
+				<array>
+
+					<!-- First an event-creating profile, which will periodically create 
+						new events at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Eventer</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define the interval (in seconds) at which this profile will use 
+								its client to create a new event. -->
+							<key>interval</key>
+							<integer>20</integer>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events 
+								will be selected. This is an example of a "Distribution" parameter. The value 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<false/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Half of all events will be non-recurring -->
+										<key>none</key>
+										<integer>50</integer>
+										
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>10</integer>
+										<key>weekly</key>
+										<integer>20</integer>
+										
+										<!-- Monthly, yearly, daily & weekly limit not so common -->
+										<key>monthly</key>
+										<integer>2</integer>
+										<key>yearly</key>
+										<integer>1</integer>
+										<key>dailylimit</key>
+										<integer>2</integer>
+										<key>weeklylimit</key>
+										<integer>5</integer>
+										
+										<!-- Work days pretty common -->
+										<key>workdays</key>
+										<integer>10</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile invites some number of new attendees to new events. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define the frequency at which new invitations will be sent out. -->
+							<key>sendInvitationDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.FixedDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- interval (in seconds). -->
+									<key>value</key>
+									<integer>150</integer>
+								</dict>
+							</dict>
+
+							<!-- Define the distribution of who will be invited to an event.
+							
+								When inviteeClumping is turned on each invitee is based on a sample of
+								users "close to" the organizer based on account index. If the clumping
+								is too "tight" for the requested number of attendees, then invites for
+								those larger numbers will simply fail (the sim will report that situation).
+								
+								When inviteeClumping is off invitees will be sampled across an entire
+								range of account indexes. In this case the distribution ought to be a
+								UniformIntegerDistribution with min=0 and max set to the number of accounts.
+							-->
+							<key>inviteeDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.UniformIntegerDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- The minimum value (inclusive) of the uniform distribution. -->
+									<key>min</key>
+									<integer>0</integer>
+									<!-- The maximum value (exclusive) of the uniform distribution. -->
+									<key>max</key>
+									<integer>99</integer>
+								</dict>
+							</dict>
+
+							<key>inviteeClumping</key>
+							<true/>
+
+							<!-- Define the distribution of how many attendees will be invited to an event.
+							
+								LogNormal is the best fit to observed data.
+
+
+								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
+								mode should typically be 1, and mean whatever matches the user behavior.
+								Our typical mean is 6. 							
+							     -->
+							<key>inviteeCountDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.FixedDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- Number of attendees. -->
+									<key>value</key>
+									<integer>5</integer>
+								</dict>
+							</dict>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events 
+								will be selected. This is an example of a "Distribution" parameter. The value 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<false/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>100</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile accepts invitations to events, handles cancels, and
+					     handles replies received. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Accepter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define how long to wait after seeing a new invitation before
+								accepting it.
+
+								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
+								(i.e., half of the users have accepted by that time).
+							-->
+							<key>acceptDelayDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.UniformDiscreteDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- Set of values to use - will be chosen in random order. -->
+									<key>values</key>
+									<array>
+										<integer>0</integer>
+										<integer>5</integer>
+										<integer>10</integer>
+										<integer>15</integer>
+										<integer>20</integer>
+										<integer>25</integer>
+										<integer>30</integer>
+									</array>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- A task-creating profile, which will periodically create 
+						new tasks at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Tasker</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define the interval (in seconds) at which this profile will use 
+								its client to create a new task. -->
+							<key>interval</key>
+							<integer>300</integer>
+
+							<!-- Define how due times (DUE) for the randomly generated tasks 
+								will be selected. This is an example of a "Distribution" parameter. The value 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>taskDueDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+				</array>
+
+				<!-- Determine the frequency at which this client configuration will 
+					appear in the clients which are created by the load tester. -->
+				<key>weight</key>
+				<integer>1</integer>
+			</dict>
+		</array>
+
+		<!-- Determine the interval between client creation. -->
+		<key>arrivalInterval</key>
+		<integer>5</integer>
+	</dict>
+</plist>

Deleted: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only-recurring.plist
===================================================================
--- CalendarServer/trunk/contrib/performance/loadtest/standard-configs/invites-only-recurring.plist	2013-11-07 17:02:26 UTC (rev 11901)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only-recurring.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,414 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-    Copyright (c) 2011-2012 Apple Inc. All rights reserved.
-
-    Licensed under the Apache License, Version 2.0 (the "License");
-    you may not use this file except in compliance with the License.
-    You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-	<dict>
-		<!-- Define the kinds of software and user behavior the load simulation
-			will simulate. -->
-		<key>clients</key>
-
-		<!-- Have as many different kinds of software and user behavior configurations
-			as you want. Each is a dict -->
-		<array>
-
-			<dict>
-
-				<!-- Here is a Lion iCal simulator. -->
-				<key>software</key>
-				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
-
-				<!-- Arguments to use to initialize the client instance. -->
-				<key>params</key>
-				<dict>
-					<!-- Name that appears in logs. -->
-					<key>title</key>
-					<string>10.7</string>
-
-					<!-- Client can poll the calendar home at some interval. This is 
-						in seconds. -->
-					<key>calendarHomePollInterval</key>
-					<integer>300000</integer>
-
-					<!-- If the server advertises xmpp push, OS X 10.6 can wait for notifications 
-						about calendar home changes instead of polling for them periodically. If 
-						this option is true, then look for the server advertisement for xmpp push 
-						and use it if possible. Still fall back to polling if there is no xmpp push 
-						advertised. -->
-					<key>supportPush</key>
-					<false />
-					<key>supportAmpPush</key>
-					<false />
-				</dict>
-
-				<!-- The profiles define certain types of user behavior on top of the 
-					client software being simulated. -->
-				<key>profiles</key>
-				<array>
-
-					<!-- First an event-creating profile, which will periodically create 
-						new events at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Eventer</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new event. -->
-							<key>interval</key>
-							<integer>20</integer>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<false/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile invites some number of new attendees to new events. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.FixedDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- interval (in seconds). -->
-									<key>value</key>
-									<integer>120</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event.
-							
-								When inviteeClumping is turned on each invitee is based on a sample of
-								users "close to" the organizer based on account index. If the clumping
-								is too "tight" for the requested number of attendees, then invites for
-								those larger numbers will simply fail (the sim will report that situation).
-								
-								When inviteeClumping is off invitees will be sampled across an entire
-								range of account indexes. In this case the distribution ought to be a
-								UniformIntegerDistribution with min=0 and max set to the number of accounts.
-							-->
-							<key>inviteeDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>0</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>99</integer>
-								</dict>
-							</dict>
-
-							<key>inviteeClumping</key>
-							<true/>
-
-							<!-- Define the distribution of how many attendees will be invited to an event.
-							
-								LogNormal is the best fit to observed data.
-
-
-								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
-								mode should typically be 1, and mean whatever matches the user behavior.
-								Our typical mean is 6. 							
-							     -->
-							<key>inviteeCountDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.FixedDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- Number of attendees. -->
-									<key>value</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<true/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>100</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile accepts invitations to events, handles cancels, and
-					     handles replies received. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Accepter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define how long to wait after seeing a new invitation before
-								accepting it.
-
-								For LogNormal "mode" is the peak, "median" is the 50% cummulative value
-								(i.e., half of the user have accepted by that time).								
-							-->
-							<key>acceptDelayDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>300</integer>
-									<!-- median - 50% done-->
-									<key>median</key>
-									<integer>1800</integer>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- A task-creating profile, which will periodically create 
-						new tasks at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Tasker</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new task. -->
-							<key>interval</key>
-							<integer>300</integer>
-
-							<!-- Define how due times (DUE) for the randomly generated tasks 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>taskDueDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-				</array>
-
-				<!-- Determine the frequency at which this client configuration will 
-					appear in the clients which are created by the load tester. -->
-				<key>weight</key>
-				<integer>1</integer>
-			</dict>
-		</array>
-
-		<!-- Determine the interval between client creation. -->
-		<key>arrivalInterval</key>
-		<integer>4</integer>
-	</dict>
-</plist>

Copied: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only-recurring.plist (from rev 11901, CalendarServer/trunk/contrib/performance/loadtest/standard-configs/invites-only-recurring.plist)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only-recurring.plist	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only-recurring.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,414 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+    Copyright (c) 2011-2012 Apple Inc. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+	<dict>
+		<!-- Define the kinds of software and user behavior the load simulation
+			will simulate. -->
+		<key>clients</key>
+
+		<!-- Have as many different kinds of software and user behavior configurations
+			as you want. Each is a dict -->
+		<array>
+
+			<dict>
+
+				<!-- Here is a Lion iCal simulator. -->
+				<key>software</key>
+				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
+
+				<!-- Arguments to use to initialize the client instance. -->
+				<key>params</key>
+				<dict>
+					<!-- Name that appears in logs. -->
+					<key>title</key>
+					<string>10.7</string>
+
+					<!-- Client can poll the calendar home at some interval. This is 
+						in seconds. -->
+					<key>calendarHomePollInterval</key>
+					<integer>300000</integer>
+
+					<!-- If the server advertises xmpp push, OS X 10.6 can wait for notifications 
+						about calendar home changes instead of polling for them periodically. If 
+						this option is true, then look for the server advertisement for xmpp push 
+						and use it if possible. Still fall back to polling if there is no xmpp push 
+						advertised. -->
+					<key>supportPush</key>
+					<false />
+					<key>supportAmpPush</key>
+					<false />
+				</dict>
+
+				<!-- The profiles define certain types of user behavior on top of the 
+					client software being simulated. -->
+				<key>profiles</key>
+				<array>
+
+					<!-- First an event-creating profile, which will periodically create 
+						new events at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Eventer</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define the interval (in seconds) at which this profile will use 
+								its client to create a new event. -->
+							<key>interval</key>
+							<integer>20</integer>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events 
+								will be selected. This is an example of a "Distribution" parameter. The values 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<false/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Half of all events will be non-recurring -->
+										<key>none</key>
+										<integer>50</integer>
+										
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>10</integer>
+										<key>weekly</key>
+										<integer>20</integer>
+										
+										<!-- Monthly, yearly, daily & weekly limit not so common -->
+										<key>monthly</key>
+										<integer>2</integer>
+										<key>yearly</key>
+										<integer>1</integer>
+										<key>dailylimit</key>
+										<integer>2</integer>
+										<key>weeklylimit</key>
+										<integer>5</integer>
+										
+										<!-- Work days pretty common -->
+										<key>workdays</key>
+										<integer>10</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile invites some number of new attendees to new events. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define the frequency at which new invitations will be sent out. -->
+							<key>sendInvitationDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.FixedDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- interval (in seconds). -->
+									<key>value</key>
+									<integer>120</integer>
+								</dict>
+							</dict>
+
+							<!-- Define the distribution of who will be invited to an event.
+							
+								When inviteeClumping is turned on each invitee is based on a sample of
+								users "close to" the organizer based on account index. If the clumping
+								is too "tight" for the requested number of attendees, then invites for
+								those larger numbers will simply fail (the sim will report that situation).
+								
+								When inviteeClumping is off invitees will be sampled across an entire
+								range of account indexes. In this case the distribution ought to be a
+								UniformIntegerDistribution with min=0 and max set to the number of accounts.
+							-->
+							<key>inviteeDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.UniformIntegerDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- The minimum value (inclusive) of the uniform distribution. -->
+									<key>min</key>
+									<integer>0</integer>
+									<!-- The maximum value (exclusive) of the uniform distribution. -->
+									<key>max</key>
+									<integer>99</integer>
+								</dict>
+							</dict>
+
+							<key>inviteeClumping</key>
+							<true/>
+
+							<!-- Define the distribution of how many attendees will be invited to an event.
+							
+								LogNormal is the best fit to observed data.
+
+
+								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
+								mode should typically be 1, and mean whatever matches the user behavior.
+								Our typical mean is 6. 							
+							     -->
+							<key>inviteeCountDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.FixedDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- Number of attendees. -->
+									<key>value</key>
+									<integer>5</integer>
+								</dict>
+							</dict>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events 
+								will be selected. This is an example of a "Distribution" parameter. The values 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<true/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>100</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile accepts invitations to events, handles cancels, and
+					     handles replies received. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Accepter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define how long to wait after seeing a new invitation before
+								accepting it.
+
+								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
+								(i.e., half of the users have accepted by that time).
+							-->
+							<key>acceptDelayDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.LogNormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>300</integer>
+									<!-- median - 50% done-->
+									<key>median</key>
+									<integer>1800</integer>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- A task-creating profile, which will periodically create 
+						new tasks at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Tasker</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define the interval (in seconds) at which this profile will use 
+								its client to create a new task. -->
+							<key>interval</key>
+							<integer>300</integer>
+
+							<!-- Define how due times (DUE) for the randomly generated tasks 
+								will be selected. This is an example of a "Distribution" parameter. The values 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>taskDueDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+				</array>
+
+				<!-- Determine the frequency at which this client configuration will 
+					appear in the clients which are created by the load tester. -->
+				<key>weight</key>
+				<integer>1</integer>
+			</dict>
+		</array>
+
+		<!-- Determine the interval between client creation. -->
+		<key>arrivalInterval</key>
+		<integer>4</integer>
+	</dict>
+</plist>

Deleted: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only.plist
===================================================================
--- CalendarServer/trunk/contrib/performance/loadtest/standard-configs/invites-only.plist	2013-11-07 17:02:26 UTC (rev 11901)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,430 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-    Copyright (c) 2011-2012 Apple Inc. All rights reserved.
-
-    Licensed under the Apache License, Version 2.0 (the "License");
-    you may not use this file except in compliance with the License.
-    You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-	<dict>
-		<!-- Define the kinds of software and user behavior the load simulation
-			will simulate. -->
-		<key>clients</key>
-
-		<!-- Have as many different kinds of software and user behavior configurations
-			as you want. Each is a dict -->
-		<array>
-
-			<dict>
-
-				<!-- Here is a Lion iCal simulator. -->
-				<key>software</key>
-				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
-
-				<!-- Arguments to use to initialize the client instance. -->
-				<key>params</key>
-				<dict>
-					<!-- Name that appears in logs. -->
-					<key>title</key>
-					<string>10.7</string>
-
-					<!-- Client can poll the calendar home at some interval. This is 
-						in seconds. -->
-					<key>calendarHomePollInterval</key>
-					<integer>300000</integer>
-
-					<!-- If the server advertises xmpp push, OS X 10.6 can wait for notifications 
-						about calendar home changes instead of polling for them periodically. If 
-						this option is true, then look for the server advertisement for xmpp push 
-						and use it if possible. Still fall back to polling if there is no xmpp push 
-						advertised. -->
-					<key>supportPush</key>
-					<false />
-					<key>supportAmpPush</key>
-					<false />
-				</dict>
-
-				<!-- The profiles define certain types of user behavior on top of the 
-					client software being simulated. -->
-				<key>profiles</key>
-				<array>
-
-					<!-- First an event-creating profile, which will periodically create 
-						new events at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Eventer</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new event. -->
-							<key>interval</key>
-							<integer>20</integer>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<false/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile invites some number of new attendees to new events. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<true/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.FixedDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- interval (in seconds). -->
-									<key>value</key>
-									<integer>120</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event.
-							
-								When inviteeClumping is turned on each invitee is based on a sample of
-								users "close to" the organizer based on account index. If the clumping
-								is too "tight" for the requested number of attendees, then invites for
-								those larger numbers will simply fail (the sim will report that situation).
-								
-								When inviteeClumping is off invitees will be sampled across an entire
-								range of account indexes. In this case the distribution ought to be a
-								UniformIntegerDistribution with min=0 and max set to the number of accounts.
-							-->
-							<key>inviteeDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>0</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>99</integer>
-								</dict>
-							</dict>
-
-							<key>inviteeClumping</key>
-							<true/>
-
-							<!-- Define the distribution of how many attendees will be invited to an event.
-							
-								LogNormal is the best fit to observed data.
-
-
-								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
-								mode should typically be 1, and mean whatever matches the user behavior.
-								Our typical mean is 6. 							
-							     -->
-							<key>inviteeCountDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.FixedDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- Number of attendees. -->
-									<key>value</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define how start times (DTSTART) for the randomly generated events 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>eventStartDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-
-							<!-- Define how recurrences are created. -->
-							<key>recurrenceDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized.  We have a fixed set of
-								     RRULEs defined for this distribution and pick each based on a
-								     weight. -->
-								<key>type</key>
-								<string>contrib.performance.stats.RecurrenceDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- False to disable RRULEs -->
-									<key>allowRecurrence</key>
-									<false/>
-
-									<!-- These are the weights for the specific set of RRULEs. -->
-									<key>weights</key>
-									<dict>
-										<!-- Half of all events will be non-recurring -->
-										<key>none</key>
-										<integer>50</integer>
-										
-										<!-- Daily and weekly are pretty common -->
-										<key>daily</key>
-										<integer>10</integer>
-										<key>weekly</key>
-										<integer>20</integer>
-										
-										<!-- Monthly, yearly, daily & weekly limit not so common -->
-										<key>monthly</key>
-										<integer>2</integer>
-										<key>yearly</key>
-										<integer>1</integer>
-										<key>dailylimit</key>
-										<integer>2</integer>
-										<key>weeklylimit</key>
-										<integer>5</integer>
-										
-										<!-- Work days pretty common -->
-										<key>workdays</key>
-										<integer>10</integer>
-									</dict>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- This profile accepts invitations to events, handles cancels, and
-					     handles replies received. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Accepter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define how long to wait after seeing a new invitation before
-								accepting it.
-
-								For LogNormal "mode" is the peak, "median" is the 50% cummulative value
-								(i.e., half of the user have accepted by that time).								
-							-->
-							<key>acceptDelayDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.LogNormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mode - peak-->
-									<key>mode</key>
-									<integer>300</integer>
-									<!-- median - 50% done-->
-									<key>median</key>
-									<integer>1800</integer>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-					<!-- A task-creating profile, which will periodically create 
-						new tasks at a random time on a random calendar. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Tasker</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the interval (in seconds) at which this profile will use 
-								its client to create a new task. -->
-							<key>interval</key>
-							<integer>300</integer>
-
-							<!-- Define how due times (DUE) for the randomly generated tasks 
-								will be selected. This is an example of a "Distribution" parameter. The value 
-								for most "Distribution" parameters are interchangeable and extensible. -->
-							<key>taskDueDistribution</key>
-							<dict>
-
-								<!-- This distribution is pretty specialized. It produces timestamps 
-									in the near future, limited to certain days of the week and certain hours 
-									of the day. -->
-								<key>type</key>
-								<string>contrib.performance.stats.WorkDistribution</string>
-
-								<key>params</key>
-								<dict>
-									<!-- These are the days of the week the distribution will use. -->
-									<key>daysOfWeek</key>
-									<array>
-										<string>mon</string>
-										<string>tue</string>
-										<string>wed</string>
-										<string>thu</string>
-										<string>fri</string>
-									</array>
-
-									<!-- The earliest hour of a day at which an event might be scheduled. -->
-									<key>beginHour</key>
-									<integer>8</integer>
-
-									<!-- And the latest hour of a day (at which an event will be scheduled 
-										to begin!). -->
-									<key>endHour</key>
-									<integer>16</integer>
-
-									<!-- The timezone in which the event is scheduled. (XXX Does this 
-										really work right?) -->
-									<key>tzname</key>
-									<string>America/Los_Angeles</string>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
-				</array>
-
-				<!-- Determine the frequency at which this client configuration will 
-					appear in the clients which are created by the load tester. -->
-				<key>weight</key>
-				<integer>1</integer>
-			</dict>
-		</array>
-	</dict>
-</plist>

Copied: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only.plist (from rev 11901, CalendarServer/trunk/contrib/performance/loadtest/standard-configs/invites-only.plist)
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only.plist	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/standard-configs/invites-only.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,430 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+    Copyright (c) 2011-2012 Apple Inc. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+	<dict>
+		<!-- Define the kinds of software and user behavior the load simulation
+			will simulate. -->
+		<key>clients</key>
+
+		<!-- Have as many different kinds of software and user behavior configurations
+			as you want. Each is a dict -->
+		<array>
+
+			<dict>
+
+				<!-- Here is a Lion iCal simulator. -->
+				<key>software</key>
+				<string>contrib.performance.loadtest.ical.OS_X_10_7</string>
+
+				<!-- Arguments to use to initialize the client instance. -->
+				<key>params</key>
+				<dict>
+					<!-- Name that appears in logs. -->
+					<key>title</key>
+					<string>10.7</string>
+
+					<!-- Client can poll the calendar home at some interval. This is 
+						in seconds. -->
+					<key>calendarHomePollInterval</key>
+					<integer>300000</integer>
+
+					<!-- If the server advertises xmpp push, OS X 10.6 can wait for notifications 
+						about calendar home changes instead of polling for them periodically. If 
+						this option is true, then look for the server advertisement for xmpp push 
+						and use it if possible. Still fall back to polling if there is no xmpp push 
+						advertised. -->
+					<key>supportPush</key>
+					<false />
+					<key>supportAmpPush</key>
+					<false />
+				</dict>
+
+				<!-- The profiles define certain types of user behavior on top of the 
+					client software being simulated. -->
+				<key>profiles</key>
+				<array>
+
+					<!-- First an event-creating profile, which will periodically create 
+						new events at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Eventer</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define the interval (in seconds) at which this profile will use 
+								its client to create a new event. -->
+							<key>interval</key>
+							<integer>20</integer>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events 
+								will be selected. This is an example of a "Distribution" parameter. The value 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<false/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Half of all events will be non-recurring -->
+										<key>none</key>
+										<integer>50</integer>
+										
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>10</integer>
+										<key>weekly</key>
+										<integer>20</integer>
+										
+										<!-- Monthly, yearly, daily & weekly limit not so common -->
+										<key>monthly</key>
+										<integer>2</integer>
+										<key>yearly</key>
+										<integer>1</integer>
+										<key>dailylimit</key>
+										<integer>2</integer>
+										<key>weeklylimit</key>
+										<integer>5</integer>
+										
+										<!-- Work days pretty common -->
+										<key>workdays</key>
+										<integer>10</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile invites some number of new attendees to new events. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.RealisticInviter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<true/>
+
+							<!-- Define the frequency at which new invitations will be sent out. -->
+							<key>sendInvitationDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.FixedDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- interval (in seconds). -->
+									<key>value</key>
+									<integer>120</integer>
+								</dict>
+							</dict>
+
+							<!-- Define the distribution of who will be invited to an event.
+							
+								When inviteeClumping is turned on each invitee is based on a sample of
+								users "close to" the organizer based on account index. If the clumping
+								is too "tight" for the requested number of attendees, then invites for
+								those larger numbers will simply fail (the sim will report that situation).
+								
+								When inviteeClumping is off invitees will be sampled across an entire
+								range of account indexes. In this case the distribution ought to be a
+								UniformIntegerDistribution with min=0 and max set to the number of accounts.
+							-->
+							<key>inviteeDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.UniformIntegerDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- The minimum value (inclusive) of the uniform distribution. -->
+									<key>min</key>
+									<integer>0</integer>
+									<!-- The maximum value (exclusive) of the uniform distribution. -->
+									<key>max</key>
+									<integer>99</integer>
+								</dict>
+							</dict>
+
+							<key>inviteeClumping</key>
+							<true/>
+
+							<!-- Define the distribution of how many attendees will be invited to an event.
+							
+								LogNormal is the best fit to observed data.
+
+
+								For LogNormal "mode" is the peak, "mean" is the mean value.	For invites,
+								mode should typically be 1, and mean whatever matches the user behavior.
+								Our typical mean is 6. 							
+							     -->
+							<key>inviteeCountDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.FixedDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- Number of attendees. -->
+									<key>value</key>
+									<integer>5</integer>
+								</dict>
+							</dict>
+
+							<!-- Define how start times (DTSTART) for the randomly generated events 
+								will be selected. This is an example of a "Distribution" parameter. The value 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>eventStartDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+
+							<!-- Define how recurrences are created. -->
+							<key>recurrenceDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized.  We have a fixed set of
+								     RRULEs defined for this distribution and pick each based on a
+								     weight. -->
+								<key>type</key>
+								<string>contrib.performance.stats.RecurrenceDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- False to disable RRULEs -->
+									<key>allowRecurrence</key>
+									<false/>
+
+									<!-- These are the weights for the specific set of RRULEs. -->
+									<key>weights</key>
+									<dict>
+										<!-- Half of all events will be non-recurring -->
+										<key>none</key>
+										<integer>50</integer>
+										
+										<!-- Daily and weekly are pretty common -->
+										<key>daily</key>
+										<integer>10</integer>
+										<key>weekly</key>
+										<integer>20</integer>
+										
+										<!-- Monthly, yearly, daily & weekly limit not so common -->
+										<key>monthly</key>
+										<integer>2</integer>
+										<key>yearly</key>
+										<integer>1</integer>
+										<key>dailylimit</key>
+										<integer>2</integer>
+										<key>weeklylimit</key>
+										<integer>5</integer>
+										
+										<!-- Work days pretty common -->
+										<key>workdays</key>
+										<integer>10</integer>
+									</dict>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- This profile accepts invitations to events, handles cancels, and
+					     handles replies received. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Accepter</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define how long to wait after seeing a new invitation before
+								accepting it.
+
+								For LogNormal "mode" is the peak, "median" is the 50% cumulative value
+								(i.e., half of the users have accepted by that time).
+							-->
+							<key>acceptDelayDistribution</key>
+							<dict>
+								<key>type</key>
+								<string>contrib.performance.stats.LogNormalDistribution</string>
+								<key>params</key>
+								<dict>
+									<!-- mode - peak-->
+									<key>mode</key>
+									<integer>300</integer>
+									<!-- median - 50% done-->
+									<key>median</key>
+									<integer>1800</integer>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+					<!-- A task-creating profile, which will periodically create 
+						new tasks at a random time on a random calendar. -->
+					<dict>
+						<key>class</key>
+						<string>contrib.performance.loadtest.profiles.Tasker</string>
+
+						<key>params</key>
+						<dict>
+							<key>enabled</key>
+							<false/>
+
+							<!-- Define the interval (in seconds) at which this profile will use 
+								its client to create a new task. -->
+							<key>interval</key>
+							<integer>300</integer>
+
+							<!-- Define how due times (DUE) for the randomly generated tasks 
+								will be selected. This is an example of a "Distribution" parameter. The value 
+								for most "Distribution" parameters are interchangeable and extensible. -->
+							<key>taskDueDistribution</key>
+							<dict>
+
+								<!-- This distribution is pretty specialized. It produces timestamps 
+									in the near future, limited to certain days of the week and certain hours 
+									of the day. -->
+								<key>type</key>
+								<string>contrib.performance.stats.WorkDistribution</string>
+
+								<key>params</key>
+								<dict>
+									<!-- These are the days of the week the distribution will use. -->
+									<key>daysOfWeek</key>
+									<array>
+										<string>mon</string>
+										<string>tue</string>
+										<string>wed</string>
+										<string>thu</string>
+										<string>fri</string>
+									</array>
+
+									<!-- The earliest hour of a day at which an event might be scheduled. -->
+									<key>beginHour</key>
+									<integer>8</integer>
+
+									<!-- And the latest hour of a day (at which an event will be scheduled 
+										to begin!). -->
+									<key>endHour</key>
+									<integer>16</integer>
+
+									<!-- The timezone in which the event is scheduled. (XXX Does this 
+										really work right?) -->
+									<key>tzname</key>
+									<string>America/Los_Angeles</string>
+								</dict>
+							</dict>
+						</dict>
+					</dict>
+
+				</array>
+
+				<!-- Determine the frequency at which this client configuration will 
+					appear in the clients which are created by the load tester. -->
+				<key>weight</key>
+				<integer>1</integer>
+			</dict>
+		</array>
+	</dict>
+</plist>

Modified: CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/test_sim.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/test_sim.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/loadtest/test_sim.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -253,7 +253,7 @@
         exc = self.assertRaises(
             SystemExit, StubSimulator.main, ['--config', config.path])
         self.assertEquals(
-            exc.args, (StubSimulator(None, None, None, None, None, None).run(),))
+            exc.args, (StubSimulator(None, None, None, None, None, None, None).run(),))
 
 
     def test_createSimulator(self):
@@ -264,7 +264,7 @@
         """
         server = 'http://127.0.0.7:1243/'
         reactor = object()
-        sim = LoadSimulator(server, None, None, None, None, None, reactor=reactor)
+        sim = LoadSimulator(server, None, None, None, None, None, None, reactor=reactor)
         calsim = sim.createSimulator()
         self.assertIsInstance(calsim, CalendarClientSimulator)
         self.assertIsInstance(calsim.reactor, LagTrackingReactor)
@@ -447,7 +447,7 @@
 
         reactor = object()
         sim = LoadSimulator(
-            None, None, None, None, Arrival(FakeArrival, {'x': 3, 'y': 2}), None, reactor=reactor)
+            None, None, None, None, None, Arrival(FakeArrival, {'x': 3, 'y': 2}), None, reactor=reactor)
         arrival = sim.createArrivalPolicy()
         self.assertIsInstance(arrival, FakeArrival)
         self.assertIdentical(arrival.reactor, sim.reactor)
@@ -478,7 +478,9 @@
                             "weight": 3,
                             }]}))
 
-        sim = LoadSimulator.fromCommandLine(['--config', config.path])
+        sim = LoadSimulator.fromCommandLine(
+            ['--config', config.path, '--clients', config.path]
+        )
         expectedParameters = PopulationParameters()
         expectedParameters.addClient(
             3, ClientType(OS_X_10_6, {"foo": "bar"}, [ProfileType(Eventer, {
@@ -495,7 +497,9 @@
         """
         config = FilePath(self.mktemp())
         config.setContent(writePlistToString({"clients": []}))
-        sim = LoadSimulator.fromCommandLine(['--config', config.path])
+        sim = LoadSimulator.fromCommandLine(
+            ['--config', config.path, '--clients', config.path]
+        )
         expectedParameters = PopulationParameters()
         expectedParameters.addClient(
             1, ClientType(OS_X_10_6, {}, [Eventer, Inviter, Accepter]))
@@ -528,6 +532,7 @@
             "/principals/users/%s/",
             None,
             None,
+            None,
             Arrival(lambda reactor: NullArrival(), {}),
             None, observers, reactor=Reactor())
         io = StringIO()

Modified: CalendarServer/branches/users/cdaboo/json/contrib/performance/sqlusage/requests/httpTests.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/sqlusage/requests/httpTests.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/sqlusage/requests/httpTests.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -89,12 +89,21 @@
             pos = line.find(": ")
             return float(line[pos + 2:])
 
+        # Need to skip over stats that are unlabeled
         data = open(self.logFilePath).read()
         lines = data.splitlines()
-        count = extractInt(lines[4])
-        rows = extractInt(lines[5])
-        timing = extractFloat(lines[6])
-        self.result = HTTPTestBase.SQLResults(count, rows, timing)
+        offset = 0
+        while True:
+            if lines[offset] == "*** SQL Stats ***":
+                if lines[offset + 2].split()[1] != "unlabeled":
+                    count = extractInt(lines[offset + 4])
+                    rows = extractInt(lines[offset + 5])
+                    timing = extractFloat(lines[offset + 6])
+                    self.result = HTTPTestBase.SQLResults(count, rows, timing)
+                    break
+            offset += 1
+        else:
+            self.result = HTTPTestBase.SQLResults(-1, -1, 0.0)
 
         with open("%s-%d-%s" % (self.logFilePath, event_count, self.label), "w") as f:
             f.write(data)

Modified: CalendarServer/branches/users/cdaboo/json/contrib/performance/sqlusage/sqlusage.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/performance/sqlusage/sqlusage.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/contrib/performance/sqlusage/sqlusage.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -127,11 +127,17 @@
         ]
         self.requestLabels = [request.label for request in requests]
 
-        # Warm-up server by doing calendar home and calendar propfinds
-        props = (davxml.resourcetype,)
-        for session in sessions:
-            session.getPropertiesOnHierarchy(URL(path=session.homeHref), props)
-            session.getPropertiesOnHierarchy(URL(path=session.calendarHref), props)
+        def _warmUp():
+            # Warm-up server by doing calendar home and child collection propfinds.
+            # Do this twice because the very first time might provision DB objects and
+            # blow any DB cache - the second time will warm the DB cache.
+            props = (davxml.resourcetype,)
+            for _ignore in range(2):
+                for session in sessions:
+                    session.getPropertiesOnHierarchy(URL(path=session.homeHref), props)
+                    session.getPropertiesOnHierarchy(URL(path=session.calendarHref), props)
+                    session.getPropertiesOnHierarchy(URL(path=session.inboxHref), props)
+                    session.getPropertiesOnHierarchy(URL(path=session.notificationHref), props)
 
         # Now loop over sets of events
         for count in event_counts:
@@ -140,6 +146,7 @@
             result = {}
             for request in requests:
                 print("  Test = %s" % (request.label,))
+                _warmUp()
                 result[request.label] = request.execute(count)
             self.results[count] = result
 

Modified: CalendarServer/branches/users/cdaboo/json/contrib/tools/protocolanalysis.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/tools/protocolanalysis.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/contrib/tools/protocolanalysis.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -293,6 +293,12 @@
         self.userCounts = collections.defaultdict(int)
         self.userResponseTimes = collections.defaultdict(float)
 
+        self.newEvents = 0
+        self.newInvites = 0
+        self.updateEvents = 0
+        self.updateInvites = 0
+        self.attendeeInvites = 0
+
         self.otherUserCalendarRequests = {}
 
         self.currentLine = None
@@ -416,6 +422,19 @@
                 self.hourlyByStatus[" TOTAL"][timeBucketIndex] += 1
                 self.hourlyByStatus[self.currentLine.status][timeBucketIndex] += 1
 
+                if self.currentLine.status == 201:
+                    if adjustedMethod == METHOD_PUT_ICS:
+                        self.newEvents += 1
+                    elif adjustedMethod == METHOD_PUT_ORGANIZER:
+                        self.newInvites += 1
+                elif isOK:
+                    if adjustedMethod == METHOD_PUT_ICS:
+                        self.updateEvents += 1
+                    elif adjustedMethod == METHOD_PUT_ORGANIZER:
+                        self.updateInvites += 1
+                    elif adjustedMethod == METHOD_PUT_ATTENDEE:
+                        self.attendeeInvites += 1
+
                 # Cache analysis
                 if adjustedMethod == METHOD_PROPFIND_CALENDAR and self.currentLine.status == 207:
                     responses = int(self.currentLine.extended.get("responses", 0))
@@ -1029,7 +1048,10 @@
             #print("User Response times")
             #self.printUserResponseTimes(doTabs)
 
+            print("Sim values")
+            self.printSimStats(doTabs)
 
+
     def printInfo(self, doTabs):
 
         table = tables.Table()
@@ -1083,6 +1105,7 @@
         totalRequests = 0
         totalDepth = 0
         totalTime = 0.0
+        self.timeCounts = 0
         for ctr in xrange(self.timeBucketCount):
             hour = self.getHourFromIndex(ctr)
             if hour is None:
@@ -1101,12 +1124,13 @@
             totalRequests += countRequests
             totalDepth += countDepth
             totalTime += countTime
+            self.timeCounts += 1
 
         table.addFooter(
             (
                 "Total:",
                 totalRequests,
-                (1.0 * totalRequests) / self.timeBucketCount / self.resolutionMinutes / 60,
+                safePercent(totalRequests, self.timeCounts * self.resolutionMinutes * 60, 1.0),
                 safePercent(totalTime, totalRequests, 1.0),
                 safePercent(float(totalDepth), totalRequests, 1),
             ),
@@ -1545,7 +1569,38 @@
         print("")
 
 
+    def printSimStats(self, doTabs):
+        users = len(self.userCounts.keys())
+        hours = self.timeCounts / self.resolutionMinutes / 60
+        table = tables.Table()
+        table.setDefaultColumnFormats((
+                tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+                tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+                tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+                tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
+                ))
+        table.addHeader(("Item", "Value", "Items, per User, per Day", "Interval (sec), per item, per user"))
+        table.addRow(("Unique Users", users, "", ""))
 
+        def _addRow(title, item):
+            table.addRow((title, item, "%.1f" % (safePercent(24 * item, hours * users, 1.0),), "%.1f" % (safePercent(hours * 60 * 60 * users, item, 1.0),),))
+
+        _addRow("New Events", self.newEvents)
+        _addRow("New Invites", self.newInvites)
+        _addRow("Updated Events", self.updateEvents)
+        _addRow("Updated Invites", self.updateInvites)
+        _addRow("Attendee Invites", self.attendeeInvites)
+        table.addRow((
+            "Recipients",
+            "%.1f" % (safePercent(sum(self.averagedHourlyByRecipientCount["iTIP Average"]), self.timeCounts, 1.0),),
+            "",
+            "",
+        ))
+        table.printTabDelimitedData() if doTabs else table.printTable()
+        print("")
+
+
+
 class TablePrinter(object):
 
     @classmethod

Modified: CalendarServer/branches/users/cdaboo/json/contrib/tools/request_monitor.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/tools/request_monitor.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/contrib/tools/request_monitor.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -303,8 +303,6 @@
     print("--procs N  specifies how many python processes are expected in the log file (default: 80)")
     print("--top N    how many long requests to print (default: 10)")
     print("--users N  how many top users to print (default: 5)")
-    print("--router   analyze a partition server router node")
-    print("--worker   analyze a partition server worker node")
     print("")
     print("Version: 5")
 
@@ -313,19 +311,13 @@
 numTop = 10
 numUsers = 5
 lineRange = None
-router = False
-worker = False
-options, args = getopt.getopt(sys.argv[1:], "h", ["debug", "router", "worker", "lines=", "range=", "procs=", "top=", "users="])
+options, args = getopt.getopt(sys.argv[1:], "h", ["debug", "lines=", "range=", "procs=", "top=", "users="])
 for option, value in options:
     if option == "-h":
         usage()
         sys.exit(0)
     elif option == "--debug":
         debug = True
-    elif option == "--router":
-        router = True
-    elif option == "--worker":
-        worker = True
     elif option == "--lines":
         numLines = int(value)
     elif option == "--range":

Modified: CalendarServer/branches/users/cdaboo/json/setup.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/setup.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/setup.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -145,7 +145,6 @@
                              "bin/calendarserver_export",
                             #"bin/calendarserver_icalendar_validate",
                             #"bin/calendarserver_load_augmentdb",
-                            #"bin/calendarserver_make_partition",
                             #"bin/calendarserver_manage_postgres",
                              "bin/calendarserver_manage_principals",
                              "bin/calendarserver_manage_push",

Modified: CalendarServer/branches/users/cdaboo/json/support/version.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/support/version.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/support/version.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -26,7 +26,7 @@
     # Compute the version number.
     #
 
-    base_version = "5.1"
+    base_version = "5.2"
 
     branches = tuple(
         branch.format(version=base_version)

Modified: CalendarServer/branches/users/cdaboo/json/twext/enterprise/dal/syntax.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/enterprise/dal/syntax.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/enterprise/dal/syntax.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1687,6 +1687,48 @@
 
 
 
+class DatabaseLock(_LockingStatement):
+    """
+    An SQL exclusive session level advisory lock
+    """
+
+    def _toSQL(self, queryGenerator):
+        assert(queryGenerator.dialect == POSTGRES_DIALECT)
+        return SQLFragment('select pg_advisory_lock(1)')
+
+
+    def on(self, txn, *a, **kw):
+        """
+        Override on() to only execute on Postgres
+        """
+        if txn.dialect == POSTGRES_DIALECT:
+            return super(DatabaseLock, self).on(txn, *a, **kw)
+
+        return succeed(None)
+
+
+
+class DatabaseUnlock(_LockingStatement):
+    """
+    An SQL exclusive session level advisory unlock
+    """
+
+    def _toSQL(self, queryGenerator):
+        assert(queryGenerator.dialect == POSTGRES_DIALECT)
+        return SQLFragment('select pg_advisory_unlock(1)')
+
+
+    def on(self, txn, *a, **kw):
+        """
+        Override on() to only execute on Postgres
+        """
+        if txn.dialect == POSTGRES_DIALECT:
+            return super(DatabaseUnlock, self).on(txn, *a, **kw)
+
+        return succeed(None)
+
+
+
 class Savepoint(_LockingStatement):
     """
     An SQL 'savepoint' statement.

Modified: CalendarServer/branches/users/cdaboo/json/twext/enterprise/dal/test/test_sqlsyntax.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/enterprise/dal/test/test_sqlsyntax.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/enterprise/dal/test/test_sqlsyntax.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -25,7 +25,8 @@
     TableMismatch, Parameter, Max, Len, NotEnoughValues,
     Savepoint, RollbackToSavepoint, ReleaseSavepoint, SavepointAction,
     Union, Intersect, Except, SetExpression, DALError,
-    ResultAliasSyntax, Count, QueryGenerator, ALL_COLUMNS)
+    ResultAliasSyntax, Count, QueryGenerator, ALL_COLUMNS,
+    DatabaseLock, DatabaseUnlock)
 from twext.enterprise.dal.syntax import FixedPlaceholder, NumericPlaceholder
 from twext.enterprise.dal.syntax import Function
 from twext.enterprise.dal.syntax import SchemaSyntax
@@ -1314,6 +1315,22 @@
                           SQLFragment("lock table FOO in exclusive mode"))
 
 
+    def test_databaseLock(self):
+        """
+        L{DatabaseLock} generates a ('pg_advisory_lock') statement
+        """
+        self.assertEquals(DatabaseLock().toSQL(),
+                          SQLFragment("select pg_advisory_lock(1)"))
+
+
+    def test_databaseUnlock(self):
+        """
+        L{DatabaseUnlock} generates a ('pg_advisory_unlock') statement
+        """
+        self.assertEquals(DatabaseUnlock().toSQL(),
+                          SQLFragment("select pg_advisory_unlock(1)"))
+
+
     def test_savepoint(self):
         """
         L{Savepoint} generates a ('savepoint') statement.

Modified: CalendarServer/branches/users/cdaboo/json/twext/enterprise/fixtures.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/enterprise/fixtures.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/enterprise/fixtures.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -76,8 +76,8 @@
 
 def resultOf(deferred, propagate=False):
     """
-    Add a callback and errback which will capture the result of a L{Deferred} in
-    a list, and return that list.  If 'propagate' is True, pass through the
+    Add a callback and errback which will capture the result of a L{Deferred}
+    in a list, and return that list.  If 'propagate' is True, pass through the
     results.
     """
     results = []
@@ -194,7 +194,6 @@
         No implementation.
         """
 
-
     def callFromThread(self, thunk, *a, **kw):
         """
         No implementation.
@@ -223,14 +222,16 @@
             self.factory = ConnectionFactory()
             connect = self.factory.connect
         self.connect = connect
-        self.paused             = False
-        self.holders            = []
-        self.pool               = ConnectionPool(connect,
-                                                 maxConnections=2,
-                                                 dialect=self.dialect,
-                                                 paramstyle=self.paramstyle)
+        self.paused = False
+        self.holders = []
+        self.pool = ConnectionPool(
+            connect,
+            maxConnections=2,
+            dialect=self.dialect,
+            paramstyle=self.paramstyle
+        )
         self.pool._createHolder = self.makeAHolder
-        self.clock              = self.pool.reactor = ClockWithThreads()
+        self.clock = self.pool.reactor = ClockWithThreads()
         self.pool.startService()
         test.addCleanup(self.flushHolders)
 
@@ -239,7 +240,7 @@
         """
         Flush all pending C{submit}s since C{pauseHolders} was called.  This
         makes sure the service is stopped and the fake ThreadHolders are all
-        executing their queues so failed tsets can exit cleanly.
+        executing their queues so failed tests can exit cleanly.
         """
         self.paused = False
         for holder in self.holders:
@@ -551,6 +552,21 @@
         self._connectResultQueue.append(thunk)
 
 
+    def willConnectTo(self):
+        """
+        Queue a successful result for connect() and immediately add it as a
+        child to this L{ConnectionFactory}.
+
+        @return: a connection object
+        @rtype: L{FakeConnection}
+        """
+        aConnection = FakeConnection(self)
+        def thunk():
+            return aConnection
+        self._connectResultQueue.append(thunk)
+        return aConnection
+
+
     def willFail(self):
         """
         Used by tests to queue a successful result for connect().

Modified: CalendarServer/branches/users/cdaboo/json/twext/enterprise/queue.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/enterprise/queue.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/enterprise/queue.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -115,7 +115,7 @@
     (in the worst case) pass from worker->controller->controller->worker.
     """
 
-    def performWork(table, workID):
+    def performWork(table, workID): #@NoSelf
         """
         @param table: The table where work is waiting.
         @type table: L{TableSyntax}
@@ -149,7 +149,8 @@
     NodeTable.addColumn("PORT", SQLType("integer", None))
     NodeTable.addColumn("TIME", SQLType("timestamp", None)).setDefaultValue(
         # Note: in the real data structure, this is actually a not-cleaned-up
-        # sqlparse internal data structure, but it *should* look closer to this.
+        # sqlparse internal data structure, but it *should* look closer to
+        # this.
         ProcedureCall("timezone", ["UTC", NamedValue('CURRENT_TIMESTAMP')])
     )
     for column in NodeTable.columns:
@@ -370,7 +371,6 @@
         will be taken care of by the job queueing machinery.
         """
 
-
     @classmethod
     def forTable(cls, table):
         """
@@ -677,8 +677,8 @@
     """
 
     def __init__(self, peerPool, boxReceiver=None, locator=None):
-        super(ConnectionFromWorker, self).__init__(peerPool.schema, boxReceiver,
-                                                   locator)
+        super(ConnectionFromWorker, self).__init__(peerPool.schema,
+                                                   boxReceiver, locator)
         self.peerPool = peerPool
         self._load = 0
 
@@ -830,9 +830,9 @@
             workItem = yield workItemClass.load(txn, workID)
             if workItem.group is not None:
                 yield NamedLock.acquire(txn, workItem.group)
-            # TODO: what if we fail?  error-handling should be recorded someplace,
-            # the row should probably be marked, re-tries should be triggerable
-            # administratively.
+            # TODO: what if we fail?  error-handling should be recorded
+            # someplace, the row should probably be marked, re-tries should be
+            # triggerable administratively.
             yield workItem.delete()
             # TODO: verify that workID is the primary key someplace.
             yield workItem.doWork()
@@ -865,9 +865,6 @@
 
 
 
-
-
-
 class WorkerFactory(Factory, object):
     """
     Factory, to be used as the client to connect from the worker to the
@@ -950,7 +947,7 @@
         waiting for the transaction where that addition was completed to
         commit, and asking the local node controller process to do the work.
         """
-        @passthru(self.workItemType.create(self.txn, **self.kw).addCallback)
+        created = self.workItemType.create(self.txn, **self.kw)
         def whenCreated(item):
             self._whenProposed.callback(self)
             @self.txn.postCommit
@@ -967,12 +964,15 @@
                         self._whenExecuted.errback(why)
                 reactor = self._chooser.reactor
                 when = max(0, astimestamp(item.notBefore) - reactor.seconds())
-                # TODO: Track the returned DelayedCall so it can be stopped when
-                # the service stops.
+                # TODO: Track the returned DelayedCall so it can be stopped
+                # when the service stops.
                 self._chooser.reactor.callLater(when, maybeLater)
             @self.txn.postAbort
             def whenFailed():
                 self._whenCommitted.errback(TransactionFailed)
+        def whenNotCreated(failure):
+            self._whenProposed.errback(failure)
+        created.addCallbacks(whenCreated, whenNotCreated)
 
 
     def whenExecuted(self):
@@ -1023,6 +1023,8 @@
         """
         return _cloneDeferred(self._whenCommitted)
 
+
+
 class _BaseQueuer(object):
     implements(IQueuer)
 
@@ -1030,13 +1032,16 @@
         super(_BaseQueuer, self).__init__()
         self.proposalCallbacks = set()
 
+
     def callWithNewProposals(self, callback):
-        self.proposalCallbacks.add(callback);
+        self.proposalCallbacks.add(callback)
 
+
     def transferProposalCallbacks(self, newQueuer):
         newQueuer.proposalCallbacks = self.proposalCallbacks
         return newQueuer
 
+
     def enqueueWork(self, txn, workItemType, **kw):
         """
         There is some work to do.  Do it, someplace else, ideally in parallel.
@@ -1061,6 +1066,7 @@
         return wp
 
 
+
 class PeerConnectionPool(_BaseQueuer, MultiService, object):
     """
     Each node has a L{PeerConnectionPool} connecting it to all the other nodes
@@ -1140,7 +1146,7 @@
         self.mappedPeers = {}
         self.schema = schema
         self._startingUp = None
-        self._listeningPortObject = None
+        self._listeningPort = None
         self._lastSeenTotalNodes = 1
         self._lastSeenNodeIndex = 1
 
@@ -1197,7 +1203,8 @@
         A peer has requested us to perform some work; choose a work performer
         local to this node, and then execute it.
         """
-        return self.choosePerformer(onlyLocally=True).performWork(table, workID)
+        performer = self.choosePerformer(onlyLocally=True)
+        return performer.performWork(table, workID)
 
 
     def allWorkItemTypes(self):
@@ -1225,8 +1232,8 @@
 
         @return: the maximum number of other L{PeerConnectionPool} instances
             that may be connected to the database described by
-            C{self.transactionFactory}.  Note that this is not the current count
-            by connectivity, but the count according to the database.
+            C{self.transactionFactory}.  Note that this is not the current
+            count by connectivity, but the count according to the database.
         @rtype: L{int}
         """
         # TODO
@@ -1277,7 +1284,6 @@
                                            overdueItem.workID)
         return inTransaction(self.transactionFactory, workCheck)
 
-
     _currentWorkDeferred = None
     _lostWorkCheckCall = None
 
@@ -1315,10 +1321,10 @@
         @inlineCallbacks
         def startup(txn):
             endpoint = TCP4ServerEndpoint(self.reactor, self.ampPort)
-            # If this fails, the failure mode is going to be ugly, just like all
-            # conflicted-port failures.  But, at least it won't proceed.
-            self._listeningPortObject = yield endpoint.listen(self.peerFactory())
-            self.ampPort = self._listeningPortObject.getHost().port
+            # If this fails, the failure mode is going to be ugly, just like
+            # all conflicted-port failures.  But, at least it won't proceed.
+            self._listeningPort = yield endpoint.listen(self.peerFactory())
+            self.ampPort = self._listeningPort.getHost().port
             yield Lock.exclusive(NodeInfo.table).on(txn)
             nodes = yield self.activeNodes(txn)
             selves = [node for node in nodes
@@ -1354,8 +1360,8 @@
         yield super(PeerConnectionPool, self).stopService()
         if self._startingUp is not None:
             yield self._startingUp
-        if self._listeningPortObject is not None:
-            yield self._listeningPortObject.stopListening()
+        if self._listeningPort is not None:
+            yield self._listeningPort.stopListening()
         if self._lostWorkCheckCall is not None:
             self._lostWorkCheckCall.cancel()
         if self._currentWorkDeferred is not None:
@@ -1430,8 +1436,6 @@
 
 
 
-
-
 class LocalQueuer(_BaseQueuer):
     """
     When work is enqueued with this queuer, it is just executed locally.
@@ -1458,7 +1462,8 @@
     """
     Implementor of C{performWork} that doesn't actual perform any work.  This
     is used in the case where you want to be able to enqueue work for someone
-    else to do, but not take on any work yourself (such as a command line tool).
+    else to do, but not take on any work yourself (such as a command line
+    tool).
     """
     implements(_IWorkPerformer)
 
@@ -1469,6 +1474,7 @@
         return succeed(None)
 
 
+
 class NonPerformingQueuer(_BaseQueuer):
     """
     When work is enqueued with this queuer, it is never executed locally.
@@ -1487,4 +1493,4 @@
         """
         Choose to perform the work locally.
         """
-        return NonPerformer()
\ No newline at end of file
+        return NonPerformer()

Modified: CalendarServer/branches/users/cdaboo/json/twext/enterprise/test/test_queue.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/enterprise/test/test_queue.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/enterprise/test/test_queue.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -36,6 +36,7 @@
 )
 
 from twisted.trial.unittest import TestCase
+from twisted.python.failure import Failure
 from twisted.internet.defer import (
     Deferred, inlineCallbacks, gatherResults, passthru#, returnValue
 )
@@ -55,6 +56,8 @@
 from twisted.test.proto_helpers import StringTransport, MemoryReactor
 from twext.enterprise.fixtures import SteppablePoolHelper
 from twisted.internet.defer import returnValue
+from twext.enterprise.queue import LocalQueuer
+from twext.enterprise.fixtures import ConnectionPoolHelper
 
 from twext.enterprise.queue import _BaseQueuer, NonPerformingQueuer
 import twext.enterprise.queue
@@ -67,7 +70,7 @@
 
     def callLater(self, _seconds, _f, *args, **kw):
         if _seconds < 0:
-            raise ValueError("%s<0: "%(_seconds,))
+            raise ValueError("%s<0: " % (_seconds,))
         return super(Clock, self).callLater(_seconds, _f, *args, **kw)
 
 
@@ -267,6 +270,56 @@
 
 
 
+class WorkProposalTests(TestCase):
+    """
+    Tests for L{WorkProposal}.
+    """
+
+    def test_whenProposedSuccess(self):
+        """
+        The L{Deferred} returned by L{WorkProposal.whenProposed} fires when the
+        SQL sent to the database has completed.
+        """
+        cph = ConnectionPoolHelper()
+        cph.setUp(test=self)
+        cph.pauseHolders()
+        lq = LocalQueuer(cph.createTransaction)
+        enqTxn = cph.createTransaction()
+        wp = lq.enqueueWork(enqTxn, DummyWorkItem, a=3, b=4)
+        d = wp.whenProposed()
+        r = cph.resultOf(d)
+        self.assertEquals(r, [])
+        cph.flushHolders()
+        self.assertEquals(len(r), 1)
+
+
+    def test_whenProposedFailure(self):
+        """
+        The L{Deferred} returned by L{WorkProposal.whenProposed} fails with an
+        errback when the SQL executed to create the WorkItem row fails.
+        """
+        cph = ConnectionPoolHelper()
+        cph.setUp(self)
+        cph.pauseHolders()
+        firstConnection = cph.factory.willConnectTo()
+        enqTxn = cph.createTransaction()
+        # Execute some SQL on the connection before enqueueing the work-item so
+        # that we don't get the initial-statement.
+        enqTxn.execSQL("some sql")
+        lq = LocalQueuer(cph.createTransaction)
+        cph.flushHolders()
+        cph.pauseHolders()
+        wp = lq.enqueueWork(enqTxn, DummyWorkItem, a=3, b=4)
+        firstConnection.executeWillFail(lambda: RuntimeError("foo"))
+        d = wp.whenProposed()
+        r = cph.resultOf(d)
+        self.assertEquals(r, [])
+        cph.flushHolders()
+        self.assertEquals(len(r), 1)
+        self.assertIsInstance(r[0], Failure)
+
+
+
 class PeerConnectionPoolUnitTests(TestCase):
     """
     L{PeerConnectionPool} has many internal components.
@@ -393,7 +446,8 @@
             # Next, create one that's actually far enough into the past to run.
             yield DummyWorkItem.create(
                 txn, a=3, b=4, notBefore=(
-                    # Schedule it in the past so that it should have already run.
+                    # Schedule it in the past so that it should have already
+                    # run.
                     fakeNow - datetime.timedelta(
                         seconds=qpool.queueProcessTimeout + 20
                     )
@@ -509,8 +563,8 @@
             t = StringTransport()
             p.makeConnection(t)
             return p, t
-        worker1, trans1 = peer()
-        worker2, trans2 = peer()
+        worker1, _ignore_trans1 = peer()
+        worker2, _ignore_trans2 = peer()
         # Ask the worker to do something.
         worker1.performWork(schema.DUMMY_WORK_ITEM, 1)
         self.assertEquals(worker1.currentLoad, 1)
@@ -619,11 +673,12 @@
         self.receiver, self.sender = self.sender, self.receiver
         return result
 
+
     def flush(self, turns=10):
         """
         Keep relaying data until there's no more.
         """
-        for x in range(turns):
+        for _ignore_x in range(turns):
             if not (self.pump() or self.pump()):
                 return
 
@@ -718,7 +773,7 @@
         def op2(txn):
             return Select([schema.DUMMY_WORK_DONE.WORK_ID,
                            schema.DUMMY_WORK_DONE.A_PLUS_B],
-                           From=schema.DUMMY_WORK_DONE).on(txn)
+                          From=schema.DUMMY_WORK_DONE).on(txn)
         rows = yield inTransaction(self.store.newTransaction, op2)
         self.assertEquals(rows, [[4321, 7]])
 
@@ -729,7 +784,7 @@
         When a L{WorkItem} is concurrently deleted by another transaction, it
         should I{not} perform its work.
         """
-        # Provide access to a method called 'concurrently' everything using 
+        # Provide access to a method called 'concurrently' everything using
         original = self.store.newTransaction
         def decorate(*a, **k):
             result = original(*a, **k)
@@ -746,13 +801,13 @@
         # Sanity check on the concurrent deletion.
         def op2(txn):
             return Select([schema.DUMMY_WORK_ITEM.WORK_ID],
-                           From=schema.DUMMY_WORK_ITEM).on(txn)
+                          From=schema.DUMMY_WORK_ITEM).on(txn)
         rows = yield inTransaction(self.store.newTransaction, op2)
         self.assertEquals(rows, [])
         def op3(txn):
             return Select([schema.DUMMY_WORK_DONE.WORK_ID,
                            schema.DUMMY_WORK_DONE.A_PLUS_B],
-                           From=schema.DUMMY_WORK_DONE).on(txn)
+                          From=schema.DUMMY_WORK_DONE).on(txn)
         rows = yield inTransaction(self.store.newTransaction, op3)
         self.assertEquals(rows, [])
 
@@ -763,18 +818,23 @@
     def __init__(self, *ignored):
         pass
 
+
     def _start(self):
         pass
 
+
+
 class BaseQueuerTests(TestCase):
 
     def setUp(self):
         self.proposal = None
         self.patch(twext.enterprise.queue, "WorkProposal", DummyProposal)
 
+
     def _proposalCallback(self, proposal):
         self.proposal = proposal
 
+
     def test_proposalCallbacks(self):
         queuer = _BaseQueuer()
         queuer.callWithNewProposals(self._proposalCallback)
@@ -783,6 +843,7 @@
         self.assertNotEqual(self.proposal, None)
 
 
+
 class NonPerformingQueuerTests(TestCase):
 
     @inlineCallbacks
@@ -791,5 +852,3 @@
         performer = queuer.choosePerformer()
         result = (yield performer.performWork(None, None))
         self.assertEquals(result, None)
-
-

Modified: CalendarServer/branches/users/cdaboo/json/twext/patches.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/patches.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/patches.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -26,6 +26,8 @@
 from twisted.python.versions import Version
 from twisted.python.modules import getModule
 
+
+
 def _hasIPv6ClientSupport():
     """
     Does the loaded version of Twisted have IPv6 client support?
@@ -34,8 +36,9 @@
     if version > lastVersionWithoutIPv6Clients:
         return True
     elif version == lastVersionWithoutIPv6Clients:
-        # It could be a snapshot of trunk or a branch with this bug fixed. Don't
-        # load the module, though, as that would be a bunch of unnecessary work.
+        # It could be a snapshot of trunk or a branch with this bug fixed.
+        # Don't load the module, though, as that would be a bunch of
+        # unnecessary work.
         return "_resolveIPv6" in (getModule("twisted.internet.tcp")
                                   .filePath.getContent())
     else:
@@ -45,8 +48,8 @@
 
 def _addBackports():
     """
-    We currently require 2 backported bugfixes from a future release of Twisted,
-    for IPv6 support:
+    We currently require 2 backported bugfixes from a future release of
+    Twisted, for IPv6 support:
 
         - U{IPv6 client support <http://tm.tl/5085>}
 

Modified: CalendarServer/branches/users/cdaboo/json/twext/protocols/test/test_memcache.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/protocols/test/test_memcache.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/protocols/test/test_memcache.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -14,7 +14,17 @@
 from twisted.internet.defer import Deferred, gatherResults, TimeoutError
 
 
+def onConnectionLossFire(protocol, deferred):
+    """
+    When the given L{MemCacheProtocol} is disconnected, fire the given
+    L{Deferred} with L{None}.
+    """
+    def cl(reason):
+        deferred.callback(None)
+    protocol.connectionLost = cl
 
+
+
 class MemCacheTestCase(TestCase):
     """
     Test client protocol class L{MemCacheProtocol}.
@@ -244,7 +254,7 @@
         d1 = self.proto.get("foo")
         d2 = self.proto.get("bar")
         d3 = Deferred()
-        self.proto.connectionLost = d3.callback
+        onConnectionLossFire(self.proto, d3)
 
         self.clock.advance(self.proto.persistentTimeOut)
         self.assertFailure(d1, TimeoutError)
@@ -280,7 +290,7 @@
         """
         d1 = self.proto.get("foo")
         d2 = Deferred()
-        self.proto.connectionLost = d2.callback
+        onConnectionLossFire(self.proto, d2)
 
         self.proto.dataReceived("VALUE foo 0 10\r\n12345")
         self.clock.advance(self.proto.persistentTimeOut)
@@ -295,7 +305,7 @@
         """
         d1 = self.proto.stats()
         d2 = Deferred()
-        self.proto.connectionLost = d2.callback
+        onConnectionLossFire(self.proto, d2)
 
         self.proto.dataReceived("STAT foo bar\r\n")
         self.clock.advance(self.proto.persistentTimeOut)
@@ -311,7 +321,7 @@
         d1 = self.proto.get("foo")
         d2 = self.proto.get("bar")
         d3 = Deferred()
-        self.proto.connectionLost = d3.callback
+        onConnectionLossFire(self.proto, d3)
 
         self.clock.advance(self.proto.persistentTimeOut - 1)
         self.proto.dataReceived("VALUE foo 0 3\r\nbar\r\nEND\r\n")
@@ -319,7 +329,7 @@
         def check(result):
             self.assertEquals(result, (0, "bar"))
             self.assertEquals(len(self.clock.calls), 1)
-            for i in range(self.proto.persistentTimeOut):
+            for _ignore_i in range(self.proto.persistentTimeOut):
                 self.clock.advance(1)
             return self.assertFailure(d2, TimeoutError).addCallback(checkTime)
         def checkTime(ignored):
@@ -338,7 +348,7 @@
         """
         d1 = self.proto.get("foo")
         d3 = Deferred()
-        self.proto.connectionLost = d3.callback
+        onConnectionLossFire(self.proto, d3)
 
         self.clock.advance(self.proto.persistentTimeOut - 1)
         d2 = self.proto.get("bar")

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/aggregate.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/aggregate.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/aggregate.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -45,13 +45,16 @@
 
         for service in services:
             if not IDirectoryService.implementedBy(service.__class__):
-                raise ValueError("Not a directory service: %s" % (service,))
+                raise ValueError(
+                    "Not a directory service: {0}".format(service)
+                )
 
             for recordType in service.recordTypes():
                 if recordType in recordTypes:
                     raise DirectoryConfigurationError(
-                        "Aggregated services may not vend the same record type: %s"
-                        % (recordType,)
+                        "Aggregated services may not vend "
+                        "the same record type: {0}"
+                        .format(recordType)
                     )
                 recordTypes.add(recordType)
 

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/directory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/directory.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/directory.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -24,10 +24,8 @@
     "DirectoryRecord",
 ]
 
-from uuid import UUID
+from zope.interface import implementer
 
-from zope.interface import implements
-
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet.defer import succeed, fail
 
@@ -40,26 +38,69 @@
 
 
 
+ at implementer(IDirectoryService)
 class DirectoryService(object):
-    implements(IDirectoryService)
+    """
+    Generic implementation of L{IDirectoryService}.
 
+    This is a complete implementation of L{IDirectoryService}, with support for
+    the query operands in L{Operand}.
+
+    The C{recordsWith*} methods are all implemented in terms of
+    L{recordsWithFieldValue}, which is in turn implemented in terms of
+    L{recordsFromExpression}.
+    L{recordsFromQuery} is also implemented in terms of
+    {recordsFromExpression}.
+
+    L{recordsFromExpression} (and therefore most uses of the other methods)
+    will always fail with a L{QueryNotSupportedError}.
+
+    A subclass should therefore override L{recordsFromExpression} with an
+    implementation that handles any queries that it can support and its
+    superclass' implementation with any query it cannot support.
+
+    A subclass may override L{recordsFromQuery} if it is to support additional
+    operands.
+
+    L{updateRecords} and L{removeRecords} will fail with L{NotAllowedError}
+    when asked to modify data.
+    A subclass should override these methods if is to allow editing of
+    directory information.
+
+    @cvar recordType: a L{Names} class or compatible object (eg.
+        L{ConstantsContainer}) which contains the L{NamedConstant}s denoting
+        the record types that are supported by this directory service.
+
+    @cvar fieldName: a L{Names} class or compatible object (eg.
+        L{ConstantsContainer}) which contains the L{NamedConstant}s denoting
+        the record field names that are supported by this directory service.
+
+    @cvar normalizedFields: a L{dict} mapping of (ie. L{NamedConstant}s
+        contained in the C{fieldName} class variable) to callables that take
+        a field value (a L{unicode}) and return a normalized field value (also
+        a L{unicode}).
+    """
+
     recordType = RecordType
     fieldName  = FieldName
 
     normalizedFields = {
-        FieldName.guid:           lambda g: UUID(g).hex,
-        FieldName.emailAddresses: lambda e: e.lower(),
+        FieldName.emailAddresses: lambda e: bytes(e).lower(),
     }
 
 
     def __init__(self, realmName):
+        """
+        @param realmName: a realm name
+        @type realmName: unicode
+        """
         self.realmName = realmName
 
 
     def __repr__(self):
-        return "<%s %r>" % (
-            self.__class__.__name__,
-            self.realmName,
+        return (
+            "<{self.__class__.__name__} {self.realmName!r}>"
+            .format(self=self)
         )
 
 
@@ -70,13 +111,34 @@
     def recordsFromExpression(self, expression, records=None):
         """
         Finds records matching a single expression.
-        @param expression: an expression
+
+        @note: The implementation in L{DirectoryService} always raises
+            L{QueryNotSupportedError}.
+
+        @note: This L{DirectoryService} adds a C{records} keyword argument to
+            the interface defined by L{IDirectoryService}.
+            This allows the implementation of
+            L{DirectoryService.recordsFromQuery} to narrow the scope of records
+            being searched as it applies expressions.
+            This is therefore relevant to subclasses, which need to support the
+            added parameter, but not to users of L{IDirectoryService}.
+
+        @param expression: an expression to apply
         @type expression: L{object}
-        @param records: a set of records to search within. C{None} if
+
+        @param records: a set of records to limit the search to. C{None} if
             the whole directory should be searched.
         @type records: L{set} or L{frozenset}
+
+        @return: The matching records.
+        @rtype: deferred iterable of L{IDirectoryRecord}s
+
+        @raises: L{QueryNotSupportedError} if the expression is not
+            supported by this directory service.
         """
-        return fail(QueryNotSupportedError("Unknown expression: %s" % (expression,)))
+        return fail(QueryNotSupportedError(
+            "Unknown expression: {0}".format(expression)
+        ))
 
 
     @inlineCallbacks
@@ -109,7 +171,9 @@
             elif operand == Operand.OR:
                 results |= recordsMatchingExpression
             else:
-                raise QueryNotSupportedError("Unknown operand: %s" % (operand,))
+                raise QueryNotSupportedError(
+                    "Unknown operand: {0}".format(operand)
+                )
 
         returnValue(results)
 
@@ -120,12 +184,16 @@
 
     @inlineCallbacks
     def recordWithUID(self, uid):
-        returnValue(uniqueResult((yield self.recordsWithFieldValue(FieldName.uid, uid))))
-               
+        returnValue(uniqueResult(
+            (yield self.recordsWithFieldValue(FieldName.uid, uid))
+        ))
 
+
     @inlineCallbacks
     def recordWithGUID(self, guid):
-        returnValue(uniqueResult((yield self.recordsWithFieldValue(FieldName.guid, guid))))
+        returnValue(uniqueResult(
+            (yield self.recordsWithFieldValue(FieldName.guid, guid))
+        ))
 
 
     def recordsWithRecordType(self, recordType):
@@ -136,28 +204,46 @@
     def recordWithShortName(self, recordType, shortName):
         returnValue(uniqueResult((yield self.recordsFromQuery((
             MatchExpression(FieldName.recordType, recordType),
-            MatchExpression(FieldName.shortNames, shortName ),
+            MatchExpression(FieldName.shortNames, shortName),
         )))))
 
 
     def recordsWithEmailAddress(self, emailAddress):
-        return self.recordsWithFieldValue(FieldName.emailAddresses, emailAddress)
+        return self.recordsWithFieldValue(
+            FieldName.emailAddresses,
+            emailAddress,
+        )
 
 
     def updateRecords(self, records, create=False):
         for record in records:
             return fail(NotAllowedError("Record updates not allowed."))
+        return succeed(None)
 
 
     def removeRecords(self, uids):
         for uid in uids:
             return fail(NotAllowedError("Record removal not allowed."))
+        return succeed(None)
 
 
 
+ at implementer(IDirectoryRecord)
 class DirectoryRecord(object):
-    implements(IDirectoryRecord)
+    """
+    Generic implementation of L{IDirectoryService}.
 
+    This is an incomplete implementation of L{IDirectoryRecord}.
+
+    L{groups} will always fail with L{NotImplementedError} and L{members} will
+    do so if this is a group record.
+    A subclass should override these methods to support group membership and
+    complete this implementation.
+
+    @cvar requiredFields: an iterable of field names that must be present in
+        all directory records.
+    """
+
     requiredFields = (
         FieldName.uid,
         FieldName.recordType,
@@ -168,21 +254,31 @@
     def __init__(self, service, fields):
         for fieldName in self.requiredFields:
             if fieldName not in fields or not fields[fieldName]:
-                raise ValueError("%s field is required." % (fieldName,))
+                raise ValueError("{0} field is required.".format(fieldName))
 
             if FieldName.isMultiValue(fieldName):
                 values = fields[fieldName]
                 if len(values) == 0:
-                    raise ValueError("%s field must have at least one value." % (fieldName,))
+                    raise ValueError(
+                        "{0} field must have at least one value."
+                        .format(fieldName)
+                    )
                 for value in values:
                     if not value:
-                        raise ValueError("%s field must not be empty." % (fieldName,))
+                        raise ValueError(
+                            "{0} field must not be empty.".format(fieldName)
+                        )
 
-        if fields[FieldName.recordType] not in service.recordType.iterconstants():
-            raise ValueError("Record type must be one of %r, not %r." % (
-                tuple(service.recordType.iterconstants()),
-                fields[FieldName.recordType]
-            ))
+        if (
+            fields[FieldName.recordType] not in
+            service.recordType.iterconstants()
+        ):
+            raise ValueError(
+                "Record type must be one of {0!r}, not {1!r}.".format(
+                    tuple(service.recordType.iterconstants()),
+                    fields[FieldName.recordType],
+                )
+            )
 
         # Normalize fields
         normalizedFields = {}
@@ -197,16 +293,18 @@
                 normalizedFields[name] = tuple((normalize(v) for v in value))
             else:
                 normalizedFields[name] = normalize(value)
-        
+
         self.service = service
         self.fields  = normalizedFields
 
 
     def __repr__(self):
-        return "<%s (%s)%s>" % (
-            self.__class__.__name__,
-            describe(self.recordType),
-            self.shortNames[0],
+        return (
+            "<{self.__class__.__name__} ({recordType}){shortName}>".format(
+                self=self,
+                recordType=describe(self.recordType),
+                shortName=self.shortNames[0],
+            )
         )
 
 
@@ -262,9 +360,11 @@
 
     def members(self):
         if self.recordType == RecordType.group:
-            raise NotImplementedError()
+            return fail(
+                NotImplementedError("Subclasses must implement members()")
+            )
         return succeed(())
 
 
     def groups(self):
-        raise NotImplementedError()
+        return fail(NotImplementedError("Subclasses must implement groups()"))

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/expression.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/expression.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/expression.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -66,18 +66,26 @@
     """
     Query for a matching value in a given field.
 
-    @ivar fieldName: a L{NamedConstant} specifying the field
-    @ivar fieldValue: a text value to match
-    @ivar matchType: a L{NamedConstant} specifying the match algorythm
-    @ivar flags: L{NamedConstant} specifying additional options
+    @ivar fieldName: A L{NamedConstant} specifying the field.
+
+    @ivar fieldValue: A value to match.
+
+    @ivar matchType: A L{NamedConstant} specifying the match algorithm.
+
+    @ivar flags: A L{NamedConstant} specifying additional options.
     """
 
-    def __init__(self, fieldName, fieldValue, matchType=MatchType.equals, flags=None):
+    def __init__(
+        self,
+        fieldName, fieldValue,
+        matchType=MatchType.equals, flags=None
+    ):
         self.fieldName  = fieldName
         self.fieldValue = fieldValue
         self.matchType  = matchType
         self.flags      = flags
 
+
     def __repr__(self):
         def describe(constant):
             return getattr(constant, "description", str(constant))
@@ -85,12 +93,16 @@
         if self.flags is None:
             flags = ""
         else:
-            flags = " (%s)" % (describe(self.flags),)
+            flags = " ({0})".format(describe(self.flags))
 
-        return "<%s: %r %s %r%s>" % (
-            self.__class__.__name__,
-            describe(self.fieldName),
-            describe(self.matchType),
-            describe(self.fieldValue),
-            flags
+        return (
+            "<{self.__class__.__name__}: {fieldName!r} "
+            "{matchType} {fieldValue!r}{flags}>"
+            .format(
+                self=self,
+                fieldName=describe(self.fieldName),
+                matchType=describe(self.matchType),
+                fieldValue=describe(self.fieldValue),
+                flags=flags,
+            )
         )

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/idirectory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/idirectory.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/idirectory.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -36,31 +36,39 @@
     "IDirectoryRecord",
 ]
 
+from uuid import UUID
+
 from zope.interface import Attribute, Interface
 
 from twisted.python.constants import Names, NamedConstant
 
 
 
-##
+#
 # Exceptions
-##
+#
 
 class DirectoryServiceError(Exception):
     """
     Directory service generic error.
     """
 
+
+
 class DirectoryConfigurationError(DirectoryServiceError):
     """
-    Directory configurtion error.
+    Directory configuration error.
     """
 
+
+
 class DirectoryAvailabilityError(DirectoryServiceError):
     """
     Directory not available.
     """
 
+
+
 class UnknownRecordTypeError(DirectoryServiceError):
     """
     Unknown record type.
@@ -69,28 +77,37 @@
         DirectoryServiceError.__init__(self, token)
         self.token = token
 
+
+
 class QueryNotSupportedError(DirectoryServiceError):
     """
     Query not supported.
     """
 
+
+
 class NoSuchRecordError(DirectoryServiceError):
     """
     Record does not exist.
     """
 
+
+
 class NotAllowedError(DirectoryServiceError):
     """
-    Apparently, you can't do that.
+    It seems you aren't permitted to do that.
     """
 
 
 
-##
+#
 # Data Types
-##
+#
 
 class RecordType(Names):
+    """
+    Constants for common directory record types.
+    """
     user  = NamedConstant()
     group = NamedConstant()
 
@@ -101,7 +118,31 @@
 
 class FieldName(Names):
     """
-    Constants for common field names.
+    Constants for common directory record field names.
+
+    Fields are associated with either a single value or an iterable of values.
+
+    @cvar uid: The primary unique identifier for a directory record.
+        The associated value must be a L{unicode}.
+
+    @cvar guid: The globally unique identifier for a directory record.
+        The associated value must be a L{UUID} or C{None}.
+
+    @cvar recordType: The type of a directory record.
+        The associated value must be a L{NamedConstant}.
+
+    @cvar shortNames: The short names for a directory record.
+        The associated values must be L{unicode}s and there must be at least
+        one associated value.
+
+    @cvar fullNames: The full names for a directory record.
+        The associated values must be L{unicode}s.
+
+    @cvar emailAddresses: The email addresses for a directory record.
+        The associated values must be L{unicode}s.
+
+    @cvar password: The clear text password for a directory record.
+        The associated value must be a L{unicode} or C{None}.
     """
     uid            = NamedConstant()
     guid           = NamedConstant()
@@ -119,17 +160,46 @@
     emailAddresses.description = "email addresses"
     password.description       = "password"
 
+    guid.valueType = UUID
+
     shortNames.multiValue     = True
     fullNames.multiValue      = True
     emailAddresses.multiValue = True
 
+
     @staticmethod
     def isMultiValue(name):
+        """
+        Check for whether a field is multi-value (as opposed to single-value).
+
+        @param name: The name of the field.
+        @type name: L{NamedConstant}
+
+        @return: C{True} if the field is multi-value, C{False} otherwise.
+        @rtype: L{bool}
+        """
         return getattr(name, "multiValue", False)
 
 
+    @staticmethod
+    def valueType(name):
+        """
+        Check for the expected type of values for a field.
 
+        @param name: The name of the field.
+        @type name: L{NamedConstant}
+
+        @return: The expected type.
+        @rtype: L{type}
+        """
+        return getattr(name, "valueType", unicode)
+
+
+
 class Operand(Names):
+    """
+    Constants for common operands.
+    """
     OR  = NamedConstant()
     AND = NamedConstant()
 
@@ -138,9 +208,9 @@
 
 
 
-##
+#
 # Interfaces
-##
+#
 
 class IDirectoryService(Interface):
     """
@@ -156,109 +226,172 @@
 
     A directory service may allow support the editing, removal and
     addition of records.
+    Services that are read-only should fail with L{NotAllowedError} in
+    editing methods.
+
+    The L{FieldName.uid} field, the L{FieldName.guid} field (if not C{None}),
+    and the combination of the L{FieldName.recordType} and
+    L{FieldName.shortName} fields must be unique to each directory record
+    vended by a directory service.
     """
-    realmName = Attribute("The name of the authentication realm this service represents.")
 
+    realmName = Attribute(
+        "The name of the authentication realm this service represents."
+    )
+
+
     def recordTypes():
         """
-        @return: an iterable of L{NamedConstant}s denoting the record
-            types that are kept in this directory.
+        Get the record types supported by this directory service.
+
+        @return: The record types that are supported by this directory service.
+        @rtype: iterable of L{NamedConstant}s
         """
 
+
     def recordsFromExpression(self, expression):
         """
         Find records matching an expression.
+
         @param expression: an expression to apply
         @type expression: L{object}
-        @return: a deferred iterable of matching L{IDirectoryRecord}s.
+
+        @return: The matching records.
+        @rtype: deferred iterable of L{IDirectoryRecord}s
+
         @raises: L{QueryNotSupportedError} if the expression is not
             supported by this directory service.
         """
 
+
     def recordsFromQuery(expressions, operand=Operand.AND):
         """
         Find records by composing a query consisting of an iterable of
         expressions and an operand.
+
         @param expressions: expressions to query against
         @type expressions: iterable of L{object}s
+
         @param operand: an operand
         @type operand: a L{NamedConstant}
-        @return: a deferred iterable of matching L{IDirectoryRecord}s.
+
+        @return: The matching records.
+        @rtype: deferred iterable of L{IDirectoryRecord}s
+
         @raises: L{QueryNotSupportedError} if the query is not
             supported by this directory service.
         """
 
+
     def recordsWithFieldValue(fieldName, value):
         """
         Find records that have the given field name with the given
         value.
+
         @param fieldName: a field name
         @type fieldName: L{NamedConstant}
+
         @param value: a value to match
         @type value: L{bytes}
-        @return: a deferred iterable of L{IDirectoryRecord}s.
+
+        @return: The matching records.
+        @rtype: deferred iterable of L{IDirectoryRecord}s
         """
 
+
     def recordWithUID(uid):
         """
         Find the record that has the given UID.
+
         @param uid: a UID
         @type uid: L{bytes}
-        @return: a deferred iterable of L{IDirectoryRecord}s, or
-            C{None} if there is no such record.
+
+        @return: The matching record or C{None} if there is no match.
+        @rtype: deferred L{IDirectoryRecord}s or C{None}
         """
-               
+
+
     def recordWithGUID(guid):
         """
         Find the record that has the given GUID.
+
         @param guid: a GUID
-        @type guid: L{bytes}
-        @return: a deferred iterable of L{IDirectoryRecord}s, or
-            C{None} if there is no such record.
+        @type guid: L{UUID}
+
+        @return: The matching record or C{None} if there is no match.
+        @rtype: deferred L{IDirectoryRecord}s or C{None}
         """
 
+
     def recordsWithRecordType(recordType):
         """
         Find the records that have the given record type.
+
         @param recordType: a record type
         @type recordType: L{NamedConstant}
-        @return: a deferred iterable of L{IDirectoryRecord}s.
+
+        @return: The matching records.
+        @rtype: deferred iterable of L{IDirectoryRecord}s
         """
 
+
     def recordWithShortName(recordType, shortName):
         """
         Find the record that has the given record type and short name.
+
         @param recordType: a record type
         @type recordType: L{NamedConstant}
+
         @param shortName: a short name
         @type shortName: L{bytes}
-        @return: a deferred iterable of L{IDirectoryRecord}s, or
-            C{None} if there is no such record.
+
+        @return: The matching record or C{None} if there is no match.
+        @rtype: deferred L{IDirectoryRecord}s or C{None}
         """
 
+
     def recordsWithEmailAddress(emailAddress):
         """
         Find the records that have the given email address.
+
         @param emailAddress: an email address
         @type emailAddress: L{bytes}
-        @return: a deferred iterable of L{IDirectoryRecord}s, or
-            C{None} if there is no such record.
+
+        @return: The matching records.
+        @rtype: deferred iterable of L{IDirectoryRecord}s
         """
 
+
     def updateRecords(records, create=False):
         """
         Updates existing directory records.
+
         @param records: the records to update
         @type records: iterable of L{IDirectoryRecord}s
+
         @param create: if true, create records if necessary
         @type create: boolean
+
+        @return: unspecified
+        @rtype: deferred object
+
+        @raises L{NotAllowedError}: if the update is not allowed by the
+            directory service.
         """
 
+
     def removeRecords(uids):
         """
         Removes the records with the given UIDs.
+
         @param uids: the UIDs of the records to remove
         @type uids: iterable of L{bytes}
+
+        @return: unspecified
+        @rtype: deferred object
+
+        @raises L{NotAllowedError}: if the removal is not allowed by the
+            directory service.
         """
 
 
@@ -294,19 +427,23 @@
     service = Attribute("The L{IDirectoryService} this record exists in.")
     fields  = Attribute("A mapping with L{NamedConstant} keys.")
 
+
     def members():
         """
         Find the records that are members of this group.  Only direct
         members are included; members of members are not expanded.
+
         @return: a deferred iterable of L{IDirectoryRecord}s which are
             direct members of this group.
         """
 
+
     def groups():
         """
         Find the group records that this record is a member of.  Only
         groups for which this record is a direct member are
         included; membership is not expanded.
+
         @return: a deferred iterable of L{IDirectoryRecord}s which are
             groups that this record is a member of.
         """

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/index.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/index.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/index.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -29,7 +29,8 @@
 from twisted.python.constants import Names, NamedConstant
 from twisted.internet.defer import succeed, inlineCallbacks, returnValue
 
-from twext.who.util import ConstantsContainer, describe, uniqueResult, iterFlags
+from twext.who.util import ConstantsContainer
+from twext.who.util import describe, uniqueResult, iterFlags
 from twext.who.idirectory import FieldName as BaseFieldName
 from twext.who.expression import MatchExpression, MatchType, MatchFlags
 from twext.who.directory import DirectoryService as BaseDirectoryService
@@ -57,7 +58,10 @@
     XML directory service.
     """
 
-    fieldName = ConstantsContainer(chain(BaseDirectoryService.fieldName.iterconstants(), FieldName.iterconstants()))
+    fieldName = ConstantsContainer(chain(
+        BaseDirectoryService.fieldName.iterconstants(),
+        FieldName.iterconstants()
+    ))
 
     indexedFields = (
         BaseFieldName.recordType,
@@ -90,7 +94,7 @@
         """
         Load records.
         """
-        raise NotImplementedError("Subclasses should implement loadRecords().")
+        raise NotImplementedError("Subclasses must implement loadRecords().")
 
 
     def flush(self):
@@ -112,7 +116,9 @@
                 elif flag == MatchFlags.caseInsensitive:
                     normalize = lambda x: x.lower()
                 else:
-                    raise NotImplementedError("Unknown query flag: %s" % (describe(flag),))
+                    raise NotImplementedError(
+                        "Unknown query flag: {0}".format(describe(flag))
+                    )
 
         return predicate, normalize
 
@@ -131,16 +137,27 @@
         matchType  = expression.matchType
 
         if matchType == MatchType.startsWith:
-            indexKeys = (key for key in fieldIndex if predicate(normalize(key).startswith(matchValue)))
+            indexKeys = (
+                key for key in fieldIndex
+                if predicate(normalize(key).startswith(matchValue))
+            )
         elif matchType == MatchType.contains:
-            indexKeys = (key for key in fieldIndex if predicate(matchValue in normalize(key)))
+            indexKeys = (
+                key for key in fieldIndex
+                if predicate(matchValue in normalize(key))
+            )
         elif matchType == MatchType.equals:
             if predicate(True):
                 indexKeys = (matchValue,)
             else:
-                indexKeys = (key for key in fieldIndex if normalize(key) != matchValue)
+                indexKeys = (
+                    key for key in fieldIndex
+                    if normalize(key) != matchValue
+                )
         else:
-            raise NotImplementedError("Unknown match type: %s" % (describe(matchType),))
+            raise NotImplementedError(
+                "Unknown match type: {0}".format(describe(matchType))
+            )
 
         matchingRecords = set()
         for key in indexKeys:
@@ -165,18 +182,25 @@
         matchType  = expression.matchType
 
         if matchType == MatchType.startsWith:
-            match = lambda fieldValue: predicate(fieldValue.startswith(matchValue))
+            match = lambda fieldValue: predicate(
+                fieldValue.startswith(matchValue)
+            )
         elif matchType == MatchType.contains:
             match = lambda fieldValue: predicate(matchValue in fieldValue)
         elif matchType == MatchType.equals:
             match = lambda fieldValue: predicate(fieldValue == matchValue)
         else:
-            raise NotImplementedError("Unknown match type: %s" % (describe(matchType),))
+            raise NotImplementedError(
+                "Unknown match type: {0}".format(describe(matchType))
+            )
 
         result = set()
 
         if records is None:
-            records = (uniqueResult(values) for values in self.index[self.fieldName.uid].itervalues())
+            records = (
+                uniqueResult(values) for values
+                in self.index[self.fieldName.uid].itervalues()
+            )
 
         for record in records:
             fieldValues = record.fields.get(expression.fieldName, None)
@@ -194,11 +218,17 @@
     def recordsFromExpression(self, expression, records=None):
         if isinstance(expression, MatchExpression):
             if expression.fieldName in self.indexedFields:
-                return self.indexedRecordsFromMatchExpression(expression, records=records)
+                return self.indexedRecordsFromMatchExpression(
+                    expression, records=records
+                )
             else:
-                return self.unIndexedRecordsFromMatchExpression(expression, records=records)
+                return self.unIndexedRecordsFromMatchExpression(
+                    expression, records=records
+                )
         else:
-            return BaseDirectoryService.recordsFromExpression(self, expression, records=records)
+            return BaseDirectoryService.recordsFromExpression(
+                self, expression, records=records
+            )
 
 
 
@@ -206,6 +236,7 @@
     """
     XML directory record
     """
+
     @inlineCallbacks
     def members(self):
         members = set()
@@ -215,4 +246,6 @@
 
 
     def groups(self):
-        return self.service.recordsWithFieldValue(FieldName.memberUIDs, self.uid)
+        return self.service.recordsWithFieldValue(
+            FieldName.memberUIDs, self.uid
+        )

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/test/test_aggregate.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/test/test_aggregate.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/test/test_aggregate.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -26,7 +26,8 @@
 from twext.who.util import ConstantsContainer
 
 from twext.who.test import test_directory, test_xml
-from twext.who.test.test_xml import QueryMixIn, xmlService, TestService as XMLTestService
+from twext.who.test.test_xml import QueryMixIn, xmlService
+from twext.who.test.test_xml import TestService as XMLTestService
 
 
 
@@ -68,7 +69,10 @@
 
 
 
-class DirectoryServiceImmutableTest(BaseTest, test_directory.DirectoryServiceImmutableTest):
+class DirectoryServiceImmutableTest(
+    BaseTest,
+    test_directory.BaseDirectoryServiceImmutableTest,
+):
     pass
 
 
@@ -81,24 +85,39 @@
         class GroupsDirectoryService(XMLTestService):
             recordType = ConstantsContainer((XMLTestService.recordType.group,))
 
-        usersService  = self.xmlService(testXMLConfigUsers, UsersDirectoryService)
-        groupsService = self.xmlService(testXMLConfigGroups, GroupsDirectoryService)
+        usersService = self.xmlService(
+            testXMLConfigUsers,
+            UsersDirectoryService
+        )
+        groupsService = self.xmlService(
+            testXMLConfigGroups,
+            GroupsDirectoryService
+        )
 
         return BaseTest.service(self, (usersService, groupsService))
 
 
 
-class DirectoryServiceAggregatedBaseTest(AggregatedBaseTest, DirectoryServiceBaseTest):
+class DirectoryServiceAggregatedBaseTest(
+    AggregatedBaseTest,
+    DirectoryServiceBaseTest,
+):
     pass
 
 
 
-class DirectoryServiceAggregatedQueryTest(AggregatedBaseTest, test_xml.DirectoryServiceQueryTest):
+class DirectoryServiceAggregatedQueryTest(
+    AggregatedBaseTest,
+    test_xml.DirectoryServiceQueryTest,
+):
     pass
 
 
 
-class DirectoryServiceAggregatedImmutableTest(AggregatedBaseTest, test_directory.DirectoryServiceImmutableTest):
+class DirectoryServiceAggregatedImmutableTest(
+    AggregatedBaseTest,
+    test_directory.BaseDirectoryServiceImmutableTest,
+):
     pass
 
 

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/test/test_directory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/test/test_directory.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/test/test_directory.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -31,7 +31,7 @@
 
 
 
-class BaseTest(unittest.TestCase):
+class ServiceMixIn(object):
     realmName = "xyzzy"
 
 
@@ -42,7 +42,7 @@
 
 
 
-class DirectoryServiceTest(BaseTest):
+class BaseDirectoryServiceTest(ServiceMixIn):
     def test_interface(self):
         service = self.service()
         try:
@@ -79,7 +79,10 @@
 
     def test_recordsFromQueryBogus(self):
         service = self.service()
-        self.assertFailure(service.recordsFromQuery((object(),)), QueryNotSupportedError)
+        self.assertFailure(
+            service.recordsFromQuery((object(),)),
+            QueryNotSupportedError
+        )
 
 
     def test_recordWithUID(self):
@@ -103,13 +106,61 @@
 
 
 
-class DirectoryServiceImmutableTest(BaseTest):
+class DirectoryServiceTest(unittest.TestCase, BaseDirectoryServiceTest):
+    def test_recordsFromExpression(self):
+        service = self.service()
+        result = yield(service.recordsFromExpression(None))
+        self.assertFailure(result, QueryNotSupportedError)
+
+
+    def test_recordWithUID(self):
+        service = self.service()
+        self.assertFailure(
+            service.recordWithUID(None),
+            QueryNotSupportedError
+        )
+
+
+    def test_recordWithGUID(self):
+        service = self.service()
+        self.assertFailure(
+            service.recordWithGUID(None),
+            QueryNotSupportedError
+        )
+
+
+    def test_recordsWithRecordType(self):
+        service = self.service()
+        self.assertFailure(
+            service.recordsWithRecordType(None),
+            QueryNotSupportedError
+        )
+
+
+    def test_recordWithShortName(self):
+        service = self.service()
+        self.assertFailure(
+            service.recordWithShortName(None, None),
+            QueryNotSupportedError
+        )
+
+
+    def test_recordsWithEmailAddress(self):
+        service = self.service()
+        self.assertFailure(
+            service.recordsWithEmailAddress(None),
+            QueryNotSupportedError
+        )
+
+
+
+class BaseDirectoryServiceImmutableTest(ServiceMixIn):
     def test_updateRecordsNotAllowed(self):
         service = self.service()
 
         newRecord = DirectoryRecord(
             service,
-            fields = {
+            fields={
                 service.fieldName.uid:        "__plugh__",
                 service.fieldName.recordType: service.recordType.user,
                 service.fieldName.shortNames: ("plugh",),
@@ -138,33 +189,55 @@
 
 
 
-class DirectoryRecordTest(BaseTest):
+class DirectoryServiceImmutableTest(
+    unittest.TestCase,
+    BaseDirectoryServiceImmutableTest,
+):
+    pass
+
+
+
+class BaseDirectoryRecordTest(ServiceMixIn):
     fields_wsanchez = {
-        FieldName.uid:            "UID:wsanchez",
-        FieldName.recordType:     RecordType.user,
-        FieldName.shortNames:     ("wsanchez", "wilfredo_sanchez"),
-        FieldName.fullNames:      ("Wilfredo Sanchez", "Wilfredo Sanchez Vega"),
-        FieldName.emailAddresses: ("wsanchez at calendarserver.org", "wsanchez at example.com")
+        FieldName.uid: "UID:wsanchez",
+        FieldName.recordType: RecordType.user,
+        FieldName.shortNames: ("wsanchez", "wilfredo_sanchez"),
+        FieldName.fullNames: (
+            "Wilfredo Sanchez",
+            "Wilfredo Sanchez Vega",
+        ),
+        FieldName.emailAddresses: (
+            "wsanchez at calendarserver.org",
+            "wsanchez at example.com",
+        )
     }
 
     fields_glyph = {
-        FieldName.uid:            "UID:glyph",
-        FieldName.recordType:     RecordType.user,
-        FieldName.shortNames:     ("glyph",),
-        FieldName.fullNames:      ("Glyph Lefkowitz",),
+        FieldName.uid: "UID:glyph",
+        FieldName.recordType: RecordType.user,
+        FieldName.shortNames: ("glyph",),
+        FieldName.fullNames: ("Glyph Lefkowitz",),
         FieldName.emailAddresses: ("glyph at calendarserver.org",)
     }
 
     fields_sagen = {
-        FieldName.uid:            "UID:sagen",
-        FieldName.recordType:     RecordType.user,
-        FieldName.shortNames:     ("sagen",),
-        FieldName.fullNames:      ("Morgen Sagen",),
+        FieldName.uid: "UID:sagen",
+        FieldName.recordType: RecordType.user,
+        FieldName.shortNames: ("sagen",),
+        FieldName.fullNames: ("Morgen Sagen",),
         FieldName.emailAddresses: ("sagen at CalendarServer.org",)
     }
 
+    fields_staff = {
+        FieldName.uid: "UID:staff",
+        FieldName.recordType: RecordType.group,
+        FieldName.shortNames: ("staff",),
+        FieldName.fullNames: ("Staff",),
+        FieldName.emailAddresses: ("staff at CalendarServer.org",)
+    }
 
-    def _testRecord(self, fields=None, service=None):
+
+    def makeRecord(self, fields=None, service=None):
         if fields is None:
             fields = self.fields_wsanchez
         if service is None:
@@ -173,7 +246,7 @@
 
 
     def test_interface(self):
-        record = self._testRecord()
+        record = self.makeRecord()
         try:
             verifyObject(IDirectoryRecord, record)
         except BrokenMethodImplementation as e:
@@ -182,58 +255,58 @@
 
     def test_init(self):
         service  = self.service()
-        wsanchez = self._testRecord(self.fields_wsanchez, service=service)
+        wsanchez = self.makeRecord(self.fields_wsanchez, service=service)
 
         self.assertEquals(wsanchez.service, service)
-        self.assertEquals(wsanchez.fields , self.fields_wsanchez)
+        self.assertEquals(wsanchez.fields, self.fields_wsanchez)
 
 
     def test_initWithNoUID(self):
         fields = self.fields_wsanchez.copy()
         del fields[FieldName.uid]
-        self.assertRaises(ValueError, self._testRecord, fields)
+        self.assertRaises(ValueError, self.makeRecord, fields)
 
         fields = self.fields_wsanchez.copy()
         fields[FieldName.uid] = ""
-        self.assertRaises(ValueError, self._testRecord, fields)
+        self.assertRaises(ValueError, self.makeRecord, fields)
 
 
     def test_initWithNoRecordType(self):
         fields = self.fields_wsanchez.copy()
         del fields[FieldName.recordType]
-        self.assertRaises(ValueError, self._testRecord, fields)
+        self.assertRaises(ValueError, self.makeRecord, fields)
 
         fields = self.fields_wsanchez.copy()
         fields[FieldName.recordType] = ""
-        self.assertRaises(ValueError, self._testRecord, fields)
+        self.assertRaises(ValueError, self.makeRecord, fields)
 
 
     def test_initWithNoShortNames(self):
         fields = self.fields_wsanchez.copy()
         del fields[FieldName.shortNames]
-        self.assertRaises(ValueError, self._testRecord, fields)
+        self.assertRaises(ValueError, self.makeRecord, fields)
 
         fields = self.fields_wsanchez.copy()
         fields[FieldName.shortNames] = ()
-        self.assertRaises(ValueError, self._testRecord, fields)
+        self.assertRaises(ValueError, self.makeRecord, fields)
 
         fields = self.fields_wsanchez.copy()
         fields[FieldName.shortNames] = ("",)
-        self.assertRaises(ValueError, self._testRecord, fields)
+        self.assertRaises(ValueError, self.makeRecord, fields)
 
         fields = self.fields_wsanchez.copy()
         fields[FieldName.shortNames] = ("wsanchez", "")
-        self.assertRaises(ValueError, self._testRecord, fields)
+        self.assertRaises(ValueError, self.makeRecord, fields)
 
 
     def test_initWithBogusRecordType(self):
         fields = self.fields_wsanchez.copy()
         fields[FieldName.recordType] = object()
-        self.assertRaises(ValueError, self._testRecord, fields)
+        self.assertRaises(ValueError, self.makeRecord, fields)
 
 
     def test_initNormalize(self):
-        sagen = self._testRecord(self.fields_sagen)
+        sagen = self.makeRecord(self.fields_sagen)
 
         self.assertEquals(
             sagen.fields[FieldName.emailAddresses],
@@ -245,30 +318,44 @@
         fields_glyphmod = self.fields_glyph.copy()
         del fields_glyphmod[FieldName.emailAddresses]
 
-        wsanchez    = self._testRecord(self.fields_wsanchez)
-        wsanchezmod = self._testRecord(self.fields_wsanchez, DirectoryService("plugh"))
-        glyph       = self._testRecord(self.fields_glyph)
-        glyphmod    = self._testRecord(fields_glyphmod)
+        plugh = DirectoryService("plugh")
 
+        wsanchez    = self.makeRecord(self.fields_wsanchez)
+        wsanchezmod = self.makeRecord(self.fields_wsanchez, plugh)
+        glyph       = self.makeRecord(self.fields_glyph)
+        glyphmod    = self.makeRecord(fields_glyphmod)
+
         self.assertEquals(wsanchez, wsanchez)
         self.assertNotEqual(wsanchez, glyph)
-        self.assertNotEqual(glyph, glyphmod) # UID matches, other fields do not
+        self.assertNotEqual(glyph, glyphmod)  # UID matches, other fields don't
         self.assertNotEqual(glyphmod, wsanchez)
-        self.assertNotEqual(wsanchez, wsanchezmod) # Different service
+        self.assertNotEqual(wsanchez, wsanchezmod)  # Different service
 
 
     def test_attributeAccess(self):
-        wsanchez = self._testRecord(self.fields_wsanchez)
+        wsanchez = self.makeRecord(self.fields_wsanchez)
 
-        self.assertEquals(wsanchez.recordType    , wsanchez.fields[FieldName.recordType    ])
-        self.assertEquals(wsanchez.uid           , wsanchez.fields[FieldName.uid           ])
-        self.assertEquals(wsanchez.shortNames    , wsanchez.fields[FieldName.shortNames    ])
-        self.assertEquals(wsanchez.emailAddresses, wsanchez.fields[FieldName.emailAddresses])
+        self.assertEquals(
+            wsanchez.recordType,
+            wsanchez.fields[FieldName.recordType]
+        )
+        self.assertEquals(
+            wsanchez.uid,
+            wsanchez.fields[FieldName.uid]
+        )
+        self.assertEquals(
+            wsanchez.shortNames,
+            wsanchez.fields[FieldName.shortNames]
+        )
+        self.assertEquals(
+            wsanchez.emailAddresses,
+            wsanchez.fields[FieldName.emailAddresses]
+        )
 
+
     @inlineCallbacks
     def test_members(self):
-        wsanchez = self._testRecord(self.fields_wsanchez)
-
+        wsanchez = self.makeRecord(self.fields_wsanchez)
         self.assertEquals(
             set((yield wsanchez.members())),
             set()
@@ -276,5 +363,24 @@
 
         raise SkipTest("Subclasses should implement this test.")
 
+
     def test_groups(self):
         raise SkipTest("Subclasses should implement this test.")
+
+
+
+class DirectoryRecordTest(unittest.TestCase, BaseDirectoryRecordTest):
+    def test_members(self):
+        wsanchez = self.makeRecord(self.fields_wsanchez)
+        self.assertEquals(
+            set((yield wsanchez.members())),
+            set()
+        )
+
+        staff = self.makeRecord(self.fields_staff)
+        self.assertFailure(staff.members(), NotImplementedError)
+
+
+    def test_groups(self):
+        wsanchez = self.makeRecord(self.fields_wsanchez)
+        self.assertFailure(wsanchez.groups(), NotImplementedError)

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/test/test_util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/test/test_util.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/test/test_util.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -82,7 +82,11 @@
         )
 
     def test_lookupByName(self):
-        constants = set((Instruments.hammer, Tools.screwdriver, Instruments.chisel))
+        constants = set((
+            Instruments.hammer,
+            Tools.screwdriver,
+            Instruments.chisel,
+        ))
         container = ConstantsContainer(constants)
 
         self.assertEquals(
@@ -108,7 +112,7 @@
 class UtilTest(unittest.TestCase):
     def test_uniqueResult(self):
         self.assertEquals(1, uniqueResult((1,)))
-        self.assertRaises(DirectoryServiceError, uniqueResult, (1,2,3))
+        self.assertRaises(DirectoryServiceError, uniqueResult, (1, 2, 3))
 
     def test_describe(self):
         self.assertEquals("nail pounder", describe(Tools.hammer))
@@ -116,5 +120,5 @@
 
     def test_describeFlags(self):
         self.assertEquals("blue", describe(Switches.b))
-        self.assertEquals("red|green", describe(Switches.r|Switches.g))
-        self.assertEquals("blue|black", describe(Switches.b|Switches.black))
+        self.assertEquals("red|green", describe(Switches.r | Switches.g))
+        self.assertEquals("blue|black", describe(Switches.b | Switches.black))

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/test/test_xml.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/test/test_xml.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/test/test_xml.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -19,6 +19,7 @@
 """
 
 from time import sleep
+from uuid import UUID
 
 from twisted.trial import unittest
 from twisted.python.filepath import FilePath
@@ -47,7 +48,10 @@
 
 
 
-class DirectoryServiceBaseTest(BaseTest, test_directory.DirectoryServiceTest):
+class DirectoryServiceBaseTest(
+    BaseTest,
+    test_directory.BaseDirectoryServiceTest,
+):
     def test_repr(self):
         service = self.service()
 
@@ -60,17 +64,21 @@
     def test_recordWithUID(self):
         service = self.service()
 
-        record = (yield service.recordWithUID("__null__"))
+        record = (yield service.recordWithUID(u"__null__"))
         self.assertEquals(record, None)
 
-        record = (yield service.recordWithUID("__wsanchez__"))
-        self.assertEquals(record.uid, "__wsanchez__")
+        record = (yield service.recordWithUID(u"__wsanchez__"))
+        self.assertEquals(record.uid, u"__wsanchez__")
 
 
     @inlineCallbacks
     def test_recordWithGUID(self):
         service = self.service()
-        record = (yield service.recordWithGUID("6C495FCD-7E78-4D5C-AA66-BC890AD04C9D"))
+        record = (
+            yield service.recordWithGUID(
+                UUID("6C495FCD-7E78-4D5C-AA66-BC890AD04C9D")
+            )
+        )
         self.assertEquals(record, None)
 
     @inlineCallbacks
@@ -80,27 +88,33 @@
         records = (yield service.recordsWithRecordType(object()))
         self.assertEquals(set(records), set())
 
-        records = (yield service.recordsWithRecordType(service.recordType.user))
-        self.assertRecords(records,
+        records = (
+            yield service.recordsWithRecordType(service.recordType.user)
+        )
+        self.assertRecords(
+            records,
             (
-                "__wsanchez__",
-                "__glyph__",
-                "__sagen__",
-                "__cdaboo__",
-                "__dre__",
-                "__exarkun__",
-                "__dreid__",
-                "__alyssa__",
-                "__joe__",
+                u"__wsanchez__",
+                u"__glyph__",
+                u"__sagen__",
+                u"__cdaboo__",
+                u"__dre__",
+                u"__exarkun__",
+                u"__dreid__",
+                u"__alyssa__",
+                u"__joe__",
             ),
         )
 
-        records = (yield service.recordsWithRecordType(service.recordType.group))
-        self.assertRecords(records,
+        records = (
+            yield service.recordsWithRecordType(service.recordType.group)
+        )
+        self.assertRecords(
+            records,
             (
-                "__calendar-dev__",
-                "__twisted__",
-                "__developers__",
+                u"__calendar-dev__",
+                u"__twisted__",
+                u"__developers__",
             ),
         )
 
@@ -109,28 +123,55 @@
     def test_recordWithShortName(self):
         service = self.service()
 
-        record = (yield service.recordWithShortName(service.recordType.user, "null"))
+        record = (
+            yield service.recordWithShortName(
+                service.recordType.user,
+                u"null",
+            )
+        )
         self.assertEquals(record, None)
 
-        record = (yield service.recordWithShortName(service.recordType.user, "wsanchez"))
-        self.assertEquals(record.uid, "__wsanchez__")
+        record = (
+            yield service.recordWithShortName(
+                service.recordType.user,
+                u"wsanchez",
+            )
+        )
+        self.assertEquals(record.uid, u"__wsanchez__")
 
-        record = (yield service.recordWithShortName(service.recordType.user, "wilfredo_sanchez"))
-        self.assertEquals(record.uid, "__wsanchez__")
+        record = (
+            yield service.recordWithShortName(
+                service.recordType.user,
+                u"wilfredo_sanchez",
+            )
+        )
+        self.assertEquals(record.uid, u"__wsanchez__")
 
 
     @inlineCallbacks
     def test_recordsWithEmailAddress(self):
         service = self.service()
 
-        records = (yield service.recordsWithEmailAddress("wsanchez@bitbucket.calendarserver.org"))
-        self.assertRecords(records, ("__wsanchez__",))
+        records = (
+            yield service.recordsWithEmailAddress(
+                u"wsanchez@bitbucket.calendarserver.org"
+            )
+        )
+        self.assertRecords(records, (u"__wsanchez__",))
 
-        records = (yield service.recordsWithEmailAddress("wsanchez@devnull.twistedmatrix.com"))
-        self.assertRecords(records, ("__wsanchez__",))
+        records = (
+            yield service.recordsWithEmailAddress(
+                u"wsanchez@devnull.twistedmatrix.com"
+            )
+        )
+        self.assertRecords(records, (u"__wsanchez__",))
 
-        records = (yield service.recordsWithEmailAddress("shared@example.com"))
-        self.assertRecords(records, ("__sagen__", "__dre__"))
+        records = (
+            yield service.recordsWithEmailAddress(
+                u"shared@example.com"
+            )
+        )
+        self.assertRecords(records, (u"__sagen__", u"__dre__"))
 
 
 
@@ -138,7 +179,7 @@
     def test_realmNameImmutable(self):
         def setRealmName():
             service = self.service()
-            service.realmName = "foo"
+            service.realmName = u"foo"
 
         self.assertRaises(AssertionError, setRealmName)
 
@@ -176,12 +217,12 @@
 
 
     def test_badRootElement(self):
-        service = self.service(xmlData=
-"""<?xml version="1.0" encoding="utf-8"?>
+        service = self.service(xmlData=(
+b"""<?xml version="1.0" encoding="utf-8"?>
 
 <frobnitz />
 """
-        )
+        ))
 
         self.assertRaises(ParseError, service.loadRecords)
         try:
@@ -193,12 +234,12 @@
 
 
     def test_noRealmName(self):
-        service = self.service(xmlData=
-"""<?xml version="1.0" encoding="utf-8"?>
+        service = self.service(xmlData=(
+b"""<?xml version="1.0" encoding="utf-8"?>
 
 <directory />
 """
-        )
+        ))
 
         self.assertRaises(ParseError, service.loadRecords)
         try:
@@ -215,8 +256,8 @@
 
 
     def test_unknownFieldElementsDirty(self):
-        service = self.service(xmlData=
-"""<?xml version="1.0" encoding="utf-8"?>
+        service = self.service(xmlData=(
+b"""<?xml version="1.0" encoding="utf-8"?>
 
 <directory realm="Unknown Record Types">
   <record type="user">
@@ -226,8 +267,11 @@
   </record>
 </directory>
 """
+        ))
+        self.assertEquals(
+            set(service.unknownFieldElements),
+            set((u"political-affiliation",))
         )
-        self.assertEquals(set(service.unknownFieldElements), set(("political-affiliation",)))
 
 
     def test_unknownRecordTypesClean(self):
@@ -236,8 +280,8 @@
 
 
     def test_unknownRecordTypesDirty(self):
-        service = self.service(xmlData=
-"""<?xml version="1.0" encoding="utf-8"?>
+        service = self.service(xmlData=(
+b"""<?xml version="1.0" encoding="utf-8"?>
 
 <directory realm="Unknown Record Types">
   <record type="camera">
@@ -247,8 +291,8 @@
   </record>
 </directory>
 """
-        )
-        self.assertEquals(set(service.unknownRecordTypes), set(("camera",)))
+        ))
+        self.assertEquals(set(service.unknownRecordTypes), set((u"camera",)))
 
 
 
@@ -258,12 +302,12 @@
         service = self.service()
         records = yield service.recordsFromQuery(
             (
-                service.query("emailAddresses", "shared@example.com"),
-                service.query("shortNames", "sagen"),
+                service.query(u"emailAddresses", u"shared@example.com"),
+                service.query(u"shortNames", u"sagen"),
             ),
             operand=Operand.AND
         )
-        self.assertRecords(records, ("__sagen__",))
+        self.assertRecords(records, (u"__sagen__",))
 
 
     @inlineCallbacks
@@ -274,8 +318,8 @@
         service = self.service()
         records = yield service.recordsFromQuery(
             (
-                service.query("emailAddresses", "nobody@example.com"),
-                service.query("shortNames", "sagen"),
+                service.query(u"emailAddresses", u"nobody@example.com"),
+                service.query(u"shortNames", u"sagen"),
             ),
             operand=Operand.AND
         )
@@ -287,12 +331,12 @@
         service = self.service()
         records = yield service.recordsFromQuery(
             (
-                service.query("emailAddresses", "shared@example.com"),
-                service.query("shortNames", "wsanchez"),
+                service.query(u"emailAddresses", u"shared@example.com"),
+                service.query(u"shortNames", u"wsanchez"),
             ),
             operand=Operand.OR
         )
-        self.assertRecords(records, ("__sagen__", "__dre__", "__wsanchez__"))
+        self.assertRecords(records, (u"__sagen__", u"__dre__", u"__wsanchez__"))
 
 
     @inlineCallbacks
@@ -300,12 +344,12 @@
         service = self.service()
         records = yield service.recordsFromQuery(
             (
-                service.query("emailAddresses", "shared@example.com"),
-                service.query("shortNames", "sagen", flags=MatchFlags.NOT),
+                service.query(u"emailAddresses", u"shared@example.com"),
+                service.query(u"shortNames", u"sagen", flags=MatchFlags.NOT),
             ),
             operand=Operand.AND
         )
-        self.assertRecords(records, ("__dre__",))
+        self.assertRecords(records, (u"__dre__",))
 
 
     @inlineCallbacks
@@ -313,48 +357,60 @@
         service = self.service()
         records = yield service.recordsFromQuery(
             (
-                service.query("emailAddresses", "shared@example.com"),
-                service.query("fullNames", "Andre LaBranche", flags=MatchFlags.NOT),
+                service.query(u"emailAddresses", u"shared@example.com"),
+                service.query(
+                    u"fullNames", u"Andre LaBranche",
+                    flags=MatchFlags.NOT
+                ),
             ),
             operand=Operand.AND
         )
-        self.assertRecords(records, ("__sagen__",))
+        self.assertRecords(records, (u"__sagen__",))
 
 
     @inlineCallbacks
     def test_queryCaseInsensitive(self):
         service = self.service()
         records = yield service.recordsFromQuery((
-            service.query("shortNames", "SagEn", flags=MatchFlags.caseInsensitive),
+            service.query(
+                u"shortNames", u"SagEn",
+                flags=MatchFlags.caseInsensitive
+            ),
         ))
-        self.assertRecords(records, ("__sagen__",))
+        self.assertRecords(records, (u"__sagen__",))
 
 
     @inlineCallbacks
     def test_queryCaseInsensitiveNoIndex(self):
         service = self.service()
         records = yield service.recordsFromQuery((
-            service.query("fullNames", "moRGen SAGen", flags=MatchFlags.caseInsensitive),
+            service.query(
+                u"fullNames", u"moRGen SAGen",
+                flags=MatchFlags.caseInsensitive
+            ),
         ))
-        self.assertRecords(records, ("__sagen__",))
+        self.assertRecords(records, (u"__sagen__",))
 
 
     @inlineCallbacks
     def test_queryStartsWith(self):
         service = self.service()
         records = yield service.recordsFromQuery((
-            service.query("shortNames", "wil", matchType=MatchType.startsWith),
+            service.query(u"shortNames", u"wil", matchType=MatchType.startsWith),
         ))
-        self.assertRecords(records, ("__wsanchez__",))
+        self.assertRecords(records, (u"__wsanchez__",))
 
 
     @inlineCallbacks
     def test_queryStartsWithNoIndex(self):
         service = self.service()
         records = yield service.recordsFromQuery((
-            service.query("fullNames", "Wilfredo", matchType=MatchType.startsWith),
+            service.query(
+                u"fullNames", u"Wilfredo",
+                matchType=MatchType.startsWith
+            ),
         ))
-        self.assertRecords(records, ("__wsanchez__",))
+        self.assertRecords(records, (u"__wsanchez__",))
 
 
     @inlineCallbacks
@@ -362,25 +418,25 @@
         service = self.service()
         records = yield service.recordsFromQuery((
             service.query(
-                "shortNames", "w",
-                matchType = MatchType.startsWith,
-                flags = MatchFlags.NOT,
+                u"shortNames", u"w",
+                matchType=MatchType.startsWith,
+                flags=MatchFlags.NOT,
             ),
         ))
         self.assertRecords(
             records,
             (
-                '__alyssa__',
-                '__calendar-dev__',
-                '__cdaboo__',
-                '__developers__',
-                '__dre__',
-                '__dreid__',
-                '__exarkun__',
-                '__glyph__',
-                '__joe__',
-                '__sagen__',
-                '__twisted__',
+                u"__alyssa__",
+                u"__calendar-dev__",
+                u"__cdaboo__",
+                u"__developers__",
+                u"__dre__",
+                u"__dreid__",
+                u"__exarkun__",
+                u"__glyph__",
+                u"__joe__",
+                u"__sagen__",
+                u"__twisted__",
             ),
         )
 
@@ -396,26 +452,26 @@
         service = self.service()
         records = yield service.recordsFromQuery((
             service.query(
-                "shortNames", "wil",
-                matchType = MatchType.startsWith,
-                flags = MatchFlags.NOT,
+                u"shortNames", u"wil",
+                matchType=MatchType.startsWith,
+                flags=MatchFlags.NOT,
             ),
         ))
         self.assertRecords(
             records,
             (
-                '__alyssa__',
-                '__calendar-dev__',
-                '__cdaboo__',
-                '__developers__',
-                '__dre__',
-                '__dreid__',
-                '__exarkun__',
-                '__glyph__',
-                '__joe__',
-                '__sagen__',
-                '__twisted__',
-                '__wsanchez__',
+                u"__alyssa__",
+                u"__calendar-dev__",
+                u"__cdaboo__",
+                u"__developers__",
+                u"__dre__",
+                u"__dreid__",
+                u"__exarkun__",
+                u"__glyph__",
+                u"__joe__",
+                u"__sagen__",
+                u"__twisted__",
+                u"__wsanchez__",
             ),
         )
 
@@ -425,25 +481,25 @@
         service = self.service()
         records = yield service.recordsFromQuery((
             service.query(
-                "fullNames", "Wilfredo",
-                matchType = MatchType.startsWith,
-                flags = MatchFlags.NOT,
+                u"fullNames", u"Wilfredo",
+                matchType=MatchType.startsWith,
+                flags=MatchFlags.NOT,
             ),
         ))
         self.assertRecords(
             records,
             (
-                '__alyssa__',
-                '__calendar-dev__',
-                '__cdaboo__',
-                '__developers__',
-                '__dre__',
-                '__dreid__',
-                '__exarkun__',
-                '__glyph__',
-                '__joe__',
-                '__sagen__',
-                '__twisted__',
+                u"__alyssa__",
+                u"__calendar-dev__",
+                u"__cdaboo__",
+                u"__developers__",
+                u"__dre__",
+                u"__dreid__",
+                u"__exarkun__",
+                u"__glyph__",
+                u"__joe__",
+                u"__sagen__",
+                u"__twisted__",
             ),
         )
 
@@ -453,12 +509,12 @@
         service = self.service()
         records = yield service.recordsFromQuery((
             service.query(
-                "shortNames", "WIL",
-                matchType = MatchType.startsWith,
-                flags = MatchFlags.caseInsensitive,
+                u"shortNames", u"WIL",
+                matchType=MatchType.startsWith,
+                flags=MatchFlags.caseInsensitive,
             ),
         ))
-        self.assertRecords(records, ("__wsanchez__",))
+        self.assertRecords(records, (u"__wsanchez__",))
 
 
     @inlineCallbacks
@@ -466,30 +522,33 @@
         service = self.service()
         records = yield service.recordsFromQuery((
             service.query(
-                "fullNames", "wilfrEdo",
-                matchType = MatchType.startsWith,
-                flags = MatchFlags.caseInsensitive,
+                u"fullNames", u"wilfrEdo",
+                matchType=MatchType.startsWith,
+                flags=MatchFlags.caseInsensitive,
             ),
         ))
-        self.assertRecords(records, ("__wsanchez__",))
+        self.assertRecords(records, (u"__wsanchez__",))
 
 
     @inlineCallbacks
     def test_queryContains(self):
         service = self.service()
         records = yield service.recordsFromQuery((
-            service.query("shortNames", "sanchez", matchType=MatchType.contains),
+            service.query(
+                u"shortNames", u"sanchez",
+                matchType=MatchType.contains
+            ),
         ))
-        self.assertRecords(records, ("__wsanchez__",))
+        self.assertRecords(records, (u"__wsanchez__",))
 
 
     @inlineCallbacks
     def test_queryContainsNoIndex(self):
         service = self.service()
         records = yield service.recordsFromQuery((
-            service.query("fullNames", "fred", matchType=MatchType.contains),
+            service.query(u"fullNames", u"fred", matchType=MatchType.contains),
         ))
-        self.assertRecords(records, ("__wsanchez__",))
+        self.assertRecords(records, (u"__wsanchez__",))
 
 
     @inlineCallbacks
@@ -497,25 +556,25 @@
         service = self.service()
         records = yield service.recordsFromQuery((
             service.query(
-                "shortNames", "sanchez",
-                matchType = MatchType.contains,
-                flags = MatchFlags.NOT,
+                u"shortNames", u"sanchez",
+                matchType=MatchType.contains,
+                flags=MatchFlags.NOT,
             ),
         ))
         self.assertRecords(
             records,
             (
-                '__alyssa__',
-                '__calendar-dev__',
-                '__cdaboo__',
-                '__developers__',
-                '__dre__',
-                '__dreid__',
-                '__exarkun__',
-                '__glyph__',
-                '__joe__',
-                '__sagen__',
-                '__twisted__',
+                u"__alyssa__",
+                u"__calendar-dev__",
+                u"__cdaboo__",
+                u"__developers__",
+                u"__dre__",
+                u"__dreid__",
+                u"__exarkun__",
+                u"__glyph__",
+                u"__joe__",
+                u"__sagen__",
+                u"__twisted__",
             ),
         )
 
@@ -525,25 +584,25 @@
         service = self.service()
         records = yield service.recordsFromQuery((
             service.query(
-                "fullNames", "fred",
-                matchType = MatchType.contains,
-                flags = MatchFlags.NOT,
+                u"fullNames", u"fred",
+                matchType=MatchType.contains,
+                flags=MatchFlags.NOT,
             ),
         ))
         self.assertRecords(
             records,
             (
-                '__alyssa__',
-                '__calendar-dev__',
-                '__cdaboo__',
-                '__developers__',
-                '__dre__',
-                '__dreid__',
-                '__exarkun__',
-                '__glyph__',
-                '__joe__',
-                '__sagen__',
-                '__twisted__',
+                u"__alyssa__",
+                u"__calendar-dev__",
+                u"__cdaboo__",
+                u"__developers__",
+                u"__dre__",
+                u"__dreid__",
+                u"__exarkun__",
+                u"__glyph__",
+                u"__joe__",
+                u"__sagen__",
+                u"__twisted__",
             ),
         )
 
@@ -553,12 +612,12 @@
         service = self.service()
         records = yield service.recordsFromQuery((
             service.query(
-                "shortNames", "Sanchez",
+                u"shortNames", u"Sanchez",
                 matchType=MatchType.contains,
                 flags=MatchFlags.caseInsensitive,
             ),
         ))
-        self.assertRecords(records, ("__wsanchez__",))
+        self.assertRecords(records, (u"__wsanchez__",))
 
 
     @inlineCallbacks
@@ -566,12 +625,12 @@
         service = self.service()
         records = yield service.recordsFromQuery((
             service.query(
-                "fullNames", "frEdo",
+                u"fullNames", u"frEdo",
                 matchType=MatchType.contains,
                 flags=MatchFlags.caseInsensitive,
             ),
         ))
-        self.assertRecords(records, ("__wsanchez__",))
+        self.assertRecords(records, (u"__wsanchez__",))
 
 
 
@@ -580,22 +639,28 @@
     def test_updateRecord(self):
         service = self.service()
 
-        record = (yield service.recordWithUID("__wsanchez__"))
+        record = (yield service.recordWithUID(u"__wsanchez__"))
 
         fields = record.fields.copy()
-        fields[service.fieldName.fullNames] = ["Wilfredo Sanchez Vega"]
+        fields[service.fieldName.fullNames] = [u"Wilfredo Sanchez Vega"]
 
         updatedRecord = DirectoryRecord(service, fields)
         yield service.updateRecords((updatedRecord,))
 
         # Verify change is present immediately
-        record = (yield service.recordWithUID("__wsanchez__"))
-        self.assertEquals(set(record.fullNames), set(("Wilfredo Sanchez Vega",)))
+        record = (yield service.recordWithUID(u"__wsanchez__"))
+        self.assertEquals(
+            set(record.fullNames),
+            set((u"Wilfredo Sanchez Vega",))
+        )
 
         # Verify change is persisted
         service.flush()
-        record = (yield service.recordWithUID("__wsanchez__"))
-        self.assertEquals(set(record.fullNames), set(("Wilfredo Sanchez Vega",)))
+        record = (yield service.recordWithUID(u"__wsanchez__"))
+        self.assertEquals(
+            set(record.fullNames),
+            set((u"Wilfredo Sanchez Vega",))
+        )
 
 
     @inlineCallbacks
@@ -604,23 +669,23 @@
 
         newRecord = DirectoryRecord(
             service,
-            fields = {
-                service.fieldName.uid:        "__plugh__",
+            fields={
+                service.fieldName.uid:        u"__plugh__",
                 service.fieldName.recordType: service.recordType.user,
-                service.fieldName.shortNames: ("plugh",),
+                service.fieldName.shortNames: (u"plugh",),
             }
         )
 
         yield service.updateRecords((newRecord,), create=True)
 
         # Verify change is present immediately
-        record = (yield service.recordWithUID("__plugh__"))
-        self.assertEquals(set(record.shortNames), set(("plugh",)))
+        record = (yield service.recordWithUID(u"__plugh__"))
+        self.assertEquals(set(record.shortNames), set((u"plugh",)))
 
         # Verify change is persisted
         service.flush()
-        record = (yield service.recordWithUID("__plugh__"))
-        self.assertEquals(set(record.shortNames), set(("plugh",)))
+        record = (yield service.recordWithUID(u"__plugh__"))
+        self.assertEquals(set(record.shortNames), set((u"plugh",)))
 
 
     def test_addRecordNoCreate(self):
@@ -628,67 +693,70 @@
 
         newRecord = DirectoryRecord(
             service,
-            fields = {
-                service.fieldName.uid:        "__plugh__",
+            fields={
+                service.fieldName.uid:        u"__plugh__",
                 service.fieldName.recordType: service.recordType.user,
-                service.fieldName.shortNames: ("plugh",),
+                service.fieldName.shortNames: (u"plugh",),
             }
         )
 
-        self.assertFailure(service.updateRecords((newRecord,)), NoSuchRecordError)
+        self.assertFailure(
+            service.updateRecords((newRecord,)),
+            NoSuchRecordError
+        )
 
 
     @inlineCallbacks
     def test_removeRecord(self):
         service = self.service()
 
-        yield service.removeRecords(("__wsanchez__",))
+        yield service.removeRecords((u"__wsanchez__",))
 
         # Verify change is present immediately
-        self.assertEquals((yield service.recordWithUID("__wsanchez__")), None)
+        self.assertEquals((yield service.recordWithUID(u"__wsanchez__")), None)
 
         # Verify change is persisted
         service.flush()
-        self.assertEquals((yield service.recordWithUID("__wsanchez__")), None)
+        self.assertEquals((yield service.recordWithUID(u"__wsanchez__")), None)
 
 
     def test_removeRecordNoExist(self):
         service = self.service()
 
-        return service.removeRecords(("__plugh__",))
+        return service.removeRecords((u"__plugh__",))
 
 
 
-class DirectoryRecordTest(BaseTest, test_directory.DirectoryRecordTest):
+class DirectoryRecordTest(BaseTest, test_directory.BaseDirectoryRecordTest):
     @inlineCallbacks
     def test_members(self):
         service = self.service()
 
-        record = (yield service.recordWithUID("__wsanchez__"))
+        record = (yield service.recordWithUID(u"__wsanchez__"))
         members = (yield record.members())
         self.assertEquals(set(members), set())
 
-        record = (yield service.recordWithUID("__twisted__"))
+        record = (yield service.recordWithUID(u"__twisted__"))
         members = (yield record.members())
         self.assertEquals(
             set((member.uid for member in members)),
             set((
-                "__wsanchez__",
-                "__glyph__",
-                "__exarkun__",
-                "__dreid__",
-                "__dre__",
+                u"__wsanchez__",
+                u"__glyph__",
+                u"__exarkun__",
+                u"__dreid__",
+                u"__dre__",
             ))
         )
 
-        record = (yield service.recordWithUID("__developers__"))
+        record = (yield service.recordWithUID(u"__developers__"))
         members = (yield record.members())
         self.assertEquals(
             set((member.uid for member in members)),
             set((
-                "__calendar-dev__",
-                "__twisted__",
-                "__alyssa__",
+                u"__calendar-dev__",
+                u"__twisted__",
+                u"__alyssa__",
             ))
         )
 
@@ -696,13 +764,13 @@
     def test_groups(self):
         service = self.service()
 
-        record = (yield service.recordWithUID("__wsanchez__"))
+        record = (yield service.recordWithUID(u"__wsanchez__"))
         groups = (yield record.groups())
         self.assertEquals(
             set(group.uid for group in groups),
             set((
-                "__calendar-dev__",
-                "__twisted__",
+                u"__calendar-dev__",
+                u"__twisted__",
             ))
         )
 
@@ -714,8 +782,8 @@
         assert name is not None
         return MatchExpression(
             name, value,
-            matchType = matchType,
-            flags = flags,
+            matchType=matchType,
+            flags=flags,
         )
 
 
@@ -739,7 +807,7 @@
 
 
 
-testXMLConfig = """<?xml version="1.0" encoding="utf-8"?>
+testXMLConfig = b"""<?xml version="1.0" encoding="utf-8"?>
 
 <directory realm="xyzzy">
 

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/util.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/util.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -40,20 +40,23 @@
         myConstants = {}
         for constant in constants:
             if constant.name in myConstants:
-                raise ValueError("Name conflict: %r" % (constant.name,))
+                raise ValueError("Name conflict: {0}".format(constant.name))
             myConstants[constant.name] = constant
 
         self._constants = myConstants
 
+
     def __getattr__(self, name):
         try:
             return self._constants[name]
         except KeyError:
             raise AttributeError(name)
 
+
     def iterconstants(self):
         return self._constants.itervalues()
 
+
     def lookupByName(self, name):
         try:
             return self._constants[name]
@@ -61,16 +64,20 @@
             raise ValueError(name)
 
 
+
 def uniqueResult(values):
     result = None
     for value in values:
         if result is None:
             result = value
         else:
-            raise DirectoryServiceError("Multiple values found where one expected.")
+            raise DirectoryServiceError(
+                "Multiple values found where one expected."
+            )
     return result
 
 
+
 def describe(constant):
     if isinstance(constant, FlagConstant):
         parts = []
@@ -81,6 +88,7 @@
         return getattr(constant, "description", constant.name)
 
 
+
 def iterFlags(flags):
     if hasattr(flags, "__iter__"):
         return flags

Modified: CalendarServer/branches/users/cdaboo/json/twext/who/xml.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/who/xml.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twext/who/xml.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -28,6 +28,7 @@
 ]
 
 from time import time
+from uuid import UUID
 
 from xml.etree.ElementTree import parse as parseXML
 from xml.etree.ElementTree import ParseError as XMLParseError
@@ -62,38 +63,38 @@
 ##
 
 class Element(Values):
-    directory = ValueConstant("directory")
-    record    = ValueConstant("record")
+    directory = ValueConstant(u"directory")
+    record    = ValueConstant(u"record")
 
     #
     # Field names
     #
-    uid = ValueConstant("uid")
+    uid = ValueConstant(u"uid")
     uid.fieldName = BaseFieldName.uid
 
-    guid = ValueConstant("guid")
+    guid = ValueConstant(u"guid")
     guid.fieldName = BaseFieldName.guid
 
-    shortName = ValueConstant("short-name")
+    shortName = ValueConstant(u"short-name")
     shortName.fieldName = BaseFieldName.shortNames
 
-    fullName = ValueConstant("full-name")
+    fullName = ValueConstant(u"full-name")
     fullName.fieldName = BaseFieldName.fullNames
 
-    emailAddress = ValueConstant("email")
+    emailAddress = ValueConstant(u"email")
     emailAddress.fieldName = BaseFieldName.emailAddresses
 
-    password = ValueConstant("password")
+    password = ValueConstant(u"password")
     password.fieldName = BaseFieldName.password
 
-    memberUID = ValueConstant("member-uid")
+    memberUID = ValueConstant(u"member-uid")
     memberUID.fieldName = IndexFieldName.memberUIDs
 
 
 
 class Attribute(Values):
-    realm      = ValueConstant("realm")
-    recordType = ValueConstant("type")
+    realm      = ValueConstant(u"realm")
+    recordType = ValueConstant(u"type")
 
 
 
@@ -101,16 +102,16 @@
     #
     # Booleans
     #
-    true  = ValueConstant("true")
-    false = ValueConstant("false")
+    true  = ValueConstant(u"true")
+    false = ValueConstant(u"false")
 
     #
     # Record types
     #
-    user = ValueConstant("user")
+    user = ValueConstant(u"user")
     user.recordType = RecordType.user
 
-    group = ValueConstant("group")
+    group = ValueConstant(u"group")
     group.recordType = RecordType.group
 
 
@@ -144,9 +145,11 @@
         else:
             realmName = repr(realmName)
 
-        return "<%s %s>" % (
-            self.__class__.__name__,
-            realmName,
+        return (
+            "<{self.__class__.__name__} {realmName}>".format(
+                self=self,
+                realmName=realmName,
+            )
         )
 
 
@@ -201,7 +204,10 @@
         #
         if stat:
             self.filePath.restat()
-            cacheTag = (self.filePath.getModificationTime(), self.filePath.getsize())
+            cacheTag = (
+                self.filePath.getModificationTime(),
+                self.filePath.getsize()
+            )
             if cacheTag == self._cacheTag:
                 return
         else:
@@ -225,9 +231,13 @@
         #
         directoryNode = etree.getroot()
         if directoryNode.tag != self.element.directory.value:
-            raise ParseError("Incorrect root element: %s" % (directoryNode.tag,))
+            raise ParseError(
+                "Incorrect root element: {0}".format(directoryNode.tag)
+            )
 
-        realmName = directoryNode.get(self.attribute.realm.value, "").encode("utf-8")
+        realmName = directoryNode.get(
+            self.attribute.realm.value, u""
+        )
 
         if not realmName:
             raise ParseError("No realm name.")
@@ -239,7 +249,9 @@
 
         for recordNode in directoryNode:
             try:
-                records.add(self.parseRecordNode(recordNode, unknownFieldElements))
+                records.add(
+                    self.parseRecordNode(recordNode, unknownFieldElements)
+                )
             except UnknownRecordTypeError as e:
                 unknownRecordTypes.add(e.token)
 
@@ -277,10 +289,14 @@
 
 
     def parseRecordNode(self, recordNode, unknownFieldElements=None):
-        recordTypeAttribute = recordNode.get(self.attribute.recordType.value, "").encode("utf-8")
+        recordTypeAttribute = recordNode.get(
+            self.attribute.recordType.value, u""
+        )
         if recordTypeAttribute:
             try:
-                recordType = self.value.lookupByValue(recordTypeAttribute).recordType
+                recordType = (
+                    self.value.lookupByValue(recordTypeAttribute).recordType
+                )
             except (ValueError, AttributeError):
                 raise UnknownRecordTypeError(recordTypeAttribute)
         else:
@@ -302,8 +318,16 @@
                 if unknownFieldElements is not None:
                     unknownFieldElements.add(fieldNode.tag)
 
-            value = fieldNode.text.encode("utf-8")
+            vType = BaseFieldName.valueType(fieldName)
 
+            if vType in (unicode, UUID):
+                value = unicode(fieldNode.text)
+            else:
+                raise AssertionError(
+                    "Unknown value type {0} for field {1}",
+                    vType, fieldName
+                )
+
             if BaseFieldName.isMultiValue(fieldName):
                 values = fields.setdefault(fieldName, [])
                 values.append(value)
@@ -357,9 +381,14 @@
             for (name, value) in record.fields.items():
                 if name == self.fieldName.recordType:
                     if value in recordTypes:
-                        recordNode.set(self.attribute.recordType.value, recordTypes[value])
+                        recordNode.set(
+                            self.attribute.recordType.value,
+                            recordTypes[value]
+                        )
                     else:
-                        raise AssertionError("Unknown record type: %r" % (value,))
+                        raise AssertionError(
+                            "Unknown record type: {0}".format(value)
+                        )
 
                 else:
                     if name in fieldNames:
@@ -376,7 +405,9 @@
                             recordNode.append(subNode)
 
                     else:
-                        raise AssertionError("Unknown field name: %r" % (name,))
+                        raise AssertionError(
+                            "Unknown field name: {0!r}".format(name)
+                        )
 
         # Walk through the record nodes in the XML tree and apply
         # updates.

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/config.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/config.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/config.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -97,6 +97,9 @@
             self._defaults = ConfigDict()
         else:
             self._defaults = ConfigDict(copy.deepcopy(defaults))
+        self.importedFiles = []
+        self.includedFiles = []
+        self.missingFiles = []
 
 
     def getDefaults(self):

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/augment.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/augment.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/augment.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -25,11 +25,11 @@
 from twext.python.log import Logger
 
 from twistedcaldav.config import fullServerPath, config
-from twistedcaldav.database import AbstractADBAPIDatabase, ADBAPISqliteMixin,\
+from twistedcaldav.database import AbstractADBAPIDatabase, ADBAPISqliteMixin, \
     ADBAPIPostgreSQLMixin
 from twistedcaldav.directory import xmlaugmentsparser
 from twistedcaldav.directory.xmlaugmentsparser import XMLAugmentsParser
-from twistedcaldav.xmlutil import newElementTreeWithRoot, addSubElement,\
+from twistedcaldav.xmlutil import newElementTreeWithRoot, addSubElement, \
     writeXML, readXML
 from twistedcaldav.directory.util import normalizeUUID
 
@@ -56,7 +56,6 @@
         uid,
         enabled=False,
         serverID="",
-        partitionID="",
         enabledForCalendaring=False,
         autoSchedule=False,
         autoScheduleMode="default",
@@ -67,7 +66,6 @@
         self.uid = uid
         self.enabled = enabled
         self.serverID = serverID
-        self.partitionID = partitionID
         self.enabledForCalendaring = enabledForCalendaring
         self.enabledForAddressBooks = enabledForAddressBooks
         self.enabledForLogin = enabledForLogin
@@ -87,9 +85,9 @@
     """
     Abstract base class for an augment record database.
     """
-    
+
     def __init__(self):
-        
+
         self.cachedRecords = {}
 
 
@@ -126,10 +124,10 @@
 
         @param uid: directory UID to lookup
         @type uid: C{str}
-        
+
         @return: L{Deferred}
         """
-        
+
         recordType = recordTypesMap[recordType]
 
         result = (yield self._lookupAugmentRecord(uid))
@@ -166,6 +164,7 @@
         result.clonedFromDefault = True
         returnValue(result)
 
+
     @inlineCallbacks
     def getAllUIDs(self):
         """
@@ -173,21 +172,23 @@
 
         @return: L{Deferred}
         """
-        
+
         raise NotImplementedError("Child class must define this.")
 
+
     def _lookupAugmentRecord(self, uid):
         """
         Get an AugmentRecord for the specified UID.
 
         @param uid: directory UID to lookup
         @type uid: C{str}
-        
+
         @return: L{Deferred}
         """
-        
+
         raise NotImplementedError("Child class must define this.")
 
+
     @inlineCallbacks
     def _cachedAugmentRecord(self, uid):
         """
@@ -195,59 +196,64 @@
 
         @param uid: directory UID to lookup
         @type uid: C{str}
-        
+
         @return: L{Deferred}
         """
-        
+
         if not uid in self.cachedRecords:
             result = (yield self._lookupAugmentRecord(uid))
             self.cachedRecords[uid] = result
         returnValue(self.cachedRecords[uid])
 
+
     def addAugmentRecords(self, records):
         """
         Add an AugmentRecord to the DB.
 
         @param record: augment records to add
         @type record: C{list} of L{AugmentRecord}
-        
+
         @return: L{Deferred}
         """
 
         raise NotImplementedError("Child class must define this.")
 
+
     def removeAugmentRecords(self, uids):
         """
         Remove AugmentRecords with the specified UIDs.
 
         @param uid: directory UIDs to remove
         @type uid: C{list} of C{str}
-        
+
         @return: L{Deferred}
         """
 
         raise NotImplementedError("Child class must define this.")
 
+
     def refresh(self):
         """
         Refresh any cached data.
-        
+
         @return: L{Deferred}
         """
 
         self.cachedRecords.clear()
         return succeed(None)
-    
+
+
     def clean(self):
         """
         Remove all records.
-        
+
         @return: L{Deferred}
         """
 
         raise NotImplementedError("Child class must define this.")
 
 
+
 class AugmentXMLDB(AugmentDB):
     """
     XMLFile based augment database implementation.
@@ -257,7 +263,7 @@
 
         super(AugmentXMLDB, self).__init__()
         self.xmlFiles = [fullServerPath(config.DataRoot, path) for path in xmlFiles]
-        self.xmlFileStats = { }
+        self.xmlFileStats = {}
         for path in self.xmlFiles:
             self.xmlFileStats[path] = (0, 0) # mtime, size
 
@@ -290,16 +296,17 @@
 
         @param uid: directory UID to lookup
         @type uid: C{str}
-        
+
         @return: L{Deferred}
         """
-        
+
         # May need to re-cache
         if time.time() - self.lastCached > self.statSeconds:
             self.refresh()
-            
+
         return succeed(self.db.get(uid))
 
+
     def addAugmentRecords(self, records):
         """
         Add an AugmentRecord to the DB.
@@ -308,13 +315,13 @@
         @type records: C{list} of L{AugmentRecord}
         @param update: C{True} if changing an existing record
         @type update: C{bool}
-        
+
         @return: L{Deferred}
         """
 
         # Look at each record and determine whether it is new or a modify
         new_records = list()
-        existing_records = list() 
+        existing_records = list()
         for record in records:
             (existing_records if record.uid in self.db else new_records).append(record)
 
@@ -332,6 +339,7 @@
 
         return succeed(None)
 
+
     def _doAddToFile(self, xmlfile, records):
 
         if not os.path.exists(xmlfile):
@@ -343,7 +351,6 @@
             for record in self.db.itervalues():
                 self._addRecordToXMLDB(record, augments_node)
 
-
             writeXML(xmlfile, augments_node)
 
             # Set permissions
@@ -362,33 +369,33 @@
             if uid != -1 and gid != -1:
                 os.chown(xmlfile, uid, gid)
 
-
         _ignore_etree, augments_node = readXML(xmlfile)
 
         # Create new record
         for record in records:
             self._addRecordToXMLDB(record, augments_node)
-        
+
         # Modify xmlfile
         writeXML(xmlfile, augments_node)
-        
+
+
     def _doModifyInFile(self, xmlfile, records):
-    
+
         if not os.path.exists(xmlfile):
             return
 
         _ignore_etree, augments_node = readXML(xmlfile)
-    
+
         # Map uid->record for fast lookup
         recordMap = dict([(record.uid, record) for record in records])
 
         # Make sure UID is present
         changed = False
         for record_node in augments_node:
-            
+
             if record_node.tag != xmlaugmentsparser.ELEMENT_RECORD:
                 continue
-    
+
             uid = record_node.find(xmlaugmentsparser.ELEMENT_UID).text
             if uid in recordMap:
                 # Modify record
@@ -400,13 +407,14 @@
         if changed:
             writeXML(xmlfile, augments_node)
 
+
     def removeAugmentRecords(self, uids):
         """
         Remove AugmentRecords with the specified UIDs.
 
         @param uids: list of uids to remove
         @type uids: C{list} of C{str}
-        
+
         @return: L{Deferred}
         """
 
@@ -423,10 +431,11 @@
 
         return succeed(None)
 
+
     def _doRemoveFromFile(self, xmlfile, uids):
-    
+
         _ignore_etree, augments_node = readXML(xmlfile)
-    
+
         # Remove all UIDs present
         changed = False
         for child in augments_node:
@@ -440,20 +449,19 @@
         # Modify xmlfile
         if changed:
             writeXML(xmlfile, augments_node)
-        
-        
+
+
     def _addRecordToXMLDB(self, record, parentNode):
         record_node = addSubElement(parentNode, xmlaugmentsparser.ELEMENT_RECORD)
         self._updateRecordInXMLDB(record, record_node)
 
+
     def _updateRecordInXMLDB(self, record, recordNode):
         del recordNode[:]
         addSubElement(recordNode, xmlaugmentsparser.ELEMENT_UID, record.uid)
         addSubElement(recordNode, xmlaugmentsparser.ELEMENT_ENABLE, "true" if record.enabled else "false")
         if record.serverID:
             addSubElement(recordNode, xmlaugmentsparser.ELEMENT_SERVERID, record.serverID)
-        if record.partitionID:
-            addSubElement(recordNode, xmlaugmentsparser.ELEMENT_PARTITIONID, record.partitionID)
         addSubElement(recordNode, xmlaugmentsparser.ELEMENT_ENABLECALENDAR, "true" if record.enabledForCalendaring else "false")
         addSubElement(recordNode, xmlaugmentsparser.ELEMENT_ENABLEADDRESSBOOK, "true" if record.enabledForAddressBooks else "false")
         addSubElement(recordNode, xmlaugmentsparser.ELEMENT_ENABLELOGIN, "true" if record.enabledForLogin else "false")
@@ -463,6 +471,7 @@
         if record.autoAcceptGroup:
             addSubElement(recordNode, xmlaugmentsparser.ELEMENT_AUTOACCEPTGROUP, record.autoAcceptGroup)
 
+
     def refresh(self):
         """
         Refresh any cached data.
@@ -479,6 +488,7 @@
 
         return succeed(None)
 
+
     def clean(self):
         """
         Remove all records.
@@ -487,6 +497,7 @@
         self.removeAugmentRecords(self.db.keys())
         return succeed(None)
 
+
     def _shouldReparse(self, xmlFiles):
         """
         Check to see whether any of the given files have been modified since
@@ -501,6 +512,7 @@
                     return True
         return False
 
+
     def _parseXML(self):
         """
         Parse self.xmlFiles into AugmentRecords.
@@ -536,19 +548,22 @@
 
         return results
 
+
+
 class AugmentADAPI(AugmentDB, AbstractADBAPIDatabase):
     """
     DBAPI based augment database implementation.
     """
 
-    schema_version = "2"
-    schema_type    = "AugmentDB"
-    
+    schema_version = "3"
+    schema_type = "AugmentDB"
+
     def __init__(self, dbID, dbapiName, dbapiArgs, **kwargs):
-        
+
         AugmentDB.__init__(self)
         AbstractADBAPIDatabase.__init__(self, dbID, dbapiName, dbapiArgs, True, **kwargs)
-        
+
+
     @inlineCallbacks
     def getAllUIDs(self):
         """
@@ -556,11 +571,12 @@
 
         @return: L{Deferred}
         """
-        
+
         # Query for the record information
         results = (yield self.queryList("select UID from AUGMENTS", ()))
         returnValue(results)
 
+
     @inlineCallbacks
     def _lookupAugmentRecord(self, uid):
         """
@@ -571,34 +587,34 @@
 
         @return: L{Deferred}
         """
-        
+
         # Query for the record information
-        results = (yield self.query("select UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED from AUGMENTS where UID = :1", (uid,)))
+        results = (yield self.query("select UID, ENABLED, SERVERID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED from AUGMENTS where UID = :1", (uid,)))
         if not results:
             returnValue(None)
         else:
-            uid, enabled, serverid, partitionid, enabledForCalendaring, enabledForAddressBooks, autoSchedule, autoScheduleMode, autoAcceptGroup, enabledForLogin = results[0]
-            
+            uid, enabled, serverid, enabledForCalendaring, enabledForAddressBooks, autoSchedule, autoScheduleMode, autoAcceptGroup, enabledForLogin = results[0]
+
             record = AugmentRecord(
-                uid = uid,
-                enabled = enabled == "T",
-                serverID = serverid,
-                partitionID = partitionid,
-                enabledForCalendaring = enabledForCalendaring == "T",
-                enabledForAddressBooks = enabledForAddressBooks == "T",
-                enabledForLogin = enabledForLogin == "T",
-                autoSchedule = autoSchedule == "T",
-                autoScheduleMode = autoScheduleMode,
-                autoAcceptGroup = autoAcceptGroup,
+                uid=uid,
+                enabled=enabled == "T",
+                serverID=serverid,
+                enabledForCalendaring=enabledForCalendaring == "T",
+                enabledForAddressBooks=enabledForAddressBooks == "T",
+                enabledForLogin=enabledForLogin == "T",
+                autoSchedule=autoSchedule == "T",
+                autoScheduleMode=autoScheduleMode,
+                autoAcceptGroup=autoAcceptGroup,
             )
-            
+
             returnValue(record)
 
+
     @inlineCallbacks
     def addAugmentRecords(self, records):
 
         for record in records:
-            
+
             results = (yield self.query("select UID from AUGMENTS where UID = :1", (record.uid,)))
             update = len(results) > 0
 
@@ -607,6 +623,7 @@
             else:
                 yield self._addRecord(record)
 
+
     @inlineCallbacks
     def removeAugmentRecords(self, uids):
         """
@@ -614,32 +631,36 @@
 
         @param uids: list of uids to remove
         @type uids: C{list} of C{str}
-        
+
         @return: L{Deferred}
         """
 
         for uid in uids:
             yield self.execute("delete from AUGMENTS where UID = :1", (uid,))
 
+
     def clean(self):
         """
         Remove all records.
         """
 
         return self.execute("delete from AUGMENTS", ())
-        
+
+
     def _db_version(self):
         """
         @return: the schema version assigned to this index.
         """
         return AugmentADAPI.schema_version
-        
+
+
     def _db_type(self):
         """
         @return: the collection type assigned to this index.
         """
         return AugmentADAPI.schema_type
-    
+
+
     @inlineCallbacks
     def _db_init_data_tables(self):
         """
@@ -652,45 +673,47 @@
         yield self._create_table(
             "AUGMENTS",
             (
-                ("UID",              "text unique"),
-                ("ENABLED",          "text(1)"),
-                ("SERVERID",         "text"),
-                ("PARTITIONID",      "text"),
-                ("CALENDARING",      "text(1)"),
-                ("ADDRESSBOOKS",     "text(1)"),
-                ("AUTOSCHEDULE",     "text(1)"),
+                ("UID", "text unique"),
+                ("ENABLED", "text(1)"),
+                ("SERVERID", "text"),
+                ("CALENDARING", "text(1)"),
+                ("ADDRESSBOOKS", "text(1)"),
+                ("AUTOSCHEDULE", "text(1)"),
                 ("AUTOSCHEDULEMODE", "text"),
-                ("AUTOACCEPTGROUP",  "text"),
-                ("LOGINENABLED",     "text(1)"),
+                ("AUTOACCEPTGROUP", "text"),
+                ("LOGINENABLED", "text(1)"),
             ),
             ifnotexists=True,
         )
 
+
     @inlineCallbacks
     def _db_empty_data_tables(self):
         yield self._db_execute("delete from AUGMENTS")
 
+
+
 class AugmentSqliteDB(ADBAPISqliteMixin, AugmentADAPI):
     """
     Sqlite based augment database implementation.
     """
 
     def __init__(self, dbpath):
-        
+
         ADBAPISqliteMixin.__init__(self)
         AugmentADAPI.__init__(self, "Augments", "sqlite3", (fullServerPath(config.DataRoot, dbpath),))
 
+
     @inlineCallbacks
     def _addRecord(self, record):
         yield self.execute(
             """insert or replace into AUGMENTS
-            (UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED)
-            values (:1, :2, :3, :4, :5, :6, :7, :8, :9, :10)""",
+            (UID, ENABLED, SERVERID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED)
+            values (:1, :2, :3, :4, :5, :6, :7, :8, :9)""",
             (
                 record.uid,
                 "T" if record.enabled else "F",
                 record.serverID,
-                record.partitionID,
                 "T" if record.enabledForCalendaring else "F",
                 "T" if record.enabledForAddressBooks else "F",
                 "T" if record.autoSchedule else "F",
@@ -700,30 +723,33 @@
             )
         )
 
+
     def _modifyRecord(self, record):
         return self._addRecord(record)
 
+
+
 class AugmentPostgreSQLDB(ADBAPIPostgreSQLMixin, AugmentADAPI):
     """
     PostgreSQL based augment database implementation.
     """
 
     def __init__(self, host, database, user=None, password=None):
-        
+
         ADBAPIPostgreSQLMixin.__init__(self)
         AugmentADAPI.__init__(self, "Augments", "pgdb", (), host=host, database=database, user=user, password=password,)
 
+
     @inlineCallbacks
     def _addRecord(self, record):
         yield self.execute(
             """insert into AUGMENTS
-            (UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED)
-            values (:1, :2, :3, :4, :5, :6, :7, :8, :9, :10)""",
+            (UID, ENABLED, SERVERID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED)
+            values (:1, :2, :3, :4, :5, :6, :7, :8, :9)""",
             (
                 record.uid,
                 "T" if record.enabled else "F",
                 record.serverID,
-                record.partitionID,
                 "T" if record.enabledForCalendaring else "F",
                 "T" if record.enabledForAddressBooks else "F",
                 "T" if record.autoSchedule else "F",
@@ -733,17 +759,17 @@
             )
         )
 
+
     @inlineCallbacks
     def _modifyRecord(self, record):
         yield self.execute(
             """update AUGMENTS set
-            (UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED) =
-            (:1, :2, :3, :4, :5, :6, :7, :8, :9, :10) where UID = :11""",
+            (UID, ENABLED, SERVERID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED) =
+            (:1, :2, :3, :4, :5, :6, :7, :8, :9) where UID = :10""",
             (
                 record.uid,
                 "T" if record.enabled else "F",
                 record.serverID,
-                record.partitionID,
                 "T" if record.enabledForCalendaring else "F",
                 "T" if record.enabledForAddressBooks else "F",
                 "T" if record.autoSchedule else "F",

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/common.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/common.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/common.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -42,7 +42,7 @@
     Common ancestor for addressbook/calendar UID provisioning resources.
 
     Must be mixed in to the hierarchy I{before} the appropriate resource type.
-    
+
     @ivar homeResourceTypeName: The name of the home resource type ('calendars'
         or 'addressbooks').
 
@@ -78,13 +78,11 @@
 
         assert len(name) > 4, "Directory record has an invalid GUID: %r" % (
             name,)
-        
-        if record.locallyHosted():
+
+        if record.thisServer():
             child = yield self.homeResourceCreator(record, transaction)
-        elif record.thisServer():
+        else:
             child = DirectoryReverseProxyResource(self, record)
-        else:
-            child = None # Use a redirect?
 
         returnValue(child)
 
@@ -108,6 +106,7 @@
         # Not a listable collection
         raise HTTPError(responsecode.FORBIDDEN)
 
+
     ##
     # ACL
     ##
@@ -115,12 +114,15 @@
     def principalCollections(self):
         return self.parent.principalCollections()
 
+
     def principalForRecord(self, record):
         return self.parent.principalForRecord(record)
+
+
     ##
     # DAV
     ##
-    
+
     def isCollection(self):
         return True
 
@@ -129,9 +131,11 @@
         raise NotImplementedError(self.__class__.__name__ +
                                   ".getChild no longer exists.")
 
+
     def displayName(self):
         return uidsResourceName
 
+
     def url(self):
         return joinURL(self.parent.url(), uidsResourceName)
 
@@ -153,4 +157,3 @@
 
         child = yield self._parent.homeForDirectoryRecord(record, request)
         returnValue((child, segments[1:]))
-

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/directory-principal-resource.html
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/directory-principal-resource.html	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/directory-principal-resource.html	2013-11-08 03:33:05 UTC (rev 11912)
@@ -5,8 +5,7 @@
 ---------------------
 Directory GUID: <t:slot name="directoryGUID"/>
 Realm: <t:slot name="realm"/>
-<t:transparent t:render="serversEnabled">Hosted-At: <t:slot name="hostedAt"/>
-Partition: <t:slot name="partition"/></t:transparent>
+<t:transparent t:render="serversEnabled">Hosted-At: <t:slot name="hostedAt"/></t:transparent>
 Principal Information
 ---------------------
 GUID: <t:slot name="principalGUID"/>

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/directory.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/directory.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -89,8 +89,8 @@
 
     searchContext_location = "location"
     searchContext_resource = "resource"
-    searchContext_user     = "user"
-    searchContext_group    = "group"
+    searchContext_user = "user"
+    searchContext_group = "group"
     searchContext_attendee = "attendee"
 
     aggregateService = None
@@ -533,10 +533,11 @@
         )
         for record in resources:
             guid = record.guid
-            assignments.append(("%s#calendar-proxy-write" % (guid,),
-                               record.externalProxies()))
-            assignments.append(("%s#calendar-proxy-read" % (guid,),
-                               record.externalReadOnlyProxies()))
+            if record.enabledForCalendaring:
+                assignments.append(("%s#calendar-proxy-write" % (guid,),
+                                   record.externalProxies()))
+                assignments.append(("%s#calendar-proxy-read" % (guid,),
+                                   record.externalReadOnlyProxies()))
 
         return assignments
 
@@ -627,6 +628,7 @@
         self.expireSeconds = expireSeconds
         self.lockSeconds = lockSeconds
 
+
     def setGroupsFor(self, guid, memberships):
         self.log.debug("set groups-for %s : %s" % (guid, memberships))
         return self.set("groups-for:%s" %
@@ -674,7 +676,6 @@
         return self.add("group-cacher-lock", "1", expireTime=self.lockSeconds)
 
 
-
     def extendLock(self):
         """
         Update the expiration time of the memcached lock
@@ -693,6 +694,7 @@
         return self.delete("group-cacher-lock")
 
 
+
 class GroupMembershipCacheUpdater(object):
     """
     Responsible for updating memcached with group memberships.  This will run
@@ -1129,7 +1131,7 @@
     implements(IDirectoryRecord, ICalendarStoreDirectoryRecord)
 
     def __repr__(self):
-        return "<%s[%s@%s(%s)] %s(%s) %r @ %s/#%s>" % (
+        return "<%s[%s@%s(%s)] %s(%s) %r @ %s>" % (
             self.__class__.__name__,
             self.recordType,
             self.service.guid,
@@ -1138,7 +1140,6 @@
             ",".join(self.shortNames),
             self.fullName,
             self.serverURI(),
-            self.partitionID,
         )
 
 
@@ -1174,7 +1175,6 @@
         self.uid = uid
         self.enabled = False
         self.serverID = ""
-        self.partitionID = ""
         self.shortNames = shortNames
         self.authIDs = authIDs
         self.fullName = fullName
@@ -1256,7 +1256,6 @@
         if augment:
             self.enabled = augment.enabled
             self.serverID = augment.serverID
-            self.partitionID = augment.partitionID
             self.enabledForCalendaring = augment.enabledForCalendaring
             self.enabledForAddressBooks = augment.enabledForAddressBooks
             self.autoSchedule = augment.autoSchedule
@@ -1277,7 +1276,6 @@
             # Groups are by default always enabled
             self.enabled = (self.recordType == self.service.recordType_groups)
             self.serverID = ""
-            self.partitionID = ""
             self.enabledForCalendaring = False
             self.enabledForAddressBooks = False
             self.enabledForLogin = False
@@ -1495,46 +1493,9 @@
             return None
 
 
-    def partitionURI(self):
-        """
-        URL of the server hosting this record. Return None if hosted on this server.
-        """
-        if config.Servers.Enabled and self.serverID:
-            s = Servers.getServerById(self.serverID)
-            if s:
-                return s.getPartitionURIForId(self.partitionID)
-        return None
-
-
-    def locallyHosted(self):
-        """
-        Hosted on this server/partition instance.
-        """
-
-        if config.Servers.Enabled and self.serverID:
-            s = Servers.getServerById(self.serverID)
-            if s:
-                return s.thisServer and (not s.isPartitioned() or not self.partitionID or self.partitionID == config.ServerPartitionID)
-        return True
-
-
-    def effectivePartitionID(self):
-        """
-        Record partition ID taking into account whether the server is partitioned.
-        """
-        if config.Servers.Enabled and self.serverID:
-            s = Servers.getServerById(self.serverID)
-            if s and s.isPartitioned():
-                return self.partitionID
-        return ""
-
-
     def thisServer(self):
-        if config.Servers.Enabled and self.serverID:
-            s = Servers.getServerById(self.serverID)
-            if s:
-                return s.thisServer
-        return True
+        s = self.server()
+        return s.thisServer if s is not None else True
 
 
     def autoAcceptMembers(self):

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/idirectory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/idirectory.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/idirectory.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -32,20 +32,20 @@
     realmName = Attribute("The name of the authentication realm this service represents.")
     guid = Attribute("A GUID for this service.")
 
-    def recordTypes():
+    def recordTypes(): #@NoSelf
         """
         @return: a sequence of strings denoting the record types that
             are kept in the directory.  For example: C{["users",
             "groups", "resources"]}.
         """
 
-    def listRecords(recordType):
+    def listRecords(recordType): #@NoSelf
         """
         @param type: the type of records to retrieve.
         @return: an iterable of records of the given type.
         """
 
-    def recordWithShortName(recordType, shortName):
+    def recordWithShortName(recordType, shortName): #@NoSelf
         """
         @param recordType: the type of the record to look up.
         @param shortName: the short name of the record to look up.
@@ -53,21 +53,21 @@
             C{None} if no such record exists.
         """
 
-    def recordWithUID(uid):
+    def recordWithUID(uid): #@NoSelf
         """
         @param uid: the UID of the record to look up.
         @return: an L{IDirectoryRecord} with the given UID, or C{None}
             if no such record exists.
         """
 
-    def recordWithGUID(guid):
+    def recordWithGUID(guid): #@NoSelf
         """
         @param guid: the GUID of the record to look up.
         @return: an L{IDirectoryRecord} with the given GUID, or
             C{None} if no such record exists.
         """
 
-    def recordWithCalendarUserAddress(address):
+    def recordWithCalendarUserAddress(address): #@NoSelf
         """
         @param address: the calendar user address of the record to look up.
         @type address: C{str}
@@ -81,7 +81,7 @@
             directory service may not be aware of these addresses.
         """
 
-    def recordWithCachedGroupsAlias(recordType, alias):
+    def recordWithCachedGroupsAlias(recordType, alias): #@NoSelf
         """
         @param recordType: the type of the record to look up.
         @param alias: the cached-groups alias of the record to look up.
@@ -91,14 +91,13 @@
             alias, or C{None} if no such record is found.
         """
 
-
-    def recordsMatchingFields(fields):
+    def recordsMatchingFields(fields): #@NoSelf
         """
         @return: a deferred sequence of L{IDirectoryRecord}s which
             match the given fields.
         """
 
-    def recordsMatchingTokens(tokens, context=None):
+    def recordsMatchingTokens(tokens, context=None): #@NoSelf
         """
         @param tokens: The tokens to search on
         @type tokens: C{list} of C{str} (utf-8 bytes)
@@ -119,31 +118,31 @@
             "attendee", only users, groups, and resources are considered.
         """
 
-
-    def setRealm(realmName):
+    def setRealm(realmName): #@NoSelf
         """
         Set a new realm name for this (and nested services if any)
 
         @param realmName: the realm name this service should use.
         """
 
+
+
 class IDirectoryRecord(Interface):
     """
     Directory Record
     """
-    service               = Attribute("The L{IDirectoryService} this record exists in.")
-    recordType            = Attribute("The type of this record.")
-    guid                  = Attribute("The GUID of this record.")
-    uid                   = Attribute("The UID of this record.")
-    enabled               = Attribute("Determines whether this record should allow a principal to be created.")
-    serverID              = Attribute("Identifies the server that actually hosts data for the record.")
-    partitionID           = Attribute("Identifies the partition node that actually hosts data for the record.")
-    shortNames            = Attribute("The names for this record.")
-    authIDs               = Attribute("Alternative security identities for this record.")
-    fullName              = Attribute("The full name of this record.")
-    firstName             = Attribute("The first name of this record.")
-    lastName              = Attribute("The last name of this record.")
-    emailAddresses        = Attribute("The email addresses of this record.")
+    service = Attribute("The L{IDirectoryService} this record exists in.")
+    recordType = Attribute("The type of this record.")
+    guid = Attribute("The GUID of this record.")
+    uid = Attribute("The UID of this record.")
+    enabled = Attribute("Determines whether this record should allow a principal to be created.")
+    serverID = Attribute("Identifies the server that actually hosts data for the record.")
+    shortNames = Attribute("The names for this record.")
+    authIDs = Attribute("Alternative security identities for this record.")
+    fullName = Attribute("The full name of this record.")
+    firstName = Attribute("The first name of this record.")
+    lastName = Attribute("The last name of this record.")
+    emailAddresses = Attribute("The email addresses of this record.")
     enabledForCalendaring = Attribute("Determines whether this record creates a principal with a calendar home.")
     enabledForAddressBooks = Attribute("Determines whether this record creates a principal with an address book home.")
     calendarUserAddresses = Attribute(
@@ -159,19 +158,19 @@
         """
     )
 
-    def members():
+    def members(): #@NoSelf
         """
         @return: an iterable of L{IDirectoryRecord}s for the members of this
             (group) record.
         """
 
-    def groups():
+    def groups(): #@NoSelf
         """
         @return: an iterable of L{IDirectoryRecord}s for the groups this
             record is a member of.
         """
 
-    def verifyCredentials(credentials):
+    def verifyCredentials(credentials): #@NoSelf
         """
         Verify that the given credentials can authenticate the principal
         represented by this record.

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/ldapdirectory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/ldapdirectory.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/ldapdirectory.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -75,12 +75,13 @@
         return "<%s %r: %r>" % (self.__class__.__name__, self.realmName,
             self.uri)
 
+
     def __init__(self, params):
         """
         @param params: a dictionary containing the following keys:
             cacheTimeout, realmName, uri, tls, tlsCACertFile, tlsCACertDir,
             tlsRequireCert, credentials, rdnSchema, groupSchema, resourceSchema
-            partitionSchema
+            poddingSchema
         """
 
         defaults = {
@@ -188,9 +189,8 @@
                 "readOnlyProxyAttr": None, # list of GUIDs
                 "autoAcceptGroupAttr": None, # single group GUID
             },
-            "partitionSchema": {
+            "poddingSchema": {
                 "serverIdAttr": None, # maps to augments server-id
-                "partitionIdAttr": None, # maps to augments partition-id
             },
         }
         ignored = None
@@ -222,7 +222,7 @@
         self.rdnSchema = params["rdnSchema"]
         self.groupSchema = params["groupSchema"]
         self.resourceSchema = params["resourceSchema"]
-        self.partitionSchema = params["partitionSchema"]
+        self.poddingSchema = params["poddingSchema"]
 
         self.base = ldap.dn.str2dn(self.rdnSchema["base"])
 
@@ -272,10 +272,8 @@
             attrSet.add(self.resourceSchema["proxyAttr"])
         if self.resourceSchema["readOnlyProxyAttr"]:
             attrSet.add(self.resourceSchema["readOnlyProxyAttr"])
-        if self.partitionSchema["serverIdAttr"]:
-            attrSet.add(self.partitionSchema["serverIdAttr"])
-        if self.partitionSchema["partitionIdAttr"]:
-            attrSet.add(self.partitionSchema["partitionIdAttr"])
+        if self.poddingSchema["serverIdAttr"]:
+            attrSet.add(self.poddingSchema["serverIdAttr"])
         self.attrlist = list(attrSet)
 
         self.typeDNs = {}
@@ -284,10 +282,8 @@
                 self.rdnSchema[recordType]["rdn"].lower()
             ) + self.base
 
-
         self.ldap = None
 
-
         # Separate LDAP connection used solely for authenticating clients
         self.authLDAP = None
 
@@ -314,7 +310,7 @@
 
         # Query the LDAP server
         self.log.debug("Querying ldap for records matching base {base} and "
-            "filter {filter} for attributes {attrs}.", 
+            "filter {filter} for attributes {attrs}.",
             base=ldap.dn.dn2str(base), filter=filterstr, attrs=self.attrlist)
 
         # This takes a while, so if you don't want to have a "long request"
@@ -353,6 +349,7 @@
 
         return records
 
+
     @inlineCallbacks
     def recordWithCachedGroupsAlias(self, recordType, alias):
         """
@@ -373,6 +370,7 @@
         else:
             returnValue(None)
 
+
     def getExternalProxyAssignments(self):
         """
         Retrieve proxy assignments for locations and resources from the
@@ -391,6 +389,12 @@
 
         # Build filter
         filterstr = "(|(%s=*)(%s=*))" % (readAttr, writeAttr)
+        # ...taking into account only calendar-enabled records
+        enabledAttr = self.rdnSchema["locations"]["calendarEnabledAttr"]
+        enabledValue = self.rdnSchema["locations"]["calendarEnabledValue"]
+        if enabledAttr and enabledValue:
+            filterstr = "(&(%s=%s)%s)" % (enabledAttr, enabledValue, filterstr)
+
         attrlist = [guidAttr, readAttr, writeAttr]
 
         # Query the LDAP server
@@ -419,6 +423,7 @@
 
         return assignments
 
+
     def getLDAPConnection(self):
         if self.ldap is None:
             self.log.info("Connecting to LDAP {uri}", uri=repr(self.uri))
@@ -437,6 +442,7 @@
                     raise DirectoryConfigurationError()
         return self.ldap
 
+
     def createLDAPConnection(self):
         """
         Create and configure LDAP connection
@@ -472,7 +478,7 @@
         """
         TRIES = 3
 
-        for i in xrange(TRIES):
+        for _ignore_i in xrange(TRIES):
             self.log.debug("Authenticating %s" % (dn,))
 
             if self.authLDAP is None:
@@ -549,11 +555,11 @@
                 self.log.warn("LDAP timeout exceeded: %d seconds" % (timeoutSeconds,))
             except ldap.SERVER_DOWN:
                 self.ldap = None
-                self.log.error("LDAP server unavailable (tried %d times)" % (i+1,))
+                self.log.error("LDAP server unavailable (tried %d times)" % (i + 1,))
                 continue
 
             # change format, ignoring resultsType
-            result = [resultItem for resultType, resultItem in s.allResults]
+            result = [resultItem for _ignore_resultType, resultItem in s.allResults]
 
             totalTime = time.time() - startTime
             if totalTime > self.warningThresholdSeconds:
@@ -763,7 +769,6 @@
         """
 
         guid = None
-        shortNames = ()
         authIDs = set()
         fullName = None
         firstName = ""
@@ -885,30 +890,27 @@
                     autoAcceptGroup = self._getUniqueLdapAttribute(attrs,
                         self.resourceSchema["autoAcceptGroupAttr"])
 
-        serverID = partitionID = None
-        if self.partitionSchema["serverIdAttr"]:
+        serverID = None
+        if self.poddingSchema["serverIdAttr"]:
             serverID = self._getUniqueLdapAttribute(attrs,
-                self.partitionSchema["serverIdAttr"])
-        if self.partitionSchema["partitionIdAttr"]:
-            partitionID = self._getUniqueLdapAttribute(attrs,
-                self.partitionSchema["partitionIdAttr"])
+                self.poddingSchema["serverIdAttr"])
 
         record = LdapDirectoryRecord(
-            service                 = self,
-            recordType              = recordType,
-            guid                    = guid,
-            shortNames              = shortNames,
-            authIDs                 = authIDs,
-            fullName                = fullName,
-            firstName               = firstName,
-            lastName                = lastName,
-            emailAddresses          = emailAddresses,
-            uid                     = uid,
-            dn                      = dn,
-            memberGUIDs             = memberGUIDs,
-            extProxies              = proxyGUIDs,
-            extReadOnlyProxies      = readOnlyProxyGUIDs,
-            attrs                   = attrs,
+            service=self,
+            recordType=recordType,
+            guid=guid,
+            shortNames=shortNames,
+            authIDs=authIDs,
+            fullName=fullName,
+            firstName=firstName,
+            lastName=lastName,
+            emailAddresses=emailAddresses,
+            uid=uid,
+            dn=dn,
+            memberGUIDs=memberGUIDs,
+            extProxies=proxyGUIDs,
+            extReadOnlyProxies=readOnlyProxyGUIDs,
+            attrs=attrs,
         )
 
         if self.augmentService is not None:
@@ -918,7 +920,7 @@
             # immediately.
             d = self.augmentService.getAugmentRecord(record.guid,
                 recordType)
-            d.addCallback(lambda x:record.addAugmentInformation(x))
+            d.addCallback(lambda x: record.addAugmentInformation(x))
 
         else:
             # Generate augment record based on information retrieved from LDAP
@@ -926,7 +928,6 @@
                 guid,
                 enabled=True,
                 serverID=serverID,
-                partitionID=partitionID,
                 enabledForCalendaring=enabledForCalendaring,
                 autoSchedule=autoSchedule,
                 autoAcceptGroup=autoAcceptGroup,
@@ -959,7 +960,7 @@
         matching the indexType and indexKey parameters.
 
         recordTypes is a list of record types to limit the search to.
-        indexType specifies one of the CachingDirectoryService contstants
+        indexType specifies one of the CachingDirectoryService constants
             identifying which attribute to search on.
         indexKey is the value to search for.
 
@@ -988,7 +989,8 @@
                 # Query on guid only works if guid attribute has been defined.
                 # Support for query on guid even if is auto-generated should
                 # be added.
-                if not guidAttr: return
+                if not guidAttr:
+                    return
                 filterstr = "(&%s(%s=%s))" % (filterstr, guidAttr, indexKey)
 
             elif indexType == self.INDEX_TYPE_SHORTNAME:
@@ -1046,7 +1048,7 @@
 
                 try:
                     record = self._ldapResultToRecord(dn, attrs, recordType)
-                    self.log.debug("Got LDAP record %s" % (record,))
+                    self.log.debug("Got LDAP record {rec}", rec=record)
 
                     if not unrestricted:
                         self.log.debug("%s is not enabled because it's not a member of group: %s" % (dn, self.restrictToGroup))
@@ -1151,7 +1153,6 @@
 
                 self.log.debug("LDAP search returned %d results, %d usable" % (len(results), typeCounts[recordType]))
 
-
         typeCountsStr = ", ".join(["%s:%d" % (rt, ct) for (rt, ct) in typeCounts.iteritems()])
         totalTime = time.time() - startTime
         self.log.info("Calendar user search for %s matched %d records (%s) in %.2f seconds" % (tokens, len(records), typeCountsStr, totalTime))
@@ -1166,7 +1167,7 @@
         """
         records = []
 
-        self.log.debug("Peforming principal property search for %s" % (fields,))
+        self.log.debug("Performing principal property search for %s" % (fields,))
 
         if recordType is None:
             # Make a copy since we're modifying it
@@ -1318,6 +1319,7 @@
 
         returnValue(recordsByAlias.values())
 
+
     def recordTypeForDN(self, dnStr):
         """
         Examine a DN to determine which recordType it belongs to
@@ -1333,6 +1335,7 @@
         return None
 
 
+
 def dnContainedIn(child, parent):
     """
     Return True if child dn is contained within parent dn, otherwise False.
@@ -1340,6 +1343,7 @@
     return child[-len(parent):] == parent
 
 
+
 def normalizeDNstr(dnStr):
     """
     Convert to lowercase and remove extra whitespace
@@ -1350,6 +1354,7 @@
     return ' '.join(ldap.dn.dn2str(ldap.dn.str2dn(dnStr.lower())).split())
 
 
+
 def _convertValue(value, matchType):
     if matchType == "starts-with":
         value = "%s*" % (ldapEsc(value),)
@@ -1360,6 +1365,8 @@
         value = ldapEsc(value)
     return value
 
+
+
 def buildFilter(recordType, mapping, fields, operand="or", optimizeMultiName=False):
     """
     Create an LDAP filter string from a list of tuples representing directory
@@ -1397,8 +1404,8 @@
                 # try the various firstName/lastName permutations:
                 if recordType == "users":
                     converted = []
-                    for firstName, firstCaseless, firstMatchType in combined["firstName"]:
-                        for lastName, lastCaseless, lastMatchType in combined["lastName"]:
+                    for firstName, _ignore_firstCaseless, firstMatchType in combined["firstName"]:
+                        for lastName, _ignore_lastCaseless, lastMatchType in combined["lastName"]:
                             if firstName != lastName:
                                 firstValue = _convertValue(firstName, firstMatchType)
                                 lastValue = _convertValue(lastName, lastMatchType)
@@ -1421,7 +1428,7 @@
         # name, guid)
         additional = []
         for key in ("recordName", "guid"):
-            if mapping.has_key(key):
+            if key in mapping:
                 additional.append("(%s=*)" % (mapping.get(key),))
         if additional:
             filterstr = "(&%s%s)" % ("".join(additional), filterstr)
@@ -1429,6 +1436,7 @@
     return filterstr
 
 
+
 def buildFilterFromTokens(recordType, mapping, tokens, extra=None):
     """
     Create an LDAP filter string from a list of query tokens.  Each token is
@@ -1491,6 +1499,7 @@
     return filterStr
 
 
+
 class LdapDirectoryRecord(CachingDirectoryRecord):
     """
     LDAP implementation of L{IDirectoryRecord}.
@@ -1503,18 +1512,18 @@
         attrs
     ):
         super(LdapDirectoryRecord, self).__init__(
-            service               = service,
-            recordType            = recordType,
-            guid                  = guid,
-            shortNames            = shortNames,
-            authIDs               = authIDs,
-            fullName              = fullName,
-            firstName             = firstName,
-            lastName              = lastName,
-            emailAddresses        = emailAddresses,
-            extProxies            = extProxies,
-            extReadOnlyProxies    = extReadOnlyProxies,
-            uid                   = uid,
+            service=service,
+            recordType=recordType,
+            guid=guid,
+            shortNames=shortNames,
+            authIDs=authIDs,
+            fullName=fullName,
+            firstName=firstName,
+            lastName=lastName,
+            emailAddresses=emailAddresses,
+            extProxies=extProxies,
+            extReadOnlyProxies=extReadOnlyProxies,
+            uid=uid,
         )
 
         # Save attributes of dn and attrs in case you might need them later
@@ -1542,6 +1551,7 @@
             self._members_storage = self._members()
             return self._members_storage
 
+
     def _members(self):
         """ Fault in records for the members of this group """
 
@@ -1572,7 +1582,7 @@
 
                 dn, attrs = result.pop()
                 dn = normalizeDNstr(dn)
-                self.log.debug("Retrieved: %s %s" % (dn,attrs))
+                self.log.debug("Retrieved: %s %s" % (dn, attrs))
                 recordType = self.service.recordTypeForDN(dn)
                 if recordType is None:
                     self.log.error("Unable to map %s to a record type" % (dn,))
@@ -1589,6 +1599,7 @@
 
         return results
 
+
     def groups(self):
         """ Return the records representing groups this record is a member of """
         try:
@@ -1597,6 +1608,7 @@
             self._groups_storage = self._groups()
             return self._groups_storage
 
+
     def _groups(self):
         """ Fault in the groups of which this record is a member """
 
@@ -1612,7 +1624,7 @@
         if len(membersAttrs) == 1:
             filterstr = "(%s=%s)" % (membersAttrs[0], self._memberId)
         else:
-            filterstr = "(|%s)" % ( "".join(
+            filterstr = "(|%s)" % ("".join(
                     ["(%s=%s)" % (a, self._memberId) for a in membersAttrs]
                 ),
             )
@@ -1635,6 +1647,7 @@
 
         return groups
 
+
     def cachedGroupsAlias(self):
         """
         See directory.py for full description
@@ -1644,6 +1657,7 @@
         """
         return self._memberId
 
+
     def memberGUIDs(self):
         return set(self._memberGUIDs)
 
@@ -1711,10 +1725,13 @@
         return super(LdapDirectoryRecord, self).verifyCredentials(credentials)
 
 
+
 class MissingRecordNameException(Exception):
     """ Raised when LDAP record is missing recordName """
     pass
 
+
+
 class MissingGuidException(Exception):
     """ Raised when LDAP record is missing guidAttr and it's required """
     pass

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/principal.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/principal.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/principal.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -601,7 +601,6 @@
         record = self.resource.record
         return tag.fillSlots(
             hostedAt=str(record.serverURI()),
-            partition=str(record.effectivePartitionID()),
         )
 
 
@@ -1066,14 +1065,6 @@
         return self.record.server()
 
 
-    def partitionURI(self):
-        return self.record.partitionURI()
-
-
-    def locallyHosted(self):
-        return self.record.locallyHosted()
-
-
     def thisServer(self):
         return self.record.thisServer()
 

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/resource.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/resource.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -25,14 +25,13 @@
 __all__ = ["DirectoryReverseProxyResource"]
 
 class DirectoryReverseProxyResource(ReverseProxyResource):
-    
+
     def __init__(self, parent, record):
         self.parent = parent
         self.record = record
-        
-        super(DirectoryReverseProxyResource, self).__init__(self.record.partitionID)
-    
-    def url(self):
-        return joinURL(self.parent.url(), self.record.uid)
 
+        super(DirectoryReverseProxyResource, self).__init__(self.record.serverID)
 
+
+    def url(self):
+        return joinURL(self.parent.url(), self.record.uid)

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/augments-test-default.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/augments-test-default.xml	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/augments-test-default.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -24,40 +24,34 @@
     <enable>true</enable>
     <enable-calendar>true</enable-calendar>
     <enable-addressbook>true</enable-addressbook>
-    <partition-id>00001</partition-id>
   </record>
   <record>
     <uid>Location-Default</uid>
     <enable>true</enable>
     <enable-calendar>true</enable-calendar>
-    <partition-id>00004</partition-id>
     <auto-schedule>true</auto-schedule>
   </record>
   <record>
     <uid>Location-AA*</uid>
     <enable>true</enable>
     <enable-calendar>true</enable-calendar>
-    <partition-id>00005</partition-id>
     <auto-schedule>true</auto-schedule>
   </record>
   <record>
     <uid>Resource-Default</uid>
     <enable>true</enable>
     <enable-calendar>true</enable-calendar>
-    <partition-id>00006</partition-id>
     <auto-schedule>true</auto-schedule>
   </record>
   <record>
     <uid>Resource-AA*</uid>
     <enable>true</enable>
     <enable-calendar>true</enable-calendar>
-    <partition-id>00007</partition-id>
     <auto-schedule>true</auto-schedule>
   </record>
   <record>
     <uid>AA*</uid>
     <enable>true</enable>
-    <partition-id>00001</partition-id>
   </record>
   <record>
     <uid>AB*</uid>
@@ -66,14 +60,12 @@
   <record>
     <uid>B*</uid>
     <enable>true</enable>
-    <partition-id>00002</partition-id>
     <enable-calendar>true</enable-calendar>
     <enable-addressbook>true</enable-addressbook>
   </record>
   <record>
     <uid>C*</uid>
     <enable>true</enable>
-    <partition-id>00003</partition-id>
     <enable-calendar>true</enable-calendar>
     <enable-addressbook>true</enable-addressbook>
     <auto-schedule>true</auto-schedule>
@@ -102,17 +94,14 @@
   <record>
     <uid>5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1</uid>
     <enable>true</enable>
-    <partition-id>00001</partition-id>
   </record>
   <record>
     <uid>543D28BA-F74F-4D5F-9243-B3E3A61171E5</uid>
     <enable>true</enable>
-    <partition-id>00002</partition-id>
   </record>
   <record>
     <uid>6A73326A-F781-47E7-A9F8-AF47364D4152</uid>
     <enable>true</enable>
-    <partition-id>00002</partition-id>
     <enable-calendar>true</enable-calendar>
     <enable-addressbook>true</enable-addressbook>
     <auto-schedule>true</auto-schedule>

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/augments-test.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/augments-test.xml	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/augments-test.xml	2013-11-08 03:33:05 UTC (rev 11912)
@@ -42,12 +42,10 @@
   <record>
     <uid>5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1</uid>
     <enable>true</enable>
-    <partition-id>00001</partition-id>
   </record>
   <record>
     <uid>543D28BA-F74F-4D5F-9243-B3E3A61171E5</uid>
     <enable>true</enable>
-    <partition-id>00002</partition-id>
   </record>
   <record>
     <uid>6A73326A-F781-47E7-A9F8-AF47364D4152</uid>

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/resources/caldavd.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/resources/caldavd.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/resources/caldavd.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -175,7 +175,6 @@
         Augments for the directory service records to add calendar specific attributes.
 
         A variety of augment services are available for use.
-        When using a partitioned server, a service that can be accessed from each host will be needed.
       -->
 
     <!-- XML File Augment Service -->

Deleted: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/sudoers.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/sudoers.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/sudoers.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,28 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-  <dict>
-    <key>users</key>
-      <array>
-        <dict>
-          <key>authorize-as</key>
-          <dict>
-            <key>allow</key>
-            <true />
-            <key>principals</key>
-            <array>
-              <string>all</string>
-            </array>
-          </dict>
-          <key>authorize-from</key>
-          <array>
-            <string>127.0.0.1</string>
-          </array>
-          <key>password</key>
-          <string>alice</string>
-          <key>username</key>
-          <string>alice</string>
-        </dict>
-      </array>
-  </dict>
-</plist>

Deleted: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/sudoers2.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/sudoers2.plist	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/sudoers2.plist	2013-11-08 03:33:05 UTC (rev 11912)
@@ -1,47 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-  <dict>
-    <key>users</key>
-      <array>
-        <dict>
-          <key>authorize-as</key>
-          <dict>
-            <key>allow</key>
-            <true />
-            <key>principals</key>
-            <array>
-              <string>all</string>
-            </array>
-          </dict>
-          <key>authorize-from</key>
-          <array>
-            <string>127.0.0.1</string>
-          </array>
-          <key>password</key>
-          <string>alice</string>
-          <key>username</key>
-          <string>alice</string>
-        </dict>
-        <dict>
-          <key>authorize-as</key>
-          <dict>
-            <key>allow</key>
-            <true />
-            <key>principals</key>
-            <array>
-              <string>all</string>
-            </array>
-          </dict>
-          <key>authorize-from</key>
-          <array>
-            <string>127.0.0.1</string>
-          </array>
-          <key>password</key>
-          <string>bob</string>
-          <key>username</key>
-          <string>bob</string>
-        </dict>
-      </array>
-  </dict>
-</plist>

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_augment.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_augment.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_augment.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -32,46 +32,46 @@
 xmlFileNormalization = os.path.join(os.path.dirname(__file__), "augments-normalization.xml")
 
 testRecords = (
-    {"uid": "D11F03A0-97EA-48AF-9A6C-FAC7F3975766", "enabled": True, "partitionID": "", "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "6423F94A-6B76-4A3A-815B-D52CFD77935D", "enabled": True, "partitionID": "", "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "5A985493-EE2C-4665-94CF-4DFEA3A89500", "enabled": False, "partitionID": "", "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "8B4288F6-CC82-491D-8EF9-642EF4F3E7D0", "enabled": True, "partitionID": "", "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1", "enabled": True, "partitionID": "00001", "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "543D28BA-F74F-4D5F-9243-B3E3A61171E5", "enabled": True, "partitionID": "00002", "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "6A73326A-F781-47E7-A9F8-AF47364D4152", "enabled": True, "partitionID": "00002", "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": True, "autoScheduleMode": "default"},
-    {"uid": "C5BAADEE-6B35-4FD5-A98A-5DF6BBAAC47A", "enabled": True, "partitionID": "", "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": True, "autoScheduleMode": "default"},
-    {"uid": "8AB34DF9-0297-4BA3-AADB-DB557DDD21E7", "enabled": True, "partitionID": "", "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": True, "autoScheduleMode": "accept-always"},
+    {"uid": "D11F03A0-97EA-48AF-9A6C-FAC7F3975766", "enabled": True, "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "6423F94A-6B76-4A3A-815B-D52CFD77935D", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "5A985493-EE2C-4665-94CF-4DFEA3A89500", "enabled": False, "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "8B4288F6-CC82-491D-8EF9-642EF4F3E7D0", "enabled": True, "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1", "enabled": True, "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "543D28BA-F74F-4D5F-9243-B3E3A61171E5", "enabled": True, "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "6A73326A-F781-47E7-A9F8-AF47364D4152", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": True, "autoScheduleMode": "default"},
+    {"uid": "C5BAADEE-6B35-4FD5-A98A-5DF6BBAAC47A", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": True, "autoScheduleMode": "default"},
+    {"uid": "8AB34DF9-0297-4BA3-AADB-DB557DDD21E7", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": True, "autoScheduleMode": "accept-always"},
     {"uid": "FC674703-8008-4A77-B80E-0DB55A9CE620", "enabledForLogin": False, }, # Explicitly false
     {"uid": "B473DC32-1B0D-45EE-9BAC-DA878AE9CE74", "enabledForLogin": True, }, # Explicitly True
     {"uid": "9F2B176D-B3F5-483A-AA63-0A1FC6E6D54B", "enabledForLogin": True, }, # Default is True
 )
 
 testRecordWildcardDefault = (
-    {"uid": "A4318887-F2C7-4A70-9056-B88CC8DB26F1", "enabled": True, "partitionID": "00001", "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "AA5F935F-3358-4510-A649-B391D63279F2", "enabled": True, "partitionID": "00001", "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "ABF1A83B-1A29-4E04-BDC3-A6A66ECF27CA", "enabled": False, "partitionID": "", "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "BC22A734-5E41-4FB7-B5C1-51DC0656DC2F", "enabled": True, "partitionID": "00002", "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "C6DEEBB1-E14A-47F2-98BA-7E3BB4353E3A", "enabled": True, "partitionID": "00003", "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": True, "autoScheduleMode": "accept-always"},
-    {"uid": "AA859321-2C72-4974-ADCF-0CBA0C76F95D", "enabled": True, "partitionID": "00001", "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "AB7C488B-9ED2-4265-881C-7E2E38A63584", "enabled": False, "partitionID": "", "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "BB0C0DA1-0545-45F6-8D08-917C554D93A4", "enabled": True, "partitionID": "00002", "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": False, "autoScheduleMode": "default"},
-    {"uid": "CCD30AD3-582F-4682-8B65-2EDE92C5656E", "enabled": True, "partitionID": "00003", "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": True, "autoScheduleMode": "accept-always"},
+    {"uid": "A4318887-F2C7-4A70-9056-B88CC8DB26F1", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "AA5F935F-3358-4510-A649-B391D63279F2", "enabled": True, "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "ABF1A83B-1A29-4E04-BDC3-A6A66ECF27CA", "enabled": False, "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "BC22A734-5E41-4FB7-B5C1-51DC0656DC2F", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "C6DEEBB1-E14A-47F2-98BA-7E3BB4353E3A", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": True, "autoScheduleMode": "accept-always"},
+    {"uid": "AA859321-2C72-4974-ADCF-0CBA0C76F95D", "enabled": True, "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "AB7C488B-9ED2-4265-881C-7E2E38A63584", "enabled": False, "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "BB0C0DA1-0545-45F6-8D08-917C554D93A4", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "CCD30AD3-582F-4682-8B65-2EDE92C5656E", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": True, "autoScheduleMode": "accept-always"},
 )
 
 testRecordTypeDefault = (
-    ("locations", {"uid": "A4318887-F2C7-4A70-9056-B88CC8DB26F1", "enabled": True, "partitionID": "00004", "enabledForCalendaring": True, "enabledForAddressBooks": False, "autoSchedule": True, "autoScheduleMode": "default"}),
-    ("locations", {"uid": "AA5F935F-3358-4510-A649-B391D63279F2", "enabled": True, "partitionID": "00005", "enabledForCalendaring": True, "enabledForAddressBooks": False, "autoSchedule": True, "autoScheduleMode": "default"}),
-    ("resources", {"uid": "A5318887-F2C7-4A70-9056-B88CC8DB26F1", "enabled": True, "partitionID": "00006", "enabledForCalendaring": True, "enabledForAddressBooks": False, "autoSchedule": True, "autoScheduleMode": "default"}),
-    ("resources", {"uid": "AA6F935F-3358-4510-A649-B391D63279F2", "enabled": True, "partitionID": "00007", "enabledForCalendaring": True, "enabledForAddressBooks": False, "autoSchedule": True, "autoScheduleMode": "default"}),
+    ("locations", {"uid": "A4318887-F2C7-4A70-9056-B88CC8DB26F1", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": False, "autoSchedule": True, "autoScheduleMode": "default"}),
+    ("locations", {"uid": "AA5F935F-3358-4510-A649-B391D63279F2", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": False, "autoSchedule": True, "autoScheduleMode": "default"}),
+    ("resources", {"uid": "A5318887-F2C7-4A70-9056-B88CC8DB26F1", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": False, "autoSchedule": True, "autoScheduleMode": "default"}),
+    ("resources", {"uid": "AA6F935F-3358-4510-A649-B391D63279F2", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": False, "autoSchedule": True, "autoScheduleMode": "default"}),
 )
 
 
 testAddRecords = (
-    {"uid": "D11F03A0-97EA-48AF-9A6C-FAC7F3975767", "enabled": True, "partitionID": "", "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "D11F03A0-97EA-48AF-9A6C-FAC7F3975767", "enabled": True, "enabledForCalendaring": False, "enabledForAddressBooks": False, "autoSchedule": False, "autoScheduleMode": "default"},
 )
 
 testModifyRecords = (
-    {"uid": "D11F03A0-97EA-48AF-9A6C-FAC7F3975767", "enabled": True, "partitionID": "", "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": False, "autoScheduleMode": "default"},
+    {"uid": "D11F03A0-97EA-48AF-9A6C-FAC7F3975767", "enabled": True, "enabledForCalendaring": True, "enabledForAddressBooks": True, "autoSchedule": False, "autoScheduleMode": "default"},
 )
 
 

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_directory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_directory.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_directory.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -539,7 +539,167 @@
                 groups,
             )
 
+        #
+        # Now remove all external assignments, and those should take effect.
+        #
+        def fakeExternalProxiesEmpty():
+            return []
 
+        updater = GroupMembershipCacheUpdater(
+            calendaruserproxy.ProxyDBService, self.directoryService, 30, 30, 30,
+            cache=cache, useExternalProxies=True,
+            externalProxiesSource=fakeExternalProxiesEmpty)
+
+        yield updater.updateCache()
+
+        delegates = (
+
+            # record name
+            # read-write delegators
+            # read-only delegators
+            # groups delegate is in (restricted to only those groups
+            #   participating in delegation)
+
+            # Note: "transporter" is now gone for everyone
+
+            ("wsanchez",
+             set(["mercury", "apollo", "orion", "gemini"]),
+             set(["non_calendar_proxy"]),
+             set(['left_coast',
+                  'both_coasts',
+                  'recursive1_coasts',
+                  'recursive2_coasts',
+                  'gemini#calendar-proxy-write',
+                ]),
+            ),
+            ("cdaboo",
+             set(["apollo", "orion", "non_calendar_proxy"]),
+             set(["non_calendar_proxy"]),
+             set(['both_coasts',
+                  'non_calendar_group',
+                  'recursive1_coasts',
+                  'recursive2_coasts',
+                ]),
+            ),
+            ("lecroy",
+             set(["apollo", "mercury", "non_calendar_proxy"]),
+             set(),
+             set(['both_coasts',
+                  'left_coast',
+                      'non_calendar_group',
+                ]),
+            ),
+        )
+
+        for name, write, read, groups in delegates:
+            delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
+
+            proxyFor = (yield delegate.proxyFor(True))
+            self.assertEquals(
+                set([p.record.guid for p in proxyFor]),
+                write,
+            )
+            proxyFor = (yield delegate.proxyFor(False))
+            self.assertEquals(
+                set([p.record.guid for p in proxyFor]),
+                read,
+            )
+            groupsIn = (yield delegate.groupMemberships())
+            uids = set()
+            for group in groupsIn:
+                try:
+                    uid = group.uid # a sub-principal
+                except AttributeError:
+                    uid = group.record.guid # a regular group
+                uids.add(uid)
+            self.assertEquals(
+                set(uids),
+                groups,
+            )
+
+        #
+        # Now add back an external assignments, and those should take effect.
+        #
+        def fakeExternalProxiesAdded():
+            return [
+                (
+                    "transporter#calendar-proxy-write",
+                    set(["8B4288F6-CC82-491D-8EF9-642EF4F3E7D0"])
+                ),
+            ]
+
+        updater = GroupMembershipCacheUpdater(
+            calendaruserproxy.ProxyDBService, self.directoryService, 30, 30, 30,
+            cache=cache, useExternalProxies=True,
+            externalProxiesSource=fakeExternalProxiesAdded)
+
+        yield updater.updateCache()
+
+        delegates = (
+
+            # record name
+            # read-write delegators
+            # read-only delegators
+            # groups delegate is in (restricted to only those groups
+            #   participating in delegation)
+
+            ("wsanchez",
+             set(["mercury", "apollo", "orion", "gemini"]),
+             set(["non_calendar_proxy"]),
+             set(['left_coast',
+                  'both_coasts',
+                  'recursive1_coasts',
+                  'recursive2_coasts',
+                  'gemini#calendar-proxy-write',
+                ]),
+            ),
+            ("cdaboo",
+             set(["apollo", "orion", "non_calendar_proxy"]),
+             set(["non_calendar_proxy"]),
+             set(['both_coasts',
+                  'non_calendar_group',
+                  'recursive1_coasts',
+                  'recursive2_coasts',
+                ]),
+            ),
+            ("lecroy",
+             set(["apollo", "mercury", "non_calendar_proxy", "transporter"]),
+             set(),
+             set(['both_coasts',
+                  'left_coast',
+                  'non_calendar_group',
+                  'transporter#calendar-proxy-write',
+                ]),
+            ),
+        )
+
+        for name, write, read, groups in delegates:
+            delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
+
+            proxyFor = (yield delegate.proxyFor(True))
+            self.assertEquals(
+                set([p.record.guid for p in proxyFor]),
+                write,
+            )
+            proxyFor = (yield delegate.proxyFor(False))
+            self.assertEquals(
+                set([p.record.guid for p in proxyFor]),
+                read,
+            )
+            groupsIn = (yield delegate.groupMemberships())
+            uids = set()
+            for group in groupsIn:
+                try:
+                    uid = group.uid # a sub-principal
+                except AttributeError:
+                    uid = group.record.guid # a regular group
+                uids.add(uid)
+            self.assertEquals(
+                set(uids),
+                groups,
+            )
+
+
     def test_diffAssignments(self):
         """
         Ensure external proxy assignment diffing works
@@ -728,7 +888,7 @@
         }
         members = pickle.loads(snapshotFile.getContent())
         self.assertEquals(members, expected)
-        
+
         # "Corrupt" the snapshot and verify it is regenerated properly
         snapshotFile.setContent("xyzzy")
         cache.delete("group-cacher-populated")
@@ -739,8 +899,8 @@
         self.assertTrue(snapshotFile.exists())
         members = pickle.loads(snapshotFile.getContent())
         self.assertEquals(members, expected)
-        
 
+
     def test_autoAcceptMembers(self):
         """
         autoAcceptMembers( ) returns an empty list if no autoAcceptGroup is

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_ldapdirectory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_ldapdirectory.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_ldapdirectory.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -588,9 +588,8 @@
                     "readOnlyProxyAttr": "read-only-proxy",
                     "autoAcceptGroupAttr": None,
                 },
-                "partitionSchema": {
+                "poddingSchema": {
                     "serverIdAttr": "server-id", # maps to augments server-id
-                    "partitionIdAttr": "partition-id", # maps to augments partition-id
                 },
             }
         )
@@ -804,9 +803,8 @@
                     "readOnlyProxyAttr": None,
                     "autoAcceptGroupAttr": None,
                 },
-                "partitionSchema": {
+                "poddingSchema": {
                     "serverIdAttr": "server-id", # maps to augments server-id
-                    "partitionIdAttr": "partition-id", # maps to augments partition-id
                 },
             }
         )
@@ -1022,9 +1020,8 @@
                     "readOnlyProxyAttr": None,
                     "autoAcceptGroupAttr": None,
                 },
-                "partitionSchema": {
+                "poddingSchema": {
                     "serverIdAttr": "server-id", # maps to augments server-id
-                    "partitionIdAttr": "partition-id", # maps to augments partition-id
                 },
             }
         )
@@ -1236,9 +1233,8 @@
                     "readOnlyProxyAttr": None,
                     "autoAcceptGroupAttr": None,
                 },
-                "partitionSchema": {
+                "poddingSchema": {
                     "serverIdAttr": "server-id", # maps to augments server-id
-                    "partitionIdAttr": "partition-id", # maps to augments partition-id
                 },
             }
         )
@@ -1296,7 +1292,6 @@
             self.assertEquals(record.firstName, 'Amanda')
             self.assertEquals(record.lastName, 'Test')
             self.assertEquals(record.serverID, None)
-            self.assertEquals(record.partitionID, None)
             self.assertFalse(record.enabledForCalendaring)
 
             # User with enabled-for-calendaring specified
@@ -1326,13 +1321,11 @@
                 'apple-generateduid': [guid],
                 'cn': ['Amanda Test'],
                 'server-id' : ["test-server-id"],
-                'partition-id' : ["test-partition-id"],
             }
 
             record = self.service._ldapResultToRecord(dn, attrs,
                 self.service.recordType_users)
             self.assertEquals(record.serverID, "test-server-id")
-            self.assertEquals(record.partitionID, "test-partition-id")
 
             # User missing guidAttr
 

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/xmlaugmentsparser.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/xmlaugmentsparser.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/xmlaugmentsparser.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -30,38 +30,36 @@
 
 log = Logger()
 
-ELEMENT_AUGMENTS          = "augments"
-ELEMENT_RECORD            = "record"
+ELEMENT_AUGMENTS = "augments"
+ELEMENT_RECORD = "record"
 
-ELEMENT_UID               = "uid"
-ELEMENT_ENABLE            = "enable"
-ELEMENT_SERVERID          = "server-id"
-ELEMENT_PARTITIONID       = "partition-id"
-ELEMENT_HOSTEDAT          = "hosted-at"   # Backwards compatibility
-ELEMENT_ENABLECALENDAR    = "enable-calendar"
+ELEMENT_UID = "uid"
+ELEMENT_ENABLE = "enable"
+ELEMENT_SERVERID = "server-id"
+ELEMENT_PARTITIONID = "partition-id"   # Backwards compatibility
+ELEMENT_HOSTEDAT = "hosted-at"   # Backwards compatibility
+ELEMENT_ENABLECALENDAR = "enable-calendar"
 ELEMENT_ENABLEADDRESSBOOK = "enable-addressbook"
-ELEMENT_ENABLELOGIN       = "enable-login"
-ELEMENT_AUTOSCHEDULE      = "auto-schedule"
+ELEMENT_ENABLELOGIN = "enable-login"
+ELEMENT_AUTOSCHEDULE = "auto-schedule"
 ELEMENT_AUTOSCHEDULE_MODE = "auto-schedule-mode"
-ELEMENT_AUTOACCEPTGROUP   = "auto-accept-group"
+ELEMENT_AUTOACCEPTGROUP = "auto-accept-group"
 
-ATTRIBUTE_REPEAT          = "repeat"
+ATTRIBUTE_REPEAT = "repeat"
 
-VALUE_TRUE                = "true"
-VALUE_FALSE               = "false"
+VALUE_TRUE = "true"
+VALUE_FALSE = "false"
 
 ELEMENT_AUGMENTRECORD_MAP = {
-    ELEMENT_UID:               "uid",
-    ELEMENT_ENABLE:            "enabled",
-    ELEMENT_SERVERID:          "serverID",
-    ELEMENT_PARTITIONID:       "partitionID",
-    ELEMENT_HOSTEDAT:          "partitionID",   # Backwards compatibility
-    ELEMENT_ENABLECALENDAR:    "enabledForCalendaring",
+    ELEMENT_UID: "uid",
+    ELEMENT_ENABLE: "enabled",
+    ELEMENT_SERVERID: "serverID",
+    ELEMENT_ENABLECALENDAR: "enabledForCalendaring",
     ELEMENT_ENABLEADDRESSBOOK: "enabledForAddressBooks",
-    ELEMENT_ENABLELOGIN:       "enabledForLogin",
-    ELEMENT_AUTOSCHEDULE:      "autoSchedule",
+    ELEMENT_ENABLELOGIN: "enabledForLogin",
+    ELEMENT_AUTOSCHEDULE: "autoSchedule",
     ELEMENT_AUTOSCHEDULE_MODE: "autoScheduleMode",
-    ELEMENT_AUTOACCEPTGROUP:   "autoAcceptGroup",
+    ELEMENT_AUTOACCEPTGROUP: "autoAcceptGroup",
 }
 
 class XMLAugmentsParser(object):
@@ -71,6 +69,7 @@
     def __repr__(self):
         return "<%s %r>" % (self.__class__.__name__, self.xmlFile)
 
+
     def __init__(self, xmlFile, items):
 
         self.items = items
@@ -84,13 +83,14 @@
 
         self._parseXML(augments_node)
 
+
     def _parseXML(self, rootnode):
         """
         Parse the XML root node from the augments configuration document.
         @param rootnode: the L{Element} to parse.
         """
         for child in rootnode:
-            
+
             if child.tag != ELEMENT_RECORD:
                 raise RuntimeError("Unknown augment type: '%s' in augment file: '%s'" % (child.tag, self.xmlFile,))
 
@@ -98,7 +98,7 @@
 
             fields = {}
             for node in child:
-                
+
                 if node.tag in (
                     ELEMENT_UID,
                     ELEMENT_SERVERID,
@@ -118,33 +118,35 @@
                     fields[node.tag] = node.text == VALUE_TRUE
                 else:
                     raise RuntimeError("Invalid element '%s' in augment file: '%s'" % (node.tag, self.xmlFile,))
-                    
+
             # Must have at least a uid
             if ELEMENT_UID not in fields:
                 raise RuntimeError("Invalid record '%s' without a uid in augment file: '%s'" % (child, self.xmlFile,))
-                
+
             if repeat > 1:
-                for i in xrange(1, repeat+1):
+                for i in xrange(1, repeat + 1):
                     self.buildRecord(fields, i)
             else:
                 self.buildRecord(fields)
-    
+
+
     def buildRecord(self, fields, count=None):
-        
+
         from twistedcaldav.directory.augment import AugmentRecord
 
         def expandCount(value, count):
-            
+
             if type(value) in types.StringTypes:
                 return value % (count,) if count and "%" in value else value
             elif type(value) == set:
                 return set([item % (count,) if count and "%" in item else item for item in value])
             else:
                 return value
-        
+
         actualFields = {}
-        for k,v in fields.iteritems():
-            actualFields[ELEMENT_AUGMENTRECORD_MAP[k]] = expandCount(v, count)
+        for k, v in fields.iteritems():
+            if k in ELEMENT_AUGMENTRECORD_MAP:
+                actualFields[ELEMENT_AUGMENTRECORD_MAP[k]] = expandCount(v, count)
 
         record = AugmentRecord(**actualFields)
         self.items[record.uid] = record

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/extensions.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/extensions.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/extensions.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -494,13 +494,13 @@
             size = child.contentLength()
             lastModified = child.lastModified()
             rtypes = []
-            fullrtype = child.resourceType()
+            fullrtype = child.resourceType() if hasattr(child, "resourceType") else None
             if fullrtype is not None:
                 for rtype in fullrtype.children:
                     rtypes.append(rtype.name)
             if rtypes:
                 rtypes = "(%s)" % (", ".join(rtypes),)
-            if child.isCollection():
+            if child.isCollection() if hasattr(child, "isCollection") else False:
                 contentType = rtypes
             else:
                 mimeType = child.contentType()

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/ical.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/ical.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/ical.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -63,9 +63,7 @@
 iCalendarProductID = "-//CALENDARSERVER.ORG//NONSGML Version 1//EN"
 
 allowedStoreComponents = (
-    "VEVENT",
-    "VTODO",
-    "VPOLL",
+    "VEVENT", "VTODO", "VPOLL",
 )
 allowedSchedulingComponents = allowedStoreComponents + ("VFREEBUSY",)
 allowedComponents = allowedSchedulingComponents + ("VTIMEZONE",)

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/resource.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/resource.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -334,6 +334,12 @@
             else:
                 yield transaction.commit()
 
+                # Log extended item
+                if transaction.logItems:
+                    if not hasattr(request, "extendedLogItems"):
+                        request.extendedLogItems = {}
+                    request.extendedLogItems.update(transaction.logItems)
+
                 # May need to reset the last-modified header in the response as txn.commit() can change it due to pre-commit hooks
                 if response.headers.hasHeader("last-modified"):
                     response.headers.setHeader("last-modified", self.lastModified())
@@ -2559,15 +2565,6 @@
         return self._newStoreHome.hasCalendarResourceUIDSomewhereElse(uid, ok_object._newStoreObject, mode)
 
 
-    def getCalendarResourcesForUID(self, uid, allow_shared=False):
-        """
-        Return all child object resources with the specified UID.
-
-        Pass through direct to store.
-        """
-        return self._newStoreHome.getCalendarResourcesForUID(uid, allow_shared)
-
-
     def defaultAccessControlList(self):
         myPrincipal = self.principalForRecord()
 

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/scheduling_store/caldav/resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/scheduling_store/caldav/resource.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/scheduling_store/caldav/resource.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -428,8 +428,12 @@
                 authz = (yield request.locateResource(principalURL))
                 self._associatedTransaction._authz_uid = authz.record.guid
 
+        # Log extended item
+        if not hasattr(request, "extendedLogItems"):
+            request.extendedLogItems = {}
+
         # This is a local CALDAV scheduling operation.
-        scheduler = CalDAVScheduler(self._associatedTransaction, self.parent._newStoreHome.uid())
+        scheduler = CalDAVScheduler(self._associatedTransaction, self.parent._newStoreHome.uid(), logItems=request.extendedLogItems)
 
         # Do the POST processing treating
         result = (yield scheduler.doSchedulingViaPOST(originator, recipients, calendar))

Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/stdconfig.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/stdconfig.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -156,9 +156,8 @@
             "resourceInfoAttr": None, # contains location/resource info
             "autoAcceptGroupAttr": None, # auto accept group
         },
-        "partitionSchema": {
+        "poddingSchema": {
             "serverIdAttr": None, # maps to augments server-id
-            "partitionIdAttr": None, # maps to augments partition-id
         },
     },
 }
@@ -307,9 +306,15 @@
     "FailIfUpgradeNeeded"  : True, # Set to True to prevent the server or utility tools
                                    # tools from running if the database needs a schema
                                    # upgrade.
-    "StopAfterUpgradeTriggerFile" : "stop_after_upgrade", # if this file exists
-        # in ConfigRoot, stop the service after finishing upgrade phase
+    "StopAfterUpgradeTriggerFile" : "stop_after_upgrade",   # if this file exists in ConfigRoot, stop
+                                                            # the service after finishing upgrade phase
 
+    "UpgradeHomePrefix"    : "",    # When upgrading, only upgrade homes where the owner UID starts
+                                    # with the specified prefix. The upgrade will only be partial and only
+                                    # apply to upgrade pieces that affect entire homes. The upgrade will
+                                    # need to be run again without this prefix set to complete the overall
+                                    # upgrade.
+
     #
     # Types of service provided
     #
@@ -569,8 +574,8 @@
         }
     },
 
-    "EnableTimezonesByReference" : False, # Strip out VTIMEZONES that are known
-    "UsePackageTimezones" : False, # Use timezone data from twistedcaldav.zoneinfo - don't copy to Data directory
+    "EnableTimezonesByReference" : True, # Strip out VTIMEZONES that are known
+    "UsePackageTimezones"        : False, # Use timezone data from twistedcaldav.zoneinfo - don't copy to Data directory
 
     "EnableBatchUpload"       : True, # POST batch uploads
     "MaxResourcesBatchUpload" : 100, # Maximum number of resources in a batch POST
@@ -811,11 +816,11 @@
     # Support multiple hosts within a domain
     #
     "Servers" : {
-        "Enabled": False, # Multiple servers/partitions enabled or not
-        "ConfigFile": "localservers.xml", # File path for server information
-        "MaxClients": 5, # Pool size for connections to each partition
+        "Enabled": False,                   # Multiple servers enabled or not
+        "ConfigFile": "localservers.xml",   # File path for server information
+        "MaxClients": 5,                    # Pool size for connections between servers
+        "InboxName": "podding",             # Name for top-level inbox resource
     },
-    "ServerPartitionID": "", # Unique ID for this server's partition instance.
 
     #
     # Performance tuning
@@ -1016,7 +1021,8 @@
     # means no automatic shutdown.
     "AgentInactivityTimeoutSeconds"  : 4 * 60 * 60,
 
-    # These two aren't relative to ConfigRoot:
+    # These aren't relative to ConfigRoot:
+    "ImportConfig": "", # Config to read first and merge
     "Includes": [], # Other plists to parse after this one
     "WritableConfigFile" : "", # which config file calendarserver_config should
         # write to for changes; empty string means the main config file.
@@ -1048,18 +1054,41 @@
         if self._configFileName:
             configDict = self._parseConfigFromFile(self._configFileName)
         configDict = ConfigDict(configDict)
-        # Now check for Includes and parse and add each of those
-        if "Includes" in configDict:
-            for include in configDict.Includes:
-                # Includes are not relative to ConfigRoot
-                path = _expandPath(include)
+
+        def _loadImport(childDict):
+            # Look for an import and read that one as the main config and merge the current one into that
+            if "ImportConfig" in childDict and childDict.ImportConfig:
+                configRoot = os.path.join(childDict.ServerRoot, childDict.ConfigRoot)
+                path = _expandPath(fullServerPath(configRoot, childDict.ImportConfig))
                 if os.path.exists(path):
-                    additionalDict = ConfigDict(self._parseConfigFromFile(path))
-                    if additionalDict:
-                        log.info("Adding configuration from file: '%s'" % (path,))
-                        mergeData(configDict, additionalDict)
-                else:
-                    log.debug("Missing configuration file: '%s'" % (path,))
+                    importDict = ConfigDict(self._parseConfigFromFile(path))
+                    if importDict:
+                        self.importedFiles.append(path)
+                        importDict = _loadImport(importDict)
+                        mergeData(importDict, childDict)
+                        return importDict
+                raise ConfigurationError("Import configuration file '{path}' must exist and be valid.".format(path=path))
+            else:
+                return childDict
+
+        def _loadIncludes(parentDict):
+            # Now check for Includes and parse and add each of those
+            if "Includes" in parentDict:
+                configRoot = os.path.join(parentDict.ServerRoot, parentDict.ConfigRoot)
+                for include in parentDict.Includes:
+                    # Includes are not relative to ConfigRoot
+                    path = _expandPath(fullServerPath(configRoot, include))
+                    if os.path.exists(path):
+                        additionalDict = ConfigDict(self._parseConfigFromFile(path))
+                        if additionalDict:
+                            self.includedFiles.append(path)
+                            _loadIncludes(additionalDict)
+                            mergeData(parentDict, additionalDict)
+                    else:
+                        self.missingFiles.append(path)
+
+        configDict = _loadImport(configDict)
+        _loadIncludes(configDict)
         return configDict
 
 
@@ -1484,6 +1513,15 @@
 
 
 
+def _updateICalendar(configDict, reloading=False):
+    """
+    Update supported iCalendar components.
+    """
+    from twistedcaldav import ical
+    ical.allowedStoreComponents = tuple(configDict.SupportedComponents)
+
+
+
 def _updateScheduling(configDict, reloading=False):
     #
     # Scheduling
@@ -1523,8 +1561,7 @@
     from txdav.caldav.datastore.scheduling.ischedule.localservers import Servers
     if configDict.Servers.Enabled:
         Servers.load()
-        Servers.getThisServer().installReverseProxies(
-            configDict.ServerPartitionID,
+        Servers.installReverseProxies(
             configDict.Servers.MaxClients,
         )
     else:
@@ -1595,6 +1632,7 @@
     _updateRejectClients,
     _updateLogLevels,
     _updateNotifications,
+    _updateICalendar,
     _updateScheduling,
     _updateServers,
     _updateCompliance,

Modified: CalendarServer/branches/users/cdaboo/json/txdav/base/datastore/subpostgres.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/base/datastore/subpostgres.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/base/datastore/subpostgres.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -454,6 +454,10 @@
             self.deactivateDelayedShutdown()
 
         def gotReady(result):
+            """
+            We started postgres; we're responsible for stopping it later.
+            Call pgCtl status to get the pid.
+            """
             log.warn("{cmd} exited", cmd=pgCtl)
             self.shouldStopDatabase = True
             d = Deferred()
@@ -465,13 +469,32 @@
             )
             return d.addCallback(gotStatus)
 
-        def reportit(f):
-            log.failure("starting postgres", f)
+        def couldNotStart(f):
+            """
+            There was an error trying to start postgres.  Try to connect
+            because it might already be running.  In this case, we won't
+            be the one to stop it.
+            """
+            d = Deferred()
+            statusMonitor = CapturingProcessProtocol(d, None)
+            self.reactor.spawnProcess(
+                statusMonitor, pgCtl, [pgCtl, "status"],
+                env=self.env, path=self.workingDir.path,
+                uid=self.uid, gid=self.gid,
+            )
+            return d.addCallback(gotStatus).addErrback(giveUp)
+
+        def giveUp(f):
+            """
+            We can't start postgres or connect to a running instance.  Shut
+            down.
+            """
+            log.failure("Can't start or connect to postgres", f)
             self.deactivateDelayedShutdown()
             self.reactor.stop()
 
         self.monitor.completionDeferred.addCallback(
-            gotReady).addErrback(reportit)
+            gotReady).addErrback(couldNotStart)
 
     shouldStopDatabase = False
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/base/datastore/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/base/datastore/util.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/base/datastore/util.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -92,6 +92,12 @@
         return "objectWithName:%s:%s" % (homeResourceID, name)
 
 
+    # Home child objects by id
+
+    def keyForObjectWithResourceID(self, homeResourceID, resourceID):
+        return "objectWithName:%s:%s" % (homeResourceID, resourceID)
+
+
     # Home metadata (Created/Modified)
 
     def keyForHomeMetaData(self, homeResourceID):

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/file.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/file.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/file.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -136,7 +136,7 @@
     @inlineCallbacks
     def hasCalendarResourceUIDSomewhereElse(self, uid, ok_object, type):
 
-        objectResources = (yield self.objectResourcesWithUID(uid, ("inbox",)))
+        objectResources = (yield self.getCalendarResourcesForUID(uid))
         for objectResource in objectResources:
             if ok_object and objectResource._path == ok_object._path:
                 continue
@@ -148,14 +148,9 @@
 
 
     @inlineCallbacks
-    def getCalendarResourcesForUID(self, uid, allow_shared=False):
+    def getCalendarResourcesForUID(self, uid):
 
-        results = []
-        objectResources = (yield self.objectResourcesWithUID(uid, ("inbox",)))
-        for objectResource in objectResources:
-            if allow_shared or objectResource._parentCollection.owned():
-                results.append(objectResource)
-
+        results = (yield self.objectResourcesWithUID(uid, ("inbox",)))
         returnValue(results)
 
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/schedule.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/schedule.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/schedule.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -112,8 +112,8 @@
         return self._calendarHome.hasCalendarResourceUIDSomewhereElse(uid, ok_object, type)
 
 
-    def getCalendarResourcesForUID(self, uid, allow_shared=False):
-        return self._calendarHome.getCalendarResourcesForUID(uid, allow_shared)
+    def getCalendarResourcesForUID(self, uid):
+        return self._calendarHome.getCalendarResourcesForUID(uid)
 
 
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/addressmapping.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/addressmapping.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/addressmapping.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -53,7 +53,7 @@
     @inlineCallbacks
     def getCalendarUser(self, cuaddr, principal):
 
-        # If we have a principal always treat the user as local or partitioned
+        # If we have a principal always treat the user as local
         if principal:
             returnValue(calendarUserFromPrincipal(cuaddr, principal))
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/caldav/delivery.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/caldav/delivery.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/caldav/delivery.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -26,8 +26,7 @@
 from twistedcaldav.config import config
 
 from txdav.base.propertystore.base import PropertyName
-from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser, RemoteCalendarUser, \
-    PartitionedCalendarUser, OtherServerCalendarUser
+from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser, RemoteCalendarUser, OtherServerCalendarUser
 from txdav.caldav.datastore.scheduling.delivery import DeliveryService
 from txdav.caldav.datastore.scheduling.freebusy import processAvailabilityFreeBusy, \
     generateFreeBusyInfo, buildFreeBusyResult
@@ -99,7 +98,7 @@
         uid = self.scheduler.calendar.resourceUID()
 
         organizerPrincipal = None
-        if type(self.scheduler.organizer) in (LocalCalendarUser, PartitionedCalendarUser, OtherServerCalendarUser,):
+        if type(self.scheduler.organizer) in (LocalCalendarUser, OtherServerCalendarUser,):
             organizerPrincipal = self.scheduler.organizer.principal.uid
 
         for recipient in self.recipients:

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/caldav/scheduler.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/caldav/scheduler.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/caldav/scheduler.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -101,6 +101,14 @@
                 "No principal for originator",
             ))
         else:
+            if not (originatorPrincipal.calendarsEnabled() and originatorPrincipal.thisServer()):
+                log.error("Originator not enabled or hosted on this server: %s" % (self.originator,))
+                raise HTTPError(self.errorResponse(
+                    responsecode.FORBIDDEN,
+                    self.errorElements["originator-denied"],
+                    "Originator cannot be scheduled",
+                ))
+
             self.originator = LocalCalendarUser(self.originator, originatorPrincipal)
 
 
@@ -127,8 +135,8 @@
             else:
                 # Map recipient to their inbox
                 inbox = None
-                if principal.calendarsEnabled() and principal.thisServer():
-                    if principal.locallyHosted():
+                if principal.calendarsEnabled():
+                    if principal.thisServer():
                         recipient_home = yield self.txn.calendarHomeWithUID(principal.uid, create=True)
                         if recipient_home:
                             inbox = (yield recipient_home.calendarWithName("inbox"))
@@ -138,7 +146,7 @@
                 if inbox:
                     results.append(calendarUserFromPrincipal(recipient, principal, inbox))
                 else:
-                    log.error("No schedule inbox for principal: %s" % (principal,))
+                    log.error("Recipient not enabled for calendaring: %s" % (principal,))
                     results.append(InvalidCalendarUser(recipient))
 
         self.recipients = results

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/cuaddress.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/cuaddress.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/cuaddress.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -21,7 +21,6 @@
 
 __all__ = [
     "LocalCalendarUser",
-    "PartitionedCalendarUser",
     "OtherServerCalendarUser",
     "RemoteCalendarUser",
     "EmailCalendarUser",
@@ -53,19 +52,6 @@
 
 
 
-class PartitionedCalendarUser(CalendarUser):
-
-    def __init__(self, cuaddr, principal):
-        self.cuaddr = cuaddr
-        self.principal = principal
-        self.serviceType = DeliveryService.serviceType_ischedule
-
-
-    def __str__(self):
-        return "Partitioned calendar user: %s" % (self.cuaddr,)
-
-
-
 class OtherServerCalendarUser(CalendarUser):
 
     def __init__(self, cuaddr, principal):
@@ -145,9 +131,7 @@
     Get the appropriate calendar user address class for the provided principal.
     """
 
-    if principal.locallyHosted():
+    if principal.thisServer():
         return LocalCalendarUser(recipient, principal, inbox)
-    elif principal.thisServer():
-        return PartitionedCalendarUser(recipient, principal)
     else:
         return OtherServerCalendarUser(recipient, principal)

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/implicit.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/implicit.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/implicit.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -28,7 +28,7 @@
 from txdav.caldav.datastore.scheduling import addressmapping
 from txdav.caldav.datastore.scheduling.caldav.scheduler import CalDAVScheduler
 from txdav.caldav.datastore.scheduling.cuaddress import InvalidCalendarUser, \
-    LocalCalendarUser, PartitionedCalendarUser, OtherServerCalendarUser, \
+    LocalCalendarUser, OtherServerCalendarUser, \
     normalizeCUAddr
 from txdav.caldav.datastore.scheduling.icaldiff import iCalDiff
 from txdav.caldav.datastore.scheduling.itip import iTipGenerator, iTIPRequestStatus
@@ -55,10 +55,10 @@
     STATUS_ORPHANED_CANCELLED_EVENT = 1
     STATUS_ORPHANED_EVENT = 2
 
-    def __init__(self):
+    def __init__(self, logItems=None):
 
         self.return_status = ImplicitScheduler.STATUS_OK
-        self.logItems = {}
+        self.logItems = logItems
         self.allowed_to_schedule = True
         self.suppress_refresh = False
 
@@ -250,7 +250,7 @@
         # to create new scheduling resources.
         if self.action == "create":
             if self.organizerPrincipal and not self.organizerPrincipal.enabledAsOrganizer():
-                log.error("ORGANIZER not allowed to be an Organizer: %s" % (self.organizer,))
+                log.error("ORGANIZER not allowed to be an Organizer: {organizer}", organizer=self.organizer)
                 raise HTTPError(ErrorResponse(
                     responsecode.FORBIDDEN,
                     (caldav_namespace, "organizer-allowed"),
@@ -382,7 +382,7 @@
             if self.txn.doing_attendee_refresh == 0:
                 delattr(self.txn, "doing_attendee_refresh")
 
-        if refreshCount:
+        if refreshCount and self.logItems is not None:
             self.logItems["itip.refreshes"] = refreshCount
 
 
@@ -426,7 +426,7 @@
             self.organizer = self.calendar.validOrganizerForScheduling()
         except ValueError:
             # We have different ORGANIZERs in the same iCalendar object - this is an error
-            log.error("Only one ORGANIZER is allowed in an iCalendar object:\n%s" % (self.calendar,))
+            log.error("Only one ORGANIZER is allowed in an iCalendar object:\n{calendar}", calendar=self.calendar)
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (caldav_namespace, "single-organizer"),
@@ -461,7 +461,7 @@
         # Check for matching resource somewhere else in the home
         foundElsewhere = (yield self.calendar_home.hasCalendarResourceUIDSomewhereElse(self.uid, check_resource, mode))
         if foundElsewhere is not None:
-            log.debug("Implicit - found component with same UID in a different collection: %s" % (check_resource,))
+            log.debug("Implicit - found component with same UID in a different collection: {resource}", resource=check_resource)
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (caldav_namespace, "unique-scheduling-object-resource"),
@@ -530,7 +530,7 @@
         # Check for a delete
         if self.action == "remove":
 
-            log.debug("Implicit - organizer '%s' is removing UID: '%s'" % (self.organizer, self.uid))
+            log.debug("Implicit - organizer '{organizer}' is removing UID: '{uid}'", organizer=self.organizer, uid=self.uid)
             self.oldcalendar = self.calendar
 
             # Cancel all attendees
@@ -556,16 +556,16 @@
             no_change, self.changed_rids, self.needs_action_rids, reinvites, recurrence_reschedule = self.isOrganizerChangeInsignificant()
             if no_change:
                 if reinvites:
-                    log.debug("Implicit - organizer '%s' is re-inviting UID: '%s', attendees: %s" % (self.organizer, self.uid, ", ".join(reinvites)))
+                    log.debug("Implicit - organizer '{organizer}' is re-inviting UID: '{uid}', attendees: {attendees}", organizer=self.organizer, uid=self.uid, attendees=", ".join(reinvites))
                     self.reinvites = reinvites
                 else:
                     # Nothing to do
-                    log.debug("Implicit - organizer '%s' is modifying UID: '%s' but change is not significant" % (self.organizer, self.uid))
+                    log.debug("Implicit - organizer '{organizer}' is modifying UID: '{uid}' but change is not significant", organizer=self.organizer, uid=self.uid)
                     returnValue(None)
             else:
                 # Do not change PARTSTATs for a split operation
                 if self.split_details is None:
-                    log.debug("Implicit - organizer '%s' is modifying UID: '%s'" % (self.organizer, self.uid))
+                    log.debug("Implicit - organizer '{organizer}' is modifying UID: '{uid}'", organizer=self.organizer, uid=self.uid)
 
                     for rid in self.needs_action_rids:
                         comp = self.calendar.overriddenComponent(rid)
@@ -586,7 +586,7 @@
 
                                 attendee.setParameter("PARTSTAT", "NEEDS-ACTION")
                 else:
-                    log.debug("Implicit - organizer '%s' is splitting UID: '%s'" % (self.organizer, self.uid))
+                    log.debug("Implicit - organizer '{organizer}' is splitting UID: '{uid}'", organizer=self.organizer, uid=self.uid)
 
                 # Check for removed attendees
                 if not recurrence_reschedule:
@@ -600,10 +600,10 @@
 
         elif self.action == "create":
             if self.split_details is None:
-                log.debug("Implicit - organizer '%s' is creating UID: '%s'" % (self.organizer, self.uid))
+                log.debug("Implicit - organizer '{organizer}' is creating UID: '{uid}'", organizer=self.organizer, uid=self.uid)
                 self.coerceAttendeesPartstatOnCreate()
             else:
-                log.debug("Implicit - organizer '%s' is creating a split UID: '%s'" % (self.organizer, self.uid))
+                log.debug("Implicit - organizer '{organizer}' is creating a split UID: '{uid}'", organizer=self.organizer, uid=self.uid)
                 self.needs_sequence_change = False
 
         # Always set RSVP=TRUE for any NEEDS-ACTION
@@ -697,7 +697,7 @@
                 oldOrganizer = self.oldcalendar.getOrganizer()
                 newOrganizer = self.calendar.getOrganizer()
                 if oldOrganizer != newOrganizer:
-                    log.error("Cannot change ORGANIZER: UID:%s" % (self.uid,))
+                    log.error("Cannot change ORGANIZER: UID:{uid}", uid=self.uid)
                     raise HTTPError(ErrorResponse(
                         responsecode.FORBIDDEN,
                         (caldav_namespace, "valid-organizer-change"),
@@ -905,7 +905,7 @@
                 if cuaddr not in coerced:
                     attendeePrincipal = self.calendar_home.directoryService().recordWithCalendarUserAddress(cuaddr)
                     attendeeAddress = (yield addressmapping.mapper.getCalendarUser(cuaddr, attendeePrincipal))
-                    local_attendee = type(attendeeAddress) in (LocalCalendarUser, PartitionedCalendarUser, OtherServerCalendarUser,)
+                    local_attendee = type(attendeeAddress) in (LocalCalendarUser, OtherServerCalendarUser,)
                     coerced[cuaddr] = local_attendee
                 if coerced[cuaddr]:
                     attendee.removeParameter("SCHEDULE-AGENT")
@@ -924,7 +924,8 @@
         if self.action in ("create", "modify",):
             total += (yield self.processRequests())
 
-        self.logItems["itip.requests"] = total
+        if self.logItems is not None:
+            self.logItems["itip.requests"] = total
 
 
     @inlineCallbacks
@@ -975,7 +976,7 @@
                 scheduler = self.makeScheduler()
 
                 # Do the PUT processing
-                log.info("Implicit CANCEL - organizer: '%s' to attendee: '%s', UID: '%s', RIDs: '%s'" % (self.organizer, attendee, self.uid, rids))
+                log.info("Implicit CANCEL - organizer: '{organizer}' to attendee: '{attendee}', UID: '{uid}', RIDs: '{rids}'", organizer=self.organizer, attendee=attendee, uid=self.uid, rids=rids)
                 response = (yield scheduler.doSchedulingViaPUT(self.originator, (attendee,), itipmsg, internal_request=True, suppress_refresh=self.suppress_refresh))
                 self.handleSchedulingResponse(response, True)
 
@@ -1032,7 +1033,7 @@
                 scheduler = self.makeScheduler()
 
                 # Do the PUT processing
-                log.info("Implicit REQUEST - organizer: '%s' to attendee: '%s', UID: '%s'" % (self.organizer, attendee, self.uid,))
+                log.info("Implicit REQUEST - organizer: '{organizer}' to attendee: '{attendee}', UID: '{uid}'", organizer=self.organizer, attendee=attendee, uid=self.uid)
                 response = (yield scheduler.doSchedulingViaPUT(self.originator, (attendee,), itipmsg, internal_request=True, suppress_refresh=self.suppress_refresh))
                 self.handleSchedulingResponse(response, True)
 
@@ -1067,19 +1068,19 @@
 
         if self.action == "remove":
             if self.calendar.hasPropertyValueInAllComponents(Property("STATUS", "CANCELLED")):
-                log.debug("Implicit - attendee '%s' is removing cancelled UID: '%s'" % (self.attendee, self.uid))
+                log.debug("Implicit - attendee '{attendee}' is removing cancelled UID: '{uid}'", attendee=self.attendee, uid=self.uid)
                 # Nothing else to do
             elif doScheduling:
                 # If attendee is already marked as declined in all components - nothing to do
                 attendees = self.calendar.getAttendeeProperties((self.attendee,))
                 if all([attendee.parameterValue("PARTSTAT", "NEEDS-ACTION") == "DECLINED" for attendee in attendees]):
-                    log.debug("Implicit - attendee '%s' is removing fully declined UID: '%s'" % (self.attendee, self.uid))
+                    log.debug("Implicit - attendee '{attendee}' is removing fully declined UID: '{uid}'", attendee=self.attendee, uid=self.uid)
                     # Nothing else to do
                 else:
-                    log.debug("Implicit - attendee '%s' is cancelling UID: '%s'" % (self.attendee, self.uid))
+                    log.debug("Implicit - attendee '{attendee}' is cancelling UID: '{uid}'", attendee=self.attendee, uid=self.uid)
                     yield self.scheduleCancelWithOrganizer()
             else:
-                log.debug("Implicit - attendee '%s' is removing UID without server scheduling: '%s'" % (self.attendee, self.uid))
+                log.debug("Implicit - attendee '{attendee}' is removing UID without server scheduling: '{uid}'", attendee=self.attendee, uid=self.uid)
                 # Nothing else to do
             returnValue(None)
 
@@ -1090,7 +1091,7 @@
                 oldOrganizer = self.oldcalendar.getOrganizer()
                 newOrganizer = self.calendar.getOrganizer()
                 if oldOrganizer != newOrganizer:
-                    log.error("Cannot change ORGANIZER: UID:%s" % (self.uid,))
+                    log.error("Cannot change ORGANIZER: UID:{uid}", uid=self.uid)
                     raise HTTPError(ErrorResponse(
                         responsecode.FORBIDDEN,
                         (caldav_namespace, "valid-attendee-change"),
@@ -1105,7 +1106,7 @@
 
                 # If Organizer copy exists we cannot allow SCHEDULE-AGENT=CLIENT or NONE
                 if not doScheduling:
-                    log.error("Attendee '%s' is not allowed to change SCHEDULE-AGENT on organizer: UID:%s" % (self.attendeePrincipal, self.uid,))
+                    log.error("Attendee '{attendee}' is not allowed to change SCHEDULE-AGENT on organizer: UID:{uid}", attendee=self.attendeePrincipal, uid=self.uid)
                     raise HTTPError(ErrorResponse(
                         responsecode.FORBIDDEN,
                         (caldav_namespace, "valid-attendee-change"),
@@ -1119,11 +1120,11 @@
 
                 if not changeAllowed:
                     if self.calendar.hasPropertyValueInAllComponents(Property("STATUS", "CANCELLED")):
-                        log.debug("Attendee '%s' is creating CANCELLED event for mismatched UID: '%s' - removing entire event" % (self.attendee, self.uid,))
+                        log.debug("Attendee '{attendee}' is creating CANCELLED event for mismatched UID: '{uid}' - removing entire event", attendee=self.attendee, uid=self.uid)
                         self.return_status = ImplicitScheduler.STATUS_ORPHANED_EVENT
                         returnValue(None)
                     else:
-                        log.error("Attendee '%s' is not allowed to make an unauthorized change to an organized event: UID:%s" % (self.attendeePrincipal, self.uid,))
+                        log.error("Attendee '{attendee}' is not allowed to make an unauthorized change to an organized event: UID:{uid}", attendee=self.attendeePrincipal, uid=self.uid)
                         raise HTTPError(ErrorResponse(
                             responsecode.FORBIDDEN,
                             (caldav_namespace, "valid-attendee-change"),
@@ -1133,21 +1134,21 @@
                 # Check that the return calendar actually has any components left - this can happen if a cancelled
                 # component is removed and replaced by another cancelled or invalid one
                 if self.calendar.mainType() is None:
-                    log.debug("Attendee '%s' is replacing CANCELLED event: '%s' - removing entire event" % (self.attendee, self.uid,))
+                    log.debug("Attendee '{attendee}' is replacing CANCELLED event: '{uid}' - removing entire event", attendee=self.attendee, uid=self.uid)
                     self.return_status = ImplicitScheduler.STATUS_ORPHANED_EVENT
                     returnValue(None)
 
                 if not doITipReply:
-                    log.debug("Implicit - attendee '%s' is updating UID: '%s' but change is not significant" % (self.attendee, self.uid))
+                    log.debug("Implicit - attendee '{attendee}' is updating UID: '{uid}' but change is not significant", attendee=self.attendee, uid=self.uid)
                     returnValue(self.return_calendar)
-                log.debug("Attendee '%s' is allowed to update UID: '%s' with local organizer '%s'" % (self.attendee, self.uid, self.organizer))
+                log.debug("Attendee '{attendee}' is allowed to update UID: '{uid}' with local organizer '{organizer}'", attendee=self.attendee, uid=self.uid, organizer=self.organizer)
 
             elif isinstance(self.organizerAddress, LocalCalendarUser):
                 # If Organizer copy does not exist we cannot allow SCHEDULE-AGENT=SERVER
                 if doScheduling:
                     # Check to see whether all instances are CANCELLED
                     if self.calendar.hasPropertyValueInAllComponents(Property("STATUS", "CANCELLED")):
-                        log.debug("Attendee '%s' is creating CANCELLED event for missing UID: '%s' - removing entire event" % (self.attendee, self.uid,))
+                        log.debug("Attendee '{attendee}' is creating CANCELLED event for missing UID: '{uid}' - removing entire event", attendee=self.attendee, uid=self.uid)
                         self.return_status = ImplicitScheduler.STATUS_ORPHANED_CANCELLED_EVENT
                         returnValue(None)
                     else:
@@ -1155,25 +1156,25 @@
                         if self.oldcalendar:
                             oldScheduling = self.oldcalendar.getOrganizerScheduleAgent()
                             if not oldScheduling:
-                                log.error("Attendee '%s' is not allowed to set SCHEDULE-AGENT=SERVER on organizer: UID:%s" % (self.attendeePrincipal, self.uid,))
+                                log.error("Attendee '{attendee}' is not allowed to set SCHEDULE-AGENT=SERVER on organizer: UID:{uid}", attendee=self.attendeePrincipal, uid=self.uid)
                                 raise HTTPError(ErrorResponse(
                                     responsecode.FORBIDDEN,
                                     (caldav_namespace, "valid-attendee-change"),
                                     "Attendee cannot change organizer state",
                                 ))
 
-                        log.debug("Attendee '%s' is not allowed to update UID: '%s' - missing organizer copy - removing entire event" % (self.attendee, self.uid,))
+                        log.debug("Attendee '{attendee}' is not allowed to update UID: '{uid}' - missing organizer copy - removing entire event", attendee=self.attendee, uid=self.uid)
                         self.return_status = ImplicitScheduler.STATUS_ORPHANED_EVENT
                         returnValue(None)
                 else:
-                    log.debug("Implicit - attendee '%s' is modifying UID without server scheduling: '%s'" % (self.attendee, self.uid))
+                    log.debug("Implicit - attendee '{attendee}' is modifying UID without server scheduling: '{uid}'", attendee=self.attendee, uid=self.uid)
                     # Nothing else to do
                     returnValue(None)
 
             elif isinstance(self.organizerAddress, InvalidCalendarUser):
                 # We will allow the attendee to do anything in this case, but we will mark the organizer
                 # with a schedule-status error
-                log.debug("Attendee '%s' is allowed to update UID: '%s' with invalid organizer '%s'" % (self.attendee, self.uid, self.organizer))
+                log.debug("Attendee '{attendee}' is allowed to update UID: '{uid}' with invalid organizer '{organizer}'", attendee=self.attendee, uid=self.uid, organizer=self.organizer)
                 if doScheduling:
                     self.calendar.setParameterToValueForPropertyWithValue(
                         "SCHEDULE-STATUS",
@@ -1187,14 +1188,14 @@
                 # to make any change they like as we cannot verify what is reasonable. In reality
                 # we ought to be comparing the Attendee changes against the attendee's own copy
                 # and restrict changes based on that when the organizer's copy is not available.
-                log.debug("Attendee '%s' is allowed to update UID: '%s' with remote organizer '%s'" % (self.attendee, self.uid, self.organizer))
+                log.debug("Attendee '{attendee}' is allowed to update UID: '{uid}' with remote organizer '{organizer}'", attendee=self.attendee, uid=self.uid, organizer=self.organizer)
                 changedRids = None
 
             if doScheduling:
-                log.debug("Implicit - attendee '%s' is updating UID: '%s'" % (self.attendee, self.uid))
+                log.debug("Implicit - attendee '{attendee}' is updating UID: '{uid}'", attendee=self.attendee, uid=self.uid)
                 yield self.scheduleWithOrganizer(changedRids)
             else:
-                log.debug("Implicit - attendee '%s' is updating UID without server scheduling: '%s'" % (self.attendee, self.uid))
+                log.debug("Implicit - attendee '{attendee}' is updating UID without server scheduling: '{uid}'", attendee=self.attendee, uid=self.uid)
                 # Nothing else to do
 
 
@@ -1203,7 +1204,7 @@
 
         if self.action == "remove":
             # Nothing else to do
-            log.debug("Implicit - missing attendee is removing UID without server scheduling: '%s'" % (self.uid,))
+            log.debug("Implicit - missing attendee is removing UID without server scheduling: '{uid}'", uid=self.uid)
 
         else:
             # Make sure ORGANIZER is not changed if originally SCHEDULE-AGENT=SERVER
@@ -1212,7 +1213,7 @@
                 oldOrganizer = self.oldcalendar.getOrganizer()
                 newOrganizer = self.calendar.getOrganizer()
                 if oldOrganizer != newOrganizer and self.oldcalendar.getOrganizerScheduleAgent():
-                    log.error("Cannot change ORGANIZER: UID:%s" % (self.uid,))
+                    log.error("Cannot change ORGANIZER: UID:{uid}", uid=self.uid)
                     raise HTTPError(ErrorResponse(
                         responsecode.FORBIDDEN,
                         (caldav_namespace, "valid-attendee-change"),
@@ -1221,7 +1222,7 @@
 
             # Never allow a missing attendee with a locally hosted organizer
             if isinstance(self.organizerAddress, LocalCalendarUser):
-                log.error("Cannot remove ATTENDEE: UID:%s" % (self.uid,))
+                log.error("Cannot remove ATTENDEE: UID:{uid}", uid=self.uid)
                 raise HTTPError(ErrorResponse(
                     responsecode.FORBIDDEN,
                     (caldav_namespace, "valid-attendee-change"),
@@ -1230,7 +1231,7 @@
 
             # We will allow the attendee to do anything in this case, but we will mark the organizer
             # with a schedule-status error and schedule-agent none
-            log.debug("Missing attendee is allowed to update UID: '%s' with invalid organizer '%s'" % (self.uid, self.organizer))
+            log.debug("Missing attendee is allowed to update UID: '{uid}' with invalid organizer '{organizer}'", uid=self.uid, organizer=self.organizer)
 
             # Check SCHEDULE-AGENT and coerce SERVER to NONE
             if self.calendar.getOrganizerScheduleAgent():
@@ -1241,14 +1242,14 @@
     def checkOrganizerScheduleAgent(self):
 
         is_server = self.calendar.getOrganizerScheduleAgent()
-        local_organizer = type(self.organizerAddress) in (LocalCalendarUser, PartitionedCalendarUser, OtherServerCalendarUser,)
+        local_organizer = type(self.organizerAddress) in (LocalCalendarUser, OtherServerCalendarUser,)
 
         if config.Scheduling.iMIP.Enabled and self.organizerAddress.cuaddr.lower().startswith("mailto:"):
             return is_server
 
         if not config.Scheduling.iSchedule.Enabled and not local_organizer and is_server:
             # Coerce ORGANIZER to SCHEDULE-AGENT=NONE
-            log.debug("Attendee '%s' is not allowed to use SCHEDULE-AGENT=SERVER on organizer: UID:%s" % (self.attendeePrincipal, self.uid,))
+            log.debug("Attendee '{attendee}' is not allowed to use SCHEDULE-AGENT=SERVER on organizer: UID:{uid}", attendee=self.attendeePrincipal, uid=self.uid)
             self.calendar.setParameterToValueForPropertyWithValue("SCHEDULE-AGENT", "NONE", "ORGANIZER", None)
             self.calendar.setParameterToValueForPropertyWithValue("SCHEDULE-STATUS", iTIPRequestStatus.NO_USER_SUPPORT_CODE, "ORGANIZER", None)
             is_server = False
@@ -1270,8 +1271,8 @@
         calendar_resource = (yield getCalendarObjectForRecord(self.calendar_home.transaction(), self.organizerPrincipal, self.uid))
         if calendar_resource is not None:
             self.organizer_calendar = (yield calendar_resource.componentForUser())
-        elif type(self.organizerAddress) in (PartitionedCalendarUser, OtherServerCalendarUser,):
-            # For partitioning where the organizer is on a different node, we will assume that the attendee's copy
+        elif type(self.organizerAddress) in (OtherServerCalendarUser,):
+            # For podding where the organizer is on a different node, we will assume that the attendee's copy
             # of the event is up to date and "authoritative". So we pretend that is the organizer copy
             self.organizer_calendar = self.oldcalendar
 
@@ -1288,7 +1289,7 @@
             oldcalendar = self.organizer_calendar
             oldcalendar.attendeesView((self.attendee,), onlyScheduleAgentServer=True)
             if oldcalendar.mainType() is None:
-                log.debug("Implicit - attendee '%s' cannot use an event they are not an attendee of, UID: '%s'" % (self.attendee, self.uid))
+                log.debug("Implicit - attendee '{attendee}' cannot use an event they are not an attendee of, UID: '{uid}'", attendee=self.attendee, uid=self.uid)
                 raise HTTPError(ErrorResponse(
                     responsecode.FORBIDDEN,
                     (caldav_namespace, "valid-attendee-change"),
@@ -1303,7 +1304,8 @@
         # First make sure we are allowed to schedule
         self.testSchedulingAllowed()
 
-        self.logItems["itip.reply"] = "reply"
+        if self.logItems is not None:
+            self.logItems["itip.reply"] = "reply"
 
         itipmsg = iTipGenerator.generateAttendeeReply(self.calendar, self.attendee, changedRids=changedRids)
 
@@ -1316,7 +1318,8 @@
         # First make sure we are allowed to schedule
         self.testSchedulingAllowed()
 
-        self.logItems["itip.reply"] = "cancel"
+        if self.logItems is not None:
+            self.logItems["itip.reply"] = "cancel"
 
         itipmsg = iTipGenerator.generateAttendeeReply(self.calendar, self.attendee, force_decline=True)
 
@@ -1335,7 +1338,7 @@
         def _gotResponse(response):
             self.handleSchedulingResponse(response, False)
 
-        log.info("Implicit %s - attendee: '%s' to organizer: '%s', UID: '%s'" % (action, self.attendee, self.organizer, self.uid,))
+        log.info("Implicit {action} - attendee: '{attendee}' to organizer: '{organizer}', UID: '{uid}'", action=action, attendee=self.attendee, organizer=self.organizer, uid=self.uid)
         d = scheduler.doSchedulingViaPUT(self.originator, (self.organizer,), itipmsg, internal_request=True)
         d.addCallback(_gotResponse)
         return d

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/delivery.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/delivery.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/delivery.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -40,8 +40,7 @@
 from twistedcaldav.ical import normalizeCUAddress, Component
 from twistedcaldav.util import utf8String
 
-from txdav.caldav.datastore.scheduling.cuaddress import PartitionedCalendarUser, RemoteCalendarUser, \
-    OtherServerCalendarUser
+from txdav.caldav.datastore.scheduling.cuaddress import RemoteCalendarUser, OtherServerCalendarUser
 from txdav.caldav.datastore.scheduling.delivery import DeliveryService
 from txdav.caldav.datastore.scheduling.ischedule.dkim import DKIMRequest, DKIMUtils
 from txdav.caldav.datastore.scheduling.ischedule.remoteservers import IScheduleServerRecord
@@ -58,7 +57,7 @@
 
 """
 Handles the sending of iSchedule scheduling messages. Used for both cross-domain scheduling,
-as well as internal partitioning or podding.
+as well as internal podding.
 """
 
 __all__ = [
@@ -72,6 +71,7 @@
 class ScheduleViaISchedule(DeliveryService):
 
     domainServerMap = {}
+    servermgr = None
 
     @classmethod
     def serviceType(cls):
@@ -82,9 +82,7 @@
     @inlineCallbacks
     def matchCalendarUserAddress(cls, cuaddr):
 
-        # TODO: here is where we would attempt service discovery based on the cuaddr.
-
-        # Only handle mailtos:
+        # Handle mailtos:
         if cuaddr.lower().startswith("mailto:"):
             domain = extractEmailDomain(cuaddr)
             server = (yield cls.serverForDomain(domain))
@@ -100,25 +98,30 @@
     def serverForDomain(cls, domain):
         if domain not in cls.domainServerMap:
 
-            # First check built-in list of remote servers
-            servermgr = IScheduleServers()
-            server = servermgr.mapDomain(domain)
-            if server is not None:
-                cls.domainServerMap[domain] = server
-            else:
-                # Lookup domain
-                result = (yield lookupServerViaSRV(domain))
-                if result is None:
+            if config.Scheduling.iSchedule.Enabled:
+
+                # First check built-in list of remote servers
+                if cls.servermgr is None:
+                    cls.servermgr = IScheduleServers()
+                server = cls.servermgr.mapDomain(domain)
+                if server is not None:
+                    cls.domainServerMap[domain] = server
+                else:
                     # Lookup domain
-                    result = (yield lookupServerViaSRV(domain, service="_ischedule"))
+                    result = (yield lookupServerViaSRV(domain))
                     if result is None:
-                        cls.domainServerMap[domain] = None
+                        # Lookup domain
+                        result = (yield lookupServerViaSRV(domain, service="_ischedule"))
+                        if result is None:
+                            cls.domainServerMap[domain] = None
+                        else:
+                            # Create the iSchedule server record for this server
+                            cls.domainServerMap[domain] = IScheduleServerRecord(uri="http://%s:%s/.well-known/ischedule" % result)
                     else:
                         # Create the iSchedule server record for this server
-                        cls.domainServerMap[domain] = IScheduleServerRecord(uri="http://%s:%s/.well-known/ischedule" % result)
-                else:
-                    # Create the iSchedule server record for this server
-                    cls.domainServerMap[domain] = IScheduleServerRecord(uri="https://%s:%s/.well-known/ischedule" % result)
+                        cls.domainServerMap[domain] = IScheduleServerRecord(uri="https://%s:%s/.well-known/ischedule" % result)
+            else:
+                cls.domainServerMap[domain] = None
 
         returnValue(cls.domainServerMap[domain])
 
@@ -136,8 +139,6 @@
             if isinstance(recipient, RemoteCalendarUser):
                 # Map the recipient's domain to a server
                 server = (yield self.serverForDomain(recipient.domain))
-            elif isinstance(recipient, PartitionedCalendarUser):
-                server = self._getServerForPartitionedUser(recipient)
             elif isinstance(recipient, OtherServerCalendarUser):
                 server = self._getServerForOtherServerUser(recipient)
             else:
@@ -182,20 +183,6 @@
         yield DeferredList(deferreds)
 
 
-    def _getServerForPartitionedUser(self, recipient):
-
-        if not hasattr(self, "partitionedServers"):
-            self.partitionedServers = {}
-
-        partition = recipient.principal.partitionURI()
-        if partition not in self.partitionedServers:
-            self.partitionedServers[partition] = IScheduleServerRecord(uri=joinURL(partition, "/ischedule"))
-            self.partitionedServers[partition].unNormalizeAddresses = False
-            self.partitionedServers[partition].moreHeaders.append(recipient.principal.server().secretHeader())
-
-        return self.partitionedServers[partition]
-
-
     def _getServerForOtherServerUser(self, recipient):
 
         if not hasattr(self, "otherServers"):
@@ -203,9 +190,12 @@
 
         serverURI = recipient.principal.serverURI()
         if serverURI not in self.otherServers:
-            self.otherServers[serverURI] = IScheduleServerRecord(uri=joinURL(serverURI, "/ischedule"))
-            self.otherServers[serverURI].unNormalizeAddresses = not recipient.principal.server().isImplicit
-            self.otherServers[serverURI].moreHeaders.append(recipient.principal.server().secretHeader())
+            self.otherServers[serverURI] = IScheduleServerRecord(
+                uri=joinURL(serverURI, config.Servers.InboxName),
+                unNormalizeAddresses=not recipient.principal.server().isImplicit,
+                moreHeaders=[recipient.principal.server().secretHeader(), ],
+                podding=True,
+            )
 
         return self.otherServers[serverURI]
 
@@ -222,6 +212,7 @@
         self.refreshOnly = refreshOnly
         self.headers = None
         self.data = None
+        self.original_organizer = None
 
 
     @inlineCallbacks
@@ -365,7 +356,8 @@
 
         # The Originator must be the ORGANIZER (for a request) or ATTENDEE (for a reply)
         originator = self.scheduler.organizer.cuaddr if self.scheduler.isiTIPRequest else self.scheduler.attendee
-        originator = normalizeCUAddress(originator, normalizationLookup, self.scheduler.txn.directoryService().recordWithCalendarUserAddress, toUUID=False)
+        if self.server.unNormalizeAddresses:
+            originator = normalizeCUAddress(originator, normalizationLookup, self.scheduler.txn.directoryService().recordWithCalendarUserAddress, toUUID=False)
         self.headers.addRawHeader("Originator", utf8String(originator))
         self.sign_headers.append("Originator")
 
@@ -414,15 +406,15 @@
         """
 
         if self.data is None:
+
             # Need to remap cuaddrs from urn:uuid
-            if self.server.unNormalizeAddresses and self.scheduler.method == "PUT":
-                normalizedCalendar = self.scheduler.calendar.duplicate()
+            normalizedCalendar = self.scheduler.calendar.duplicate()
+            self.original_organizer = normalizedCalendar.getOrganizer()
+            if self.server.unNormalizeAddresses:
                 normalizedCalendar.normalizeCalendarUserAddresses(
                     normalizationLookup,
                     self.scheduler.txn.directoryService().recordWithCalendarUserAddress,
                     toUUID=False)
-            else:
-                normalizedCalendar = self.scheduler.calendar
 
             # For VFREEBUSY we need to strip out ATTENDEEs that do not match the recipient list
             if self.scheduler.isfreebusy:
@@ -445,13 +437,12 @@
         f = Factory()
         f.protocol = HTTPClientProtocol
         if ssl:
-            ep = GAIEndpoint(reactor, host, port,
-                             _configuredClientContextFactory())
+            ep = GAIEndpoint(reactor, host, port, _configuredClientContextFactory())
         else:
             ep = GAIEndpoint(reactor, host, port)
         proto = (yield ep.connect(f))
 
-        if config.Scheduling.iSchedule.DKIM.Enabled:
+        if not self.server.podding() and config.Scheduling.iSchedule.DKIM.Enabled:
             domain, selector, key_file, algorithm, useDNSKey, useHTTPKey, usePrivateExchangeKey, expire = DKIMUtils.getConfiguration(config)
             request = DKIMRequest(
                 "POST",
@@ -503,6 +494,14 @@
             calendar_data = response.childOfType(CalendarData)
             if calendar_data:
                 calendar_data = str(calendar_data)
+                if self.server.unNormalizeAddresses and self.original_organizer is not None:
+                    # Need to restore original ORGANIZER value if it got unnormalized
+                    calendar = Component.fromString(calendar_data)
+                    organizers = calendar.getAllPropertiesInAnyComponent("ORGANIZER", depth=1)
+                    for organizer in organizers:
+                        organizer.setValue(self.original_organizer)
+                    calendar_data = str(calendar)
+
             error = response.childOfType(Error)
             if error:
                 error = error.children

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/localservers.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/localservers.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/localservers.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -31,18 +31,12 @@
 the principals across the whole domain need to be able to directly schedule each other and know of each other's
 existence. A common scenario would be a production server and a development/test server.
 
-Each server is identified by an id and url. The id is used when assigning principals to a specific server. Each
-server can also support multiple partitions, and each of those is identified by an id and url, with the id also
-being used to assign principals to a specific partition.
+Each server is identified by an id and url. The id is used when assigning principals to a specific server.
 
-These servers support the concept of "partitioning" and "podding".
+These servers support the concept of "podding".
 
-A "partitioned" service is one that spreads its
-users out across multiple stores and does reverse proxying of incoming requests to the appropriate partitioned host.
-All servers within the same partition have to be running the same version of the software etc.
-
 A "podded" service is one where different groups of users are hosted on different servers, which may be of
-different versions etc. A "pod" may itself be "partitioned", but the partitioning is "invisible" to the outside world.
+different versions etc.
 """
 
 __all__ = [
@@ -104,13 +98,31 @@
     def getThisServer(self):
         return self._thisServer
 
+
+    def installReverseProxies(self, maxClients):
+        """
+        Install a reverse proxy for each of the other servers in the "pod".
+
+        @param maxClients: maximum number of clients in the pool.
+        @type maxClients: C{int}
+        """
+
+        for server in self._servers.values():
+            if server.thisServer:
+                continue
+            installPool(
+                server.id,
+                server.uri,
+                maxClients,
+            )
+
 Servers = ServersDB()   # Global server DB
 
 
 
 class Server(object):
     """
-    Represents a server which may itself be partitioned.
+    Represents a server.
     """
 
     def __init__(self):
@@ -120,8 +132,6 @@
         self.ips = set()
         self.allowed_from_ips = set()
         self.shared_secret = None
-        self.partitions = {}
-        self.partitions_ips = set()
         self.isImplicit = True
 
 
@@ -164,25 +174,12 @@
                 actual_ips.add(item)
         self.allowed_from_ips = actual_ips
 
-        for uri in self.partitions.values():
-            parsed_uri = urlparse.urlparse(uri)
-            try:
-                ips = getIPsFromHost(parsed_uri.hostname)
-            except socket.gaierror, e:
-                msg = "Unable to lookup ip-addr for partition '%s': %s" % (parsed_uri.hostname, str(e))
-                log.error(msg)
-                if ignoreIPLookupFailures:
-                    ips = ()
-                else:
-                    raise ValueError(msg)
-            self.partitions_ips.update(ips)
 
-
     def checkThisIP(self, ip):
         """
-        Check that the passed in IP address corresponds to this server or one of its partitions.
+        Check that the passed in IP address corresponds to this server.
         """
-        return (ip in self.ips) or (ip in self.partitions_ips)
+        return (ip in self.ips)
 
 
     def hasAllowedFromIP(self):
@@ -218,38 +215,13 @@
         return (SERVER_SECRET_HEADER, self.shared_secret,)
 
 
-    def addPartition(self, id, uri):
-        self.partitions[id] = uri
 
-
-    def getPartitionURIForId(self, id):
-        return self.partitions.get(id)
-
-
-    def isPartitioned(self):
-        return len(self.partitions) != 0
-
-
-    def installReverseProxies(self, ownUID, maxClients):
-
-        for partition, url in self.partitions.iteritems():
-            if partition != ownUID:
-                installPool(
-                    partition,
-                    url,
-                    maxClients,
-                )
-
-
-
 ELEMENT_SERVERS = "servers"
 ELEMENT_SERVER = "server"
 ELEMENT_ID = "id"
 ELEMENT_URI = "uri"
 ELEMENT_ALLOWED_FROM = "allowed-from"
 ELEMENT_SHARED_SECRET = "shared-secret"
-ELEMENT_PARTITIONS = "partitions"
-ELEMENT_PARTITION = "partition"
 ATTR_IMPLICIT = "implicit"
 ATTR_VALUE_YES = "yes"
 ATTR_VALUE_NO = "no"
@@ -286,39 +258,13 @@
                     server.allowed_from_ips.add(node.text)
                 elif node.tag == ELEMENT_SHARED_SECRET:
                     server.shared_secret = node.text
-                elif node.tag == ELEMENT_PARTITIONS:
-                    ServersParser._parsePartition(xmlFile, node, server)
                 else:
                     raise RuntimeError("Invalid element '%s' in servers file: '%s'" % (node.tag, xmlFile,))
 
             if server.id is None or server.uri is None:
-                raise RuntimeError("Invalid partition '%s' in servers file: '%s'" % (child.tag, xmlFile,))
+                raise RuntimeError("Invalid server '%s' in servers file: '%s'" % (child.tag, xmlFile,))
 
             server.check(ignoreIPLookupFailures=ignoreIPLookupFailures)
             results[server.id] = server
 
         return results
-
-
-    @staticmethod
-    def _parsePartition(xmlFile, partitions, server):
-
-        for child in partitions:
-
-            if child.tag != ELEMENT_PARTITION:
-                raise RuntimeError("Unknown partition type: '%s' in servers file: '%s'" % (child.tag, xmlFile,))
-
-            id = None
-            uri = None
-            for node in child:
-                if node.tag == ELEMENT_ID:
-                    id = node.text
-                elif node.tag == ELEMENT_URI:
-                    uri = node.text
-                else:
-                    raise RuntimeError("Invalid element '%s' in augment file: '%s'" % (node.tag, xmlFile,))
-
-            if id is None or uri is None:
-                raise RuntimeError("Invalid partition '%s' in servers file: '%s'" % (child.tag, xmlFile,))
-
-            server.addPartition(id, uri)

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/remoteservers.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/remoteservers.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/remoteservers.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -23,7 +23,7 @@
 
 """
 XML based iSchedule configuration file handling. This is for handling of remote servers. The localservers.py module
-handles servers that are local (partitioned or podded).
+handles servers that are local (podded).
 """
 
 __all__ = [
@@ -138,7 +138,7 @@
     """
     Contains server-to-server details.
     """
-    def __init__(self, uri=None):
+    def __init__(self, uri=None, unNormalizeAddresses=True, moreHeaders=[], podding=False):
         """
         @param recordType: record type for directory entry.
         """
@@ -148,8 +148,9 @@
         self.allow_to = True
         self.domains = []
         self.client_hosts = []
-        self.unNormalizeAddresses = True
-        self.moreHeaders = []
+        self.unNormalizeAddresses = unNormalizeAddresses
+        self.moreHeaders = moreHeaders
+        self._podding = podding
 
         if uri:
             self.uri = uri
@@ -160,6 +161,10 @@
         return (self.ssl, self.host, self.port, self.path,)
 
 
+    def podding(self):
+        return self._podding
+
+
     def redirect(self, location):
         """
         Permanent redirect for the lifetime of this record.

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/resource.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/resource.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -52,7 +52,7 @@
     Extends L{DAVResource} to provide iSchedule inbox functionality.
     """
 
-    def __init__(self, parent, store):
+    def __init__(self, parent, store, podding=False):
         """
         @param parent: the parent resource of this one.
         """
@@ -62,6 +62,7 @@
 
         self.parent = parent
         self._newStore = store
+        self._podding = podding
 
 
     def deadProperties(self):
@@ -109,12 +110,12 @@
     def render(self, request):
         output = """<html>
 <head>
-<title>Server To Server Inbox Resource</title>
+<title>%(rtype)s Inbox Resource</title>
 </head>
 <body>
-<h1>Server To Server Inbox Resource.</h1>
+<h1>%(rtype)s Inbox Resource.</h1>
 </body
-</html>"""
+</html>""" % {"rtype" : "Podding" if self._podding else "iSchedule", }
 
         response = Response(200, {}, output)
         response.headers.setHeader("content-type", MimeType("text", "html"))
@@ -126,7 +127,7 @@
         The iSchedule GET method.
         """
 
-        if not request.args:
+        if not request.args or self._podding:
             # Do normal GET behavior
             return self.render(request)
 
@@ -229,7 +230,7 @@
         txn = transactionFromRequest(request, self._newStore)
 
         # This is a server-to-server scheduling operation.
-        scheduler = IScheduleScheduler(txn, None)
+        scheduler = IScheduleScheduler(txn, None, podding=self._podding)
 
         # Check content first
         contentType = request.headers.getHeader("content-type")
@@ -259,7 +260,8 @@
         else:
             yield txn.commit()
         response = result.response(format=format)
-        response.headers.addRawHeader(ISCHEDULE_CAPABILITIES, str(config.Scheduling.iSchedule.SerialNumber))
+        if not self._podding:
+            response.headers.addRawHeader(ISCHEDULE_CAPABILITIES, str(config.Scheduling.iSchedule.SerialNumber))
         returnValue(response)
 
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/scheduler.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/scheduler.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/scheduler.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -119,6 +119,9 @@
 
 
 class IScheduleScheduler(RemoteScheduler):
+    """
+    Handles iSchedule and podding requests.
+    """
 
     scheduleResponse = IScheduleResponseQueue
 
@@ -138,6 +141,11 @@
         "max-recipients": (ischedule_namespace, "max-recipients"),
     }
 
+    def __init__(self, txn, originator_uid, logItems=None, noAttendeeRefresh=False, podding=False):
+        super(IScheduleScheduler, self).__init__(txn, originator_uid, logItems=logItems, noAttendeeRefresh=noAttendeeRefresh)
+        self._podding = podding
+
+
     @inlineCallbacks
     def doSchedulingViaPOST(self, remoteAddr, headers, body, calendar, originator, recipients):
         """
@@ -148,7 +156,7 @@
         self.headers = headers
         self.verified = False
 
-        if config.Scheduling.iSchedule.DKIM.Enabled:
+        if not self._podding and config.Scheduling.iSchedule.DKIM.Enabled:
             verifier = DKIMVerifier(self.headers, body, protocol_debug=config.Scheduling.iSchedule.DKIM.ProtocolDebug)
             try:
                 yield verifier.verify()
@@ -172,11 +180,16 @@
                     msg,
                 ))
 
-        if self.headers.getRawHeaders('x-calendarserver-itip-refreshonly', ("F"))[0] == "T":
+        if self._podding and self.headers.getRawHeaders('x-calendarserver-itip-refreshonly', ("F"))[0] == "T":
             self.txn.doing_attendee_refresh = 1
 
         # Normalize recipient addresses
-        recipients = [normalizeCUAddress(recipient, normalizationLookup, self.txn.directoryService().recordWithCalendarUserAddress) for recipient in recipients]
+        results = []
+        for recipient in recipients:
+            normalized = normalizeCUAddress(recipient, normalizationLookup, self.txn.directoryService().recordWithCalendarUserAddress)
+            self.recipientsNormalizationMap[normalized] = recipient
+            results.append(normalized)
+        recipients = results
 
         result = (yield super(IScheduleScheduler, self).doSchedulingViaPOST(originator, recipients, calendar))
         returnValue(result)
@@ -216,7 +229,7 @@
         originatorPrincipal = self.txn.directoryService().recordWithCalendarUserAddress(self.originator)
         localUser = (yield addressmapping.mapper.isCalendarUserInMyDomain(self.originator))
         if originatorPrincipal or localUser:
-            if originatorPrincipal.locallyHosted():
+            if originatorPrincipal.thisServer():
                 log.error("Cannot use originator that is on this server: %s" % (self.originator,))
                 raise HTTPError(self.errorResponse(
                     responsecode.FORBIDDEN,
@@ -294,23 +307,17 @@
 
     def _validAlternateServer(self, principal):
         """
-        Check the validity of the partitioned host.
+        Check the validity of the podded host.
         """
 
-        # Extract expected host/port. This will be the partitionURI, or if no partitions,
-        # the serverURI
-        expected_uri = principal.partitionURI()
-        if expected_uri is None:
-            expected_uri = principal.serverURI()
+        # Extract expected host/port. This will be the serverURI.
+        expected_uri = principal.serverURI()
         expected_uri = urlparse.urlparse(expected_uri)
 
         # Get the request IP and map to hostname.
         clientip = self.remoteAddr.host
 
-        # Check against this server (or any of its partitions). We need this because an external iTIP message
-        # may be addressed to users on different partitions, and the node receiving the iTIP message will need to
-        # forward it to the partition nodes, thus the client ip seen by the partitions will in fact be the initial
-        # receiving node.
+        # Check against this server.
         matched = False
         if Servers.getThisServer().checkThisIP(clientip):
             matched = True
@@ -362,7 +369,7 @@
         if organizer:
             organizerPrincipal = self.txn.directoryService().recordWithCalendarUserAddress(organizer)
             if organizerPrincipal:
-                if organizerPrincipal.locallyHosted():
+                if organizerPrincipal.thisServer():
                     log.error("Invalid ORGANIZER in calendar data: %s" % (self.calendar,))
                     raise HTTPError(self.errorResponse(
                         responsecode.FORBIDDEN,
@@ -370,7 +377,7 @@
                         "Organizer is not local to server",
                     ))
                 else:
-                    # Check that the origin server is the correct partition
+                    # Check that the origin server is the correct pod
                     self.organizer = calendarUserFromPrincipal(organizer, organizerPrincipal)
                     self._validAlternateServer(self.organizer.principal)
             else:
@@ -403,7 +410,7 @@
         # Attendee cannot be local.
         attendeePrincipal = self.txn.directoryService().recordWithCalendarUserAddress(self.attendee)
         if attendeePrincipal:
-            if attendeePrincipal.locallyHosted():
+            if attendeePrincipal.thisServer():
                 log.error("Invalid ATTENDEE in calendar data: %s" % (self.calendar,))
                 raise HTTPError(self.errorResponse(
                     responsecode.FORBIDDEN,

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/test/test_delivery.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/test/test_delivery.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/test/test_delivery.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -42,6 +42,7 @@
         Make sure we do an exact comparison on EmailDomain
         """
 
+        self.patch(config.Scheduling.iSchedule, "Enabled", True)
         self.patch(config.Scheduling.iSchedule, "RemoteServers", "")
 
         # Only mailtos:
@@ -64,3 +65,9 @@
         self.assertFalse(result)
         result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user")
         self.assertFalse(result)
+
+        # Test when not enabled
+        ScheduleViaISchedule.domainServerMap = {}
+        self.patch(config.Scheduling.iSchedule, "Enabled", False)
+        result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user at example.com")
+        self.assertFalse(result)

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/test/test_localservers.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/test/test_localservers.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/ischedule/test/test_localservers.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -37,16 +37,6 @@
   <server>
     <id>00002</id>
     <uri>https://caldav2.example.com:8843</uri>
-    <partitions>
-        <partition>
-            <id>A</id>
-            <uri>https://machine1.example.com:8443</uri>
-        </partition>
-        <partition>
-            <id>B</id>
-            <uri>https://machine2.example.com:8443</uri>
-        </partition>
-    </partitions>
   </server>
 </servers>
 """
@@ -62,16 +52,6 @@
   <server>
     <id>00002</id>
     <uri>https://caldav2.example.com:8843</uri>
-    <partitions>
-        <partition>
-            <id>A</id>
-            <uri>https://machine1.example.com:8443</uri>
-        </partition>
-        <partition>
-            <id>B</id>
-            <uri>https://machine2.example.com:8443</uri>
-        </partition>
-    </partitions>
   </server>
 </servers>
 """
@@ -103,13 +83,7 @@
         self.assertEqual(servers.getServerById("00001").shared_secret, "foobar")
         self.assertEqual(servers.getServerById("00002").shared_secret, None)
 
-        self.assertEqual(len(servers.getServerById("00001").partitions), 0)
-        self.assertEqual(len(servers.getServerById("00002").partitions), 2)
 
-        self.assertEqual(servers.getServerById("00002").getPartitionURIForId("A"), "https://machine1.example.com:8443")
-        self.assertEqual(servers.getServerById("00002").getPartitionURIForId("B"), "https://machine2.example.com:8443")
-
-
     def test_this_server(self):
 
         servers = self._setupServers()
@@ -129,14 +103,6 @@
         self.assertTrue(servers.getServerById("00002").thisServer)
 
 
-    def test_check_is_partitioned(self):
-
-        servers = self._setupServers()
-
-        self.assertFalse(servers.getServerById("00001").isPartitioned())
-        self.assertTrue(servers.getServerById("00002").isPartitioned())
-
-
     def test_check_this_ip(self):
 
         servers = self._setupServers()

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/itip.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/itip.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/itip.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -476,16 +476,9 @@
                 pass
 
             elif attendee_comment is None and private_comment is not None:
-                # Remove all property parameters
-                private_comment.removeAllParameters()
+                # We now remove the private comment on the organizer's side if the attendee removed it
+                to_component.removeProperty(private_comment)
 
-                # Add default parameters
-                private_comment.setParameter("X-CALENDARSERVER-ATTENDEE-REF", attendee.value())
-                private_comment.setParameter("X-CALENDARSERVER-DTSTAMP", DateTime.getNowUTC().getText())
-
-                # Set value empty
-                private_comment.setValue("")
-
                 private_comment_changed = True
 
             elif attendee_comment is not None and private_comment is None:

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/scheduler.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/scheduler.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/scheduler.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -38,7 +38,6 @@
 from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser
 from txdav.caldav.datastore.scheduling.cuaddress import RemoteCalendarUser
 from txdav.caldav.datastore.scheduling.cuaddress import EmailCalendarUser
-from txdav.caldav.datastore.scheduling.cuaddress import PartitionedCalendarUser
 from txdav.caldav.datastore.scheduling.imip.delivery import ScheduleViaIMip
 from txdav.caldav.datastore.scheduling.ischedule.delivery import ScheduleViaISchedule
 from txdav.caldav.datastore.scheduling.itip import iTIPRequestStatus
@@ -52,8 +51,8 @@
 This module handles the delivery of scheduling messages to organizer and attendees. The basic idea is to first
 confirm the integrity of the incoming scheduling message, check authorization. Appropriate L{DeliveryService}s
 are then used to deliver the message to attendees or organizer. Delivery responses are processed and returned.
-This takes into account partitioning and podding of users by detecting the appropriate host for a calendar
-user and then dispatching the delivery accordingly.
+This takes into account podding of users by detecting the appropriate host for a calendar user and then
+dispatching the delivery accordingly.
 
 The L{Scheduler} class defines the basic behavior for processing deliveries. Sub-classes are defined for the
 different ways a deliver can be triggered.
@@ -144,6 +143,7 @@
 
         self.originator = None
         self.recipients = None
+        self.recipientsNormalizationMap = {}
         self.calendar = None
         self.organizer = None
         self.attendee = None
@@ -234,51 +234,6 @@
         returnValue(result)
 
 
-    @inlineCallbacks
-    def loadFromRequestData(self):
-        self.loadOriginatorFromRequestDetails()
-        self.loadRecipientsFromCalendarData()
-
-
-    def loadOriginatorFromRequestDetails(self):
-        # Get the originator who is the authenticated user
-        originatorPrincipal = self.txn.directoryService().recordWithUID(self.originator_uid)
-
-        # Pick the canonical CUA:
-        originator = originatorPrincipal.canonicalCalendarUserAddress() if originatorPrincipal else ""
-
-        if not originator:
-            log.error("%s request must have Originator" % (self.method,))
-            raise HTTPError(self.errorResponse(
-                responsecode.FORBIDDEN,
-                self.errorElements["originator-missing"],
-                "Missing originator",
-            ))
-        else:
-            self.originator = originator
-
-
-    def loadRecipientsFromCalendarData(self):
-
-        # Get the ATTENDEEs
-        attendees = list()
-        unique_set = set()
-        for attendee, _ignore in self.calendar.getAttendeesByInstance():
-            if attendee not in unique_set:
-                attendees.append(attendee)
-                unique_set.add(attendee)
-
-        if not attendees:
-            log.error("%s request must have at least one Recipient" % (self.method,))
-            raise HTTPError(self.errorResponse(
-                responsecode.FORBIDDEN,
-                self.errorElements["recipient-missing"],
-                "Must have recipients",
-            ))
-        else:
-            self.recipients = list(attendees)
-
-
     def preProcessCalendarData(self):
         """
         After loading calendar data from the request, do some optional processing of it. This method will be
@@ -476,11 +431,10 @@
         freebusy = self.checkForFreeBusy()
 
         # Prepare for multiple responses
-        responses = self.scheduleResponse(self.method, responsecode.OK)
+        responses = self.scheduleResponse(self.method, responsecode.OK, self.mapRecipientAddress)
 
         # Loop over each recipient and aggregate into lists by service types.
         caldav_recipients = []
-        partitioned_recipients = []
         otherserver_recipients = []
         remote_recipients = []
         imip_recipients = []
@@ -502,9 +456,6 @@
             elif isinstance(recipient, LocalCalendarUser):
                 caldav_recipients.append(recipient)
 
-            elif isinstance(recipient, PartitionedCalendarUser):
-                partitioned_recipients.append(recipient)
-
             elif isinstance(recipient, OtherServerCalendarUser):
                 otherserver_recipients.append(recipient)
 
@@ -526,10 +477,6 @@
         if caldav_recipients:
             yield self.generateLocalSchedulingResponses(caldav_recipients, responses, freebusy)
 
-        # Now process partitioned recipients
-        if partitioned_recipients:
-            yield self.generateRemoteSchedulingResponses(partitioned_recipients, responses, freebusy, getattr(self.txn, 'doing_attendee_refresh', False))
-
         # Now process other server recipients
         if otherserver_recipients:
             yield self.generateRemoteSchedulingResponses(otherserver_recipients, responses, freebusy, getattr(self.txn, 'doing_attendee_refresh', False))
@@ -579,7 +526,11 @@
         return requestor.generateSchedulingResponses()
 
 
+    def mapRecipientAddress(self, cuaddr):
+        return self.recipientsNormalizationMap.get(cuaddr, cuaddr)
 
+
+
 class RemoteScheduler(Scheduler):
 
     def checkOrganizer(self):
@@ -614,8 +565,8 @@
             else:
                 # Map recipient to their inbox
                 inbox = None
-                if principal.calendarsEnabled() and principal.thisServer():
-                    if principal.locallyHosted():
+                if principal.calendarsEnabled():
+                    if principal.thisServer():
                         recipient_home = yield self.txn.calendarHomeWithUID(principal.uid, create=True)
                         if recipient_home:
                             inbox = (yield recipient_home.calendarWithName("inbox"))
@@ -710,7 +661,7 @@
         ["recipient", "reqstatus", "calendar", "error", "message", ]
     )
 
-    def __init__(self, method, success_response):
+    def __init__(self, method, success_response, recipient_mapper=None):
         """
         @param method: the name of the method generating the queue.
         @param success_response: the response to return in lieu of a
@@ -719,6 +670,7 @@
         self.responses = []
         self.method = method
         self.success_response = success_response
+        self.recipient_mapper = recipient_mapper
         self.location = None
 
 
@@ -752,6 +704,9 @@
         else:
             raise AssertionError("Unknown data type: %r" % (what,))
 
+        if self.recipient_mapper is not None:
+            recipient = self.recipient_mapper(recipient)
+
         if not suppressErrorLog and code > 400: # Error codes only
             self.log.error("Error during %s for %s: %s" % (self.method, recipient, message))
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/utils.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/utils.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/utils.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -21,7 +21,7 @@
 log = Logger()
 
 @inlineCallbacks
-def getCalendarObjectForRecord(txn, record, uid, allow_shared=False):
+def getCalendarObjectForRecord(txn, record, uid):
     """
     Get a copy of the event for a calendar user identified by a directory record.
 
@@ -29,12 +29,12 @@
     one of them to avoid scheduling problems.
     """
 
-    if record and record.locallyHosted():
+    if record and record.thisServer():
         # Get record's calendar-home
         calendar_home = yield txn.calendarHomeWithUID(record.uid)
 
         # Get matching newstore objects
-        objectResources = (yield calendar_home.getCalendarResourcesForUID(uid, allow_shared))
+        objectResources = (yield calendar_home.getCalendarResourcesForUID(uid))
 
         if len(objectResources) > 1:
             # Delete all but the first one

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/sql.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/sql.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -546,9 +546,7 @@
         # refer to calendar *object* UIDs, since calendar *resources* are an
         # HTTP protocol layer thing, not a data store thing.  (See also
         # objectResourcesWithUID.)
-        objectResources = (
-            yield self.objectResourcesWithUID(uid, ["inbox"], False)
-        )
+        objectResources = (yield self.getCalendarResourcesForUID(uid))
         for objectResource in objectResources:
             if ok_object and objectResource._resourceID == ok_object._resourceID:
                 continue
@@ -560,15 +558,22 @@
 
 
     @inlineCallbacks
-    def getCalendarResourcesForUID(self, uid, allow_shared=False):
+    def getCalendarResourcesForUID(self, uid):
+        """
+        Find all calendar object resources in the calendar home that are not in the "inbox" collection
+        and not in shared collections.
+        Cache the result of this query as it can happen multiple times during scheduling under slightly
+        different circumstances.
 
-        results = []
-        objectResources = (yield self.objectResourcesWithUID(uid, ["inbox"]))
-        for objectResource in objectResources:
-            if allow_shared or objectResource._parentCollection.owned():
-                results.append(objectResource)
+        @param uid: the UID of the calendar object resources to find
+        @type uid: C{str}
+        """
 
-        returnValue(results)
+        if not hasattr(self, "_cachedCalendarResourcesForUID"):
+            self._cachedCalendarResourcesForUID = {}
+        if uid not in self._cachedCalendarResourcesForUID:
+            self._cachedCalendarResourcesForUID[uid] = (yield self.objectResourcesWithUID(uid, ["inbox"], allowShared=False))
+        returnValue(self._cachedCalendarResourcesForUID[uid])
 
 
     @inlineCallbacks
@@ -1566,10 +1571,6 @@
                 if calsize > config.MaxResourceSize:
                     raise ObjectResourceTooBigError()
 
-        # Possible timezone stripping
-        if config.EnableTimezonesByReference:
-            component.stripKnownTimezones()
-
         # Do validation on external requests
         if internal_state == ComponentUpdateState.NORMAL:
 
@@ -1587,6 +1588,10 @@
             # calendar data
             component.normalizeCalendarUserAddresses(normalizationLookup, self.directoryService().recordWithCalendarUserAddress)
 
+        # Possible timezone stripping
+        if config.EnableTimezonesByReference:
+            component.stripKnownTimezones()
+
         # Check location/resource organizer requirement
         self.validLocationResourceOrganizer(component, inserting, internal_state)
 
@@ -1721,20 +1726,23 @@
 
         NB Do this before implicit scheduling as we don't want old clients to trigger scheduling when
         the X- property is missing.
+
+        We now only preserve the "X-CALENDARSERVER-ATTENDEE-COMMENT" property. We will now allow clients
+        to delete the "X-CALENDARSERVER-PRIVATE-COMMENT" and treat that as a removal of the attendee
+        comment (which will trigger scheduling with the organizer to remove the comment on the organizer's
+        side).
         """
         if config.Scheduling.CalDAV.get("EnablePrivateComments", True):
             old_has_private_comments = not inserting and self.hasPrivateComment
             new_has_private_comments = component.hasPropertyInAnyComponent((
-                "X-CALENDARSERVER-PRIVATE-COMMENT",
                 "X-CALENDARSERVER-ATTENDEE-COMMENT",
             ))
 
             if old_has_private_comments and not new_has_private_comments:
                 # Transfer old comments to new calendar
-                log.debug("Private Comments properties were entirely removed by the client. Restoring existing properties.")
+                log.debug("Organizer private comment properties were entirely removed by the client. Restoring existing properties.")
                 old_calendar = (yield self.componentForUser())
                 component.transferProperties(old_calendar, (
-                    "X-CALENDARSERVER-PRIVATE-COMMENT",
                     "X-CALENDARSERVER-ATTENDEE-COMMENT",
                 ))
 
@@ -1943,7 +1951,7 @@
                 user_uuid = self._parentCollection.viewerHome().uid()
                 component = PerUserDataFilter(user_uuid).filter(component.duplicate())
 
-            scheduler = ImplicitScheduler()
+            scheduler = ImplicitScheduler(logItems=self._txn.logItems)
 
             # PUT
             do_implicit_action, is_scheduling_resource = (yield scheduler.testImplicitSchedulingPUT(
@@ -2600,7 +2608,7 @@
         if not isinbox and internal_state == ComponentRemoveState.NORMAL:
             # Get data we need for implicit scheduling
             calendar = (yield self.componentForUser())
-            scheduler = ImplicitScheduler()
+            scheduler = ImplicitScheduler(logItems=self._txn.logItems)
             do_implicit_action, _ignore = (yield scheduler.testImplicitSchedulingDELETE(
                 self.calendar(),
                 self,
@@ -2919,7 +2927,7 @@
 
         # Only allow organizers to manipulate managed attachments for now
         calendar = (yield self.componentForUser())
-        scheduler = ImplicitScheduler()
+        scheduler = ImplicitScheduler(logItems=self._txn.logItems)
         is_attendee = (yield scheduler.testAttendeeEvent(self.calendar(), self, calendar,))
         if is_attendee:
             raise InvalidAttachmentOperation("Attendees are not allowed to manipulate managed attachments")

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/common.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/common.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/common.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -84,73 +84,75 @@
 
 OTHER_HOME_UID = "home_splits"
 
-test_event_text = (
-    "BEGIN:VCALENDAR\r\n"
-      "VERSION:2.0\r\n"
-      "PRODID:-//Apple Inc.//iCal 4.0.1//EN\r\n"
-      "CALSCALE:GREGORIAN\r\n"
-      "BEGIN:VTIMEZONE\r\n"
-        "TZID:US/Pacific\r\n"
-        "BEGIN:DAYLIGHT\r\n"
-          "TZOFFSETFROM:-0800\r\n"
-          "RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU\r\n"
-          "DTSTART:20070311T020000\r\n"
-          "TZNAME:PDT\r\n"
-          "TZOFFSETTO:-0700\r\n"
-        "END:DAYLIGHT\r\n"
-        "BEGIN:STANDARD\r\n"
-          "TZOFFSETFROM:-0700\r\n"
-          "RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU\r\n"
-          "DTSTART:20071104T020000\r\n"
-          "TZNAME:PST\r\n"
-          "TZOFFSETTO:-0800\r\n"
-        "END:STANDARD\r\n"
-      "END:VTIMEZONE\r\n"
-      "BEGIN:VEVENT\r\n"
-        "CREATED:20100203T013849Z\r\n"
-        "UID:uid-test\r\n"
-        "DTEND;TZID=US/Pacific:20100207T173000\r\n"
-        "TRANSP:OPAQUE\r\n"
-        "SUMMARY:New Event\r\n"
-        "DTSTART;TZID=US/Pacific:20100207T170000\r\n"
-        "DTSTAMP:20100203T013909Z\r\n"
-        "SEQUENCE:3\r\n"
-        "X-APPLE-DROPBOX:/calendars/users/wsanchez/dropbox/uid-test.dropbox\r\n"
-        "BEGIN:VALARM\r\n"
-          "X-WR-ALARMUID:1377CCC7-F85C-4610-8583-9513D4B364E1\r\n"
-          "TRIGGER:-PT20M\r\n"
-          "ATTACH:Basso\r\n"
-          "ACTION:AUDIO\r\n"
-        "END:VALARM\r\n"
-      "END:VEVENT\r\n"
-    "END:VCALENDAR\r\n"
-)
+test_event_text = """BEGIN:VCALENDAR
+VERSION:2.0
+PRODID:-//Apple Inc.//iCal 4.0.1//EN
+CALSCALE:GREGORIAN
+BEGIN:VTIMEZONE
+TZID:US/Pacific
+BEGIN:DAYLIGHT
+TZOFFSETFROM:-0800
+RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
+DTSTART:20070311T020000
+TZNAME:PDT
+TZOFFSETTO:-0700
+END:DAYLIGHT
+BEGIN:STANDARD
+TZOFFSETFROM:-0700
+RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
+DTSTART:20071104T020000
+TZNAME:PST
+TZOFFSETTO:-0800
+END:STANDARD
+END:VTIMEZONE
+BEGIN:VEVENT
+CREATED:20100203T013849Z
+UID:uid-test
+DTEND;TZID=US/Pacific:20100207T173000
+TRANSP:OPAQUE
+SUMMARY:New Event
+DTSTART;TZID=US/Pacific:20100207T170000
+DTSTAMP:20100203T013909Z
+SEQUENCE:3
+X-APPLE-DROPBOX:/calendars/users/wsanchez/dropbox/uid-test.dropbox
+BEGIN:VALARM
+X-WR-ALARMUID:1377CCC7-F85C-4610-8583-9513D4B364E1
+TRIGGER:-PT20M
+ATTACH:Basso
+ACTION:AUDIO
+END:VALARM
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n")
 
 
 
-test_event_notCalDAV_text = (
-    "BEGIN:VCALENDAR\r\n"
-      "VERSION:2.0\r\n"
-      "PRODID:-//Apple Inc.//iCal 4.0.1//EN\r\n"
-      "CALSCALE:GREGORIAN\r\n"
-      "BEGIN:VEVENT\r\n"
-        "CREATED:20100203T013849Z\r\n"
-        "UID:test\r\n"
-        "DTEND;TZID=US/Pacific:20100207T173000\r\n" # TZID without VTIMEZONE
-        "TRANSP:OPAQUE\r\n"
-        "SUMMARY:New Event\r\n"
-        "DTSTART;TZID=US/Pacific:20100207T170000\r\n"
-        "DTSTAMP:20100203T013909Z\r\n"
-        "SEQUENCE:3\r\n"
-        "BEGIN:VALARM\r\n"
-          "X-WR-ALARMUID:1377CCC7-F85C-4610-8583-9513D4B364E1\r\n"
-          "TRIGGER:-PT20M\r\n"
-          "ATTACH:Basso\r\n"
-          "ACTION:AUDIO\r\n"
-        "END:VALARM\r\n"
-      "END:VEVENT\r\n"
-    "END:VCALENDAR\r\n"
-)
+test_event_notCalDAV_text = """BEGIN:VCALENDAR
+VERSION:2.0
+PRODID:-//Apple Inc.//iCal 4.0.1//EN
+CALSCALE:GREGORIAN
+BEGIN:VEVENT
+CREATED:20100203T013849Z
+UID:test-bad1
+DTEND:20100207T173000Z
+TRANSP:OPAQUE
+SUMMARY:New Event
+DTSTART:20100207T170000Z
+DTSTAMP:20100203T013909Z
+SEQUENCE:3
+END:VEVENT
+BEGIN:VEVENT
+CREATED:20100203T013849Z
+UID:test-bad2
+DTEND:20100207T173000Z
+TRANSP:OPAQUE
+SUMMARY:New Event
+DTSTART:20100207T170000Z
+DTSTAMP:20100203T013909Z
+SEQUENCE:3
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n")
 
 
 
@@ -450,9 +452,7 @@
         yield notifications.writeNotificationObject("abc", inviteNotification,
             inviteNotification.toxml())
 
-        yield self.commit()
-
-        # Make sure notification fired after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -460,6 +460,7 @@
                 "/CalDAV/example.com/home1/notification/",
             ])
         )
+        yield self.commit()
 
         notifications = yield self.transactionUnderTest().notificationsWithUID(
             "home1"
@@ -469,9 +470,7 @@
         abc = yield notifications.notificationObjectWithUID("abc")
         self.assertEquals(abc, None)
 
-        yield self.commit()
-
-        # Make sure notification fired after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -479,6 +478,7 @@
                 "/CalDAV/example.com/home1/notification/",
             ])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -697,11 +697,10 @@
         self.assertNotIdentical((yield home.calendarWithName(name)), None)
         calendarProperties = (yield home.calendarWithName(name)).properties()
         self.assertEqual(len(calendarProperties), 0)
+        # notify is called prior to commit
+        self.assertTrue("/CalDAV/example.com/home1/" in self.notifierFactory.history)
         yield self.commit()
 
-        # Make sure notification fired after commit
-        self.assertTrue("/CalDAV/example.com/home1/" in self.notifierFactory.history)
-
         # Make sure it's available in a new transaction; i.e. test the commit.
         home = yield self.homeUnderTest()
         self.assertNotIdentical((yield home.calendarWithName(name)), None)
@@ -915,8 +914,7 @@
                 None
             )
 
-        # Make sure notifications are fired after commit
-        yield self.commit()
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -924,6 +922,7 @@
                 "/CalDAV/example.com/home1/calendar_1/",
             ])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -1471,9 +1470,7 @@
         self.assertEquals((yield calendarObject.componentForUser()), component)
         self.assertEquals((yield calendarObject.getMetadata()), metadata)
 
-        yield self.commit()
-
-        # Make sure notifications fire after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -1481,6 +1478,7 @@
                 "/CalDAV/example.com/home1/calendar_1/",
             ])
         )
+        yield self.commit()
 
 
     @inlineCallbacks
@@ -1591,9 +1589,7 @@
         calendarObject = yield calendar1.calendarObjectWithName("1.ics")
         self.assertEquals((yield calendarObject.componentForUser()), component)
 
-        yield self.commit()
-
-        # Make sure notification fired after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -1601,6 +1597,7 @@
                 "/CalDAV/example.com/home1/calendar_1/",
             ])
         )
+        yield self.commit()
 
 
     def checkPropertiesMethod(self, thunk):

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/test_implicit.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/test_implicit.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/test_implicit.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -478,9 +478,9 @@
 
 
     @inlineCallbacks
-    def test_validation_preservePrivateComments(self):
+    def test_validation_noPreservePrivateComments(self):
         """
-        Test that resource private comments are restored.
+        Test that attendee private comments are no longer restored.
         """
 
         data1 = """BEGIN:VCALENDAR
@@ -524,12 +524,65 @@
         calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
         calendar1 = (yield calendar_resource.component())
         calendar1 = str(calendar1).replace("\r\n ", "")
-        self.assertTrue("X-CALENDARSERVER-PRIVATE-COMMENT:My Comment" in calendar1)
+        self.assertFalse("X-CALENDARSERVER-PRIVATE-COMMENT:My Comment" in calendar1)
         self.assertTrue("SUMMARY:Changed" in calendar1)
         yield self.commit()
 
 
     @inlineCallbacks
+    def test_validation_preserveOrganizerPrivateComments(self):
+        """
+        Test that organizer private comments are restored.
+        """
+
+        data1 = """BEGIN:VCALENDAR
+VERSION:2.0
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:12345-67890-organizer
+DTSTAMP:20080601T120000Z
+DTSTART:20080601T120000Z
+DTEND:20080601T130000Z
+X-CALENDARSERVER-ATTENDEE-COMMENT;X-CALENDARSERVER-ATTENDEE-REF="urn:uuid:user01";
+ X-CALENDARSERVER-DTSTAMP=20131101T100000Z:Someone else's comment
+END:VEVENT
+END:VCALENDAR
+"""
+
+        calendar_collection = (yield self.calendarUnderTest(home="user01"))
+        calendar = Component.fromString(data1)
+        yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
+        yield self.commit()
+
+        data2 = """BEGIN:VCALENDAR
+VERSION:2.0
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:12345-67890-organizer
+DTSTAMP:20080601T120000Z
+DTSTART:20080601T120000Z
+DTEND:20080601T130000Z
+SUMMARY:Changed
+END:VEVENT
+END:VCALENDAR
+"""
+
+        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
+        calendar = Component.fromString(data2)
+        txn = self.transactionUnderTest()
+        txn._authz_uid = "user01"
+        yield calendar_resource.setComponent(calendar)
+        yield self.commit()
+
+        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
+        calendar1 = (yield calendar_resource.component())
+        calendar1 = str(calendar1).replace("\r\n ", "")
+        self.assertTrue("X-CALENDARSERVER-ATTENDEE-COMMENT;X-CALENDARSERVER-ATTENDEE-REF=\"urn:uuid:user01\";X-CALENDARSERVER-DTSTAMP=20131101T100000Z:Someone else's comment" in calendar1)
+        self.assertTrue("SUMMARY:Changed" in calendar1)
+        yield self.commit()
+
+
+    @inlineCallbacks
     def test_validation_replaceMissingToDoProperties_OrganizerAttendee(self):
         """
         Test that missing scheduling properties in VTODOs are recovered.

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/test_util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/test_util.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/test_util.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -526,16 +526,25 @@
                 "different-name": self.sampleEvent("other-uid", "tgt other"),
             },
         )
+
         txn = self.transactionUnderTest()
-        c1 = yield txn.calendarHomeWithUID("conflict1")
         c2 = yield txn.calendarHomeWithUID("conflict2")
         otherCal = yield c2.createCalendarWithName("othercal")
-        otherCal.createCalendarObjectWithName(
+        yield otherCal.createCalendarObjectWithName(
             "some-name", Component.fromString(
                 self.sampleEvent("oc", "target calendar")[0]
             )
         )
+        yield self.commit()
+
+        txn = self.transactionUnderTest()
+        c1 = yield txn.calendarHomeWithUID("conflict1")
+        c2 = yield txn.calendarHomeWithUID("conflict2")
         yield migrateHome(c1, c2, merge=True)
+        yield self.commit()
+
+        txn = self.transactionUnderTest()
+        c2 = yield txn.calendarHomeWithUID("conflict2")
         targetCal = yield c2.calendarWithName("conflicted")
         yield self.checkSummary("same-name", "target", targetCal)
         yield self.checkSummary("different-name", "tgt other", targetCal)

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/util.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/test/util.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -61,7 +61,6 @@
         fullName,
         calendarUserAddresses,
         cutype="INDIVIDUAL",
-        locallyHosted=True,
         thisServer=True,
     ):
 
@@ -72,7 +71,6 @@
         self.displayName = self.fullName if self.fullName else self.shortNames[0]
         self.calendarUserAddresses = calendarUserAddresses
         self.cutype = cutype
-        self._locallyHosted = locallyHosted
         self._thisServer = thisServer
 
 
@@ -92,10 +90,6 @@
         return cua
 
 
-    def locallyHosted(self):
-        return self._locallyHosted
-
-
     def thisServer(self):
         return self._thisServer
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/icalendardirectoryservice.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/icalendardirectoryservice.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/icalendardirectoryservice.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -60,18 +60,9 @@
         @rtype: C{str}
         """
 
-    def locallyHosted(): #@NoSelf
-        """
-        Indicates whether the record is host on this specific server "pod".
-
-        @return: C{True} if locally hosted.
-        @rtype: C{bool}
-        """
-
     def thisServer(): #@NoSelf
         """
-        Indicates whether the record is hosted on this server or another "pod"
-        that hosts the same directory service.
+        Indicates whether the record is hosted on this server "pod".
 
         @return: C{True} if hosted by this service.
         @rtype: C{bool}

Modified: CalendarServer/branches/users/cdaboo/json/txdav/carddav/datastore/test/common.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/carddav/datastore/test/common.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/carddav/datastore/test/common.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -371,11 +371,10 @@
         #self.assertIdentical((yield home.addressbookWithName(name)), None)
         yield home.removeAddressBookWithName(name)
         self.assertNotIdentical((yield home.addressbookWithName(name)), None)
+        # notify is called prior to commit
+        self.assertTrue("/CardDAV/example.com/home1/" in self.notifierFactory.history)
         yield self.commit()
 
-        # Make sure notification fired after commit
-        self.assertTrue("/CardDAV/example.com/home1/" in self.notifierFactory.history)
-
         # Make sure it's available in a new transaction; i.e. test the commit.
         home = yield self.homeUnderTest()
         self.assertNotIdentical((yield home.addressbookWithName(name)), None)
@@ -396,9 +395,7 @@
             ab = yield home.addressbookWithName(name)
             self.assertEquals((yield ab.listAddressBookObjects()), [])
 
-        yield self.commit()
-
-        # Make sure notification fired after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -407,7 +404,9 @@
             ])
         )
 
+        yield self.commit()
 
+
     @inlineCallbacks
     def test_removeAddressBookWithName_absent(self):
         """
@@ -530,8 +529,6 @@
                 (yield addressbook.addressbookObjectWithName(name)), None
             )
 
-        # Make sure notifications are fired after commit
-        yield self.commit()
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -692,9 +689,7 @@
         addressbookObject = yield addressbook1.addressbookObjectWithName(name)
         self.assertEquals((yield addressbookObject.component()), component)
 
-        yield self.commit()
-
-        # Make sure notifications fire after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -703,7 +698,9 @@
             ])
         )
 
+        yield self.commit()
 
+
     @inlineCallbacks
     def test_createAddressBookObjectWithName_exists(self):
         """
@@ -808,9 +805,7 @@
         addressbookObject = yield addressbook1.addressbookObjectWithName("1.vcf")
         self.assertEquals((yield addressbookObject.component()), component)
 
-        yield self.commit()
-
-        # Make sure notification fired after commit
+        # notify is called prior to commit
         self.assertEquals(
             set(self.notifierFactory.history),
             set([
@@ -819,7 +814,9 @@
             ])
         )
 
+        yield self.commit()
 
+
     def checkPropertiesMethod(self, thunk):
         """
         Verify that the given object has a properties method that returns an

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/file.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/file.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/file.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -926,6 +926,7 @@
         return (self._notifierPrefix, self.uid(),)
 
 
+    @inlineCallbacks
     def notifyChanged(self):
         """
         Trigger a notification of a change
@@ -933,8 +934,14 @@
 
         # Only send one set of change notifications per transaction
         if self._notifiers and not self._transaction.isNotifiedAlready(self):
-            for notifier in self._notifiers.values():
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get("cache", None)
+            if notifier:
                 self._transaction.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get("push", None)
+            if notifier:
+                yield notifier.notify(self._transaction)
             self._transaction.notificationAddedForObject(self)
 
 
@@ -1272,6 +1279,7 @@
         return self.ownerHome().notifierID()
 
 
+    @inlineCallbacks
     def notifyChanged(self):
         """
         Trigger a notification of a change
@@ -1279,8 +1287,14 @@
 
         # Only send one set of change notifications per transaction
         if self._notifiers and not self._transaction.isNotifiedAlready(self):
-            for notifier in self._notifiers.values():
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get("cache", None)
+            if notifier:
                 self._transaction.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get("push", None)
+            if notifier:
+                yield notifier.notify(self._transaction)
             self._transaction.notificationAddedForObject(self)
 
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -29,9 +29,10 @@
 
 from pycalendar.datetime import DateTime
 
-from twext.enterprise.dal.syntax import \
-    Delete, utcNowSQL, Union, Insert, Len, Max, Parameter, SavepointAction, \
-    Select, Update, ColumnSyntax, TableSyntax, Upper, Count, ALL_COLUMNS, Sum
+from twext.enterprise.dal.syntax import (
+    Delete, utcNowSQL, Union, Insert, Len, Max, Parameter, SavepointAction,
+    Select, Update, ColumnSyntax, TableSyntax, Upper, Count, ALL_COLUMNS, Sum,
+    DatabaseLock, DatabaseUnlock)
 from twext.enterprise.ienterprise import AlreadyFinishedError
 from twext.enterprise.queue import LocalQueuer
 from twext.enterprise.util import parseSQLTimestamp
@@ -314,6 +315,7 @@
         self.label = label
         self.logFileName = logFileName
         self.statements = []
+        self.startTime = time.time()
 
 
     def startStatement(self, sql, args):
@@ -329,7 +331,7 @@
         """
         args = ["%s" % (arg,) for arg in args]
         args = [((arg[:10] + "...") if len(arg) > 40 else arg) for arg in args]
-        self.statements.append(["%s %s" % (sql, args,), 0, 0])
+        self.statements.append(["%s %s" % (sql, args,), 0, 0, 0])
         return len(self.statements) - 1, time.time()
 
 
@@ -343,8 +345,10 @@
         @type rows: C{int}
         """
         index, tstamp = context
+        t = time.time()
         self.statements[index][1] = len(rows) if rows else 0
-        self.statements[index][2] = time.time() - tstamp
+        self.statements[index][2] = t - tstamp
+        self.statements[index][3] = t
 
 
     def printReport(self):
@@ -352,19 +356,28 @@
         Print a report of all the SQL statements executed to date.
         """
 
+        total_statements = len(self.statements)
+        total_rows = sum([statement[1] for statement in self.statements])
+        total_time = sum([statement[2] for statement in self.statements]) * 1000.0
+
         toFile = StringIO()
         toFile.write("*** SQL Stats ***\n")
         toFile.write("\n")
         toFile.write("Label: %s\n" % (self.label,))
         toFile.write("Unique statements: %d\n" % (len(set([statement[0] for statement in self.statements]),),))
-        toFile.write("Total statements: %d\n" % (len(self.statements),))
-        toFile.write("Total rows: %d\n" % (sum([statement[1] for statement in self.statements]),))
-        toFile.write("Total time (ms): %.3f\n" % (sum([statement[2] for statement in self.statements]) * 1000.0,))
-        for sql, rows, t in self.statements:
+        toFile.write("Total statements: %d\n" % (total_statements,))
+        toFile.write("Total rows: %d\n" % (total_rows,))
+        toFile.write("Total time (ms): %.3f\n" % (total_time,))
+        t_last_end = self.startTime
+        for sql, rows, t_taken, t_end in self.statements:
             toFile.write("\n")
             toFile.write("SQL: %s\n" % (sql,))
             toFile.write("Rows: %s\n" % (rows,))
-            toFile.write("Time (ms): %.3f\n" % (t * 1000.0,))
+            toFile.write("Time (ms): %.3f\n" % (t_taken * 1000.0,))
+            toFile.write("Idle (ms): %.3f\n" % ((t_end - t_taken - t_last_end) * 1000.0,))
+            toFile.write("Elapsed (ms): %.3f\n" % ((t_end - self.startTime) * 1000.0,))
+            t_last_end = t_end
+        toFile.write("Commit (ms): %.3f\n" % ((time.time() - t_last_end) * 1000.0,))
         toFile.write("***\n\n")
 
         if self.logFileName:
@@ -372,8 +385,10 @@
         else:
             log.error(toFile.getvalue())
 
+        return (total_statements, total_rows, total_time,)
 
 
+
 class CommonStoreTransactionMonitor(object):
     """
     Object that monitors the state of a transaction over time and logs or times out
@@ -483,7 +498,9 @@
         self.iudCount = 0
         self.currentStatement = None
 
+        self.logItems = {}
 
+
     def enqueue(self, workItem, **kw):
         """
         Enqueue a L{twext.enterprise.queue.WorkItem} for later execution.
@@ -1029,8 +1046,10 @@
         """
         Commit the transaction and execute any post-commit hooks.
         """
+
+        # Do stats logging as a postCommit because there might be some pending preCommit SQL we want to log
         if self._stats:
-            self._stats.printReport()
+            self.postCommit(self.statsReport)
         return self._sqlTxn.commit()
 
 
@@ -1041,6 +1060,16 @@
         return self._sqlTxn.abort()
 
 
+    def statsReport(self):
+        """
+        Print the stats report and record log items
+        """
+        sql_statements, sql_rows, sql_time = self._stats.printReport()
+        self.logItems["sql-s"] = str(sql_statements)
+        self.logItems["sql-r"] = str(sql_rows)
+        self.logItems["sql-t"] = "%.1f" % (sql_time,)
+
+
     def _oldEventsBase(self, limit):
         ch = schema.CALENDAR_HOME
         co = schema.CALENDAR_OBJECT
@@ -1373,11 +1402,11 @@
 
 
     def acquireUpgradeLock(self):
-        return self.execSQL("select pg_advisory_lock(1)")
+        return DatabaseLock().on(self)
 
 
     def releaseUpgradeLock(self):
-        return self.execSQL("select pg_advisory_unlock(1)")
+        return DatabaseUnlock().on(self)
 
 
 
@@ -1415,6 +1444,7 @@
         self._txn = transaction
         self._ownerUID = ownerUID
         self._resourceID = None
+        self._dataVersion = None
         self._childrenLoaded = False
         self._children = {}
         self._notifiers = None
@@ -1660,6 +1690,23 @@
             yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
 
 
+    @classproperty
+    def _dataVersionQuery(cls): #@NoSelf
+        ch = cls._homeSchema
+        return Select(
+            [ch.DATAVERSION], From=ch,
+            Where=ch.RESOURCE_ID == Parameter("resourceID")
+        )
+
+
+    @inlineCallbacks
+    def dataVersion(self):
+        if self._dataVersion is None:
+            self._dataVersion = (yield self._dataVersionQuery.on(
+                self._txn, resourceID=self._resourceID))[0][0]
+        returnValue(self._dataVersion)
+
+
     def name(self):
         """
         Implement L{IDataStoreObject.name} to return the uid.
@@ -2195,6 +2242,7 @@
         the resource has changed.  We ensure we only do this once per object
         per transaction.
         """
+
         if self._txn.isNotifiedAlready(self):
             returnValue(None)
         self._txn.notificationAddedForObject(self)
@@ -2205,8 +2253,14 @@
 
         # Send notifications
         if self._notifiers:
-            for notifier in self._notifiers.values():
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get("cache", None)
+            if notifier:
                 self._txn.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get("push", None)
+            if notifier:
+                yield notifier.notify(self._txn)
 
 
     @classproperty
@@ -2320,16 +2374,20 @@
         raise NotImplementedError()
 
 
-    @classproperty
-    def _objectNamesSinceRevisionQuery(cls): #@NoSelf
+    @classmethod
+    def _objectNamesSinceRevisionQuery(cls, deleted=True): #@NoSelf
         """
         DAL query for (resource, deleted-flag)
         """
         rev = cls._revisionsSchema
-        return Select([rev.RESOURCE_NAME, rev.DELETED],
-                      From=rev,
-                      Where=(rev.REVISION > Parameter("revision")).And(
-                          rev.RESOURCE_ID == Parameter("resourceID")))
+        where = (rev.REVISION > Parameter("revision")).And(rev.RESOURCE_ID == Parameter("resourceID"))
+        if not deleted:
+            where = where.And(rev.DELETED == False)
+        return Select(
+            [rev.RESOURCE_NAME, rev.DELETED],
+            From=rev,
+            Where=where,
+        )
 
 
     def resourceNamesSinceToken(self, token):
@@ -2354,10 +2412,10 @@
         """
 
         results = [
-            (name if name else "", deleted)
-            for name, deleted in
-            (yield self._objectNamesSinceRevisionQuery.on(
-                self._txn, revision=revision, resourceID=self._resourceID))
+            (name if name else "", deleted) for name, deleted in
+                (yield self._objectNamesSinceRevisionQuery(deleted=(revision != 0)).on(
+                    self._txn, revision=revision, resourceID=self._resourceID)
+                )
         ]
         results.sort(key=lambda x: x[1])
 
@@ -2435,14 +2493,14 @@
     @classproperty
     def _bumpSyncTokenQuery(cls): #@NoSelf
         """
-        DAL query to change collection sync token.
+        DAL query to change collection sync token. Note this can impact multiple rows if the
+        collection is shared.
         """
         rev = cls._revisionsSchema
         return Update(
             {rev.REVISION: schema.REVISION_SEQ, },
             Where=(rev.RESOURCE_ID == Parameter("resourceID")).And
-                  (rev.RESOURCE_NAME == None),
-            Return=rev.REVISION
+                  (rev.RESOURCE_NAME == None)
         )
 
 
@@ -2451,8 +2509,11 @@
 
         if not self._txn.isRevisionBumpedAlready(self):
             self._txn.bumpRevisionForObject(self)
-            self._syncTokenRevision = (yield self._bumpSyncTokenQuery.on(
-                self._txn, resourceID=self._resourceID))[0][0]
+            yield self._bumpSyncTokenQuery.on(
+                self._txn,
+                resourceID=self._resourceID,
+            )
+            self._syncTokenRevision = None
 
 
     @classproperty
@@ -3012,7 +3073,9 @@
             queryCacher = self._txn._queryCacher
             if queryCacher:
                 cacheKey = queryCacher.keyForObjectWithName(shareeView._home._resourceID, shareeView._name)
-                queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+                yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+                cacheKey = queryCacher.keyForObjectWithResourceID(shareeView._home._resourceID, shareeView._resourceID)
+                yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
 
             shareeView._name = sharedname[0][0]
 
@@ -3070,7 +3133,9 @@
             queryCacher = self._txn._queryCacher
             if queryCacher:
                 cacheKey = queryCacher.keyForObjectWithName(shareeHome._resourceID, shareeChild._name)
-                queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+                yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+                cacheKey = queryCacher.keyForObjectWithResourceID(shareeHome._resourceID, shareeChild._resourceID)
+                yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
         else:
             deletedBindName = None
 
@@ -3335,10 +3400,9 @@
     def invalidateQueryCache(self):
         queryCacher = self._txn._queryCacher
         if queryCacher is not None:
-            cacheKey = queryCacher.keyForHomeChildMetaData(self._resourceID)
-            yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
-            cacheKey = queryCacher.keyForObjectWithName(self._home._resourceID, self._name)
-            yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForHomeChildMetaData(self._resourceID))
+            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithName(self._home._resourceID, self._name))
+            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID))
 
 
 
@@ -3515,6 +3579,7 @@
             if rows and queryCacher:
                 # Cache the result
                 queryCacher.setAfterCommit(home._txn, cacheKey, rows)
+                queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithResourceID(home._resourceID, rows[0][2]), rows)
 
         if not rows:
             returnValue(None)
@@ -3555,8 +3620,24 @@
         @return: an L{CommonHomeChild} or C{None} if no such child
             exists.
         """
-        rows = yield cls._bindForResourceIDAndHomeID.on(
-            home._txn, resourceID=resourceID, homeID=home._resourceID)
+
+        rows = None
+        queryCacher = home._txn._queryCacher
+
+        if queryCacher:
+            # Retrieve data from cache
+            cacheKey = queryCacher.keyForObjectWithResourceID(home._resourceID, resourceID)
+            rows = yield queryCacher.get(cacheKey)
+
+        if rows is None:
+            # No cached copy
+            rows = yield cls._bindForResourceIDAndHomeID.on(home._txn, resourceID=resourceID, homeID=home._resourceID)
+
+            if rows and queryCacher:
+                # Cache the result (under both the ID and name values)
+                queryCacher.setAfterCommit(home._txn, cacheKey, rows)
+                queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithName(home._resourceID, rows[0][3]), rows)
+
         if not rows:
             returnValue(None)
 
@@ -3737,6 +3818,8 @@
         if queryCacher:
             cacheKey = queryCacher.keyForObjectWithName(self._home._resourceID, oldName)
             yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
+            cacheKey = queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID)
+            yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
 
         yield self._renameQuery.on(self._txn, name=name,
                                    resourceID=self._resourceID,
@@ -3770,6 +3853,8 @@
         if queryCacher:
             cacheKey = queryCacher.keyForObjectWithName(self._home._resourceID, self._name)
             yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
+            cacheKey = queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID)
+            yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
 
         yield self._deletedSyncToken()
         yield self._deleteQuery.on(self._txn, NoSuchHomeChildError,
@@ -4256,8 +4341,14 @@
 
         # Send notifications
         if self._notifiers:
-            for notifier in self._notifiers.values():
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get("cache", None)
+            if notifier:
                 self._txn.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get("push", None)
+            if notifier:
+                yield notifier.notify(self._txn)
 
 
     @classproperty
@@ -4480,7 +4571,7 @@
     @inlineCallbacks
     def create(cls, parent, name, component, options=None):
 
-        child = (yield cls.objectWithName(parent, name, None))
+        child = (yield parent.objectResourceWithName(name))
         if child:
             raise ObjectResourceNameAlreadyExistsError(name)
 
@@ -5077,15 +5168,21 @@
         the resource has changed.  We ensure we only do this once per object
         per transaction.
         """
-        yield
         if self._txn.isNotifiedAlready(self):
             returnValue(None)
         self._txn.notificationAddedForObject(self)
 
         # Send notifications
         if self._notifiers:
-            for notifier in self._notifiers.values():
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get("cache", None)
+            if notifier:
                 self._txn.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get("push", None)
+            if notifier:
+                yield notifier.notify(self._txn)
+
         returnValue(None)
 
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/current-oracle-dialect.sql	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/current-oracle-dialect.sql	2013-11-08 03:33:05 UTC (rev 11912)
@@ -366,7 +366,7 @@
     "VALUE" nvarchar2(255)
 );
 
-insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '26');
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '27');
 insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '5');
 insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
 create index CALENDAR_HOME_METADAT_3cb9049e on CALENDAR_HOME_METADATA (
@@ -452,9 +452,11 @@
     CALENDAR_RESOURCE_ID
 );
 
-create index CALENDAR_OBJECT_REVIS_2643d556 on CALENDAR_OBJECT_REVISIONS (
+create index CALENDAR_OBJECT_REVIS_6d9d929c on CALENDAR_OBJECT_REVISIONS (
     CALENDAR_RESOURCE_ID,
-    RESOURCE_NAME
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
 );
 
 create index CALENDAR_OBJECT_REVIS_265c8acf on CALENDAR_OBJECT_REVISIONS (
@@ -467,9 +469,11 @@
     OWNER_HOME_RESOURCE_ID
 );
 
-create index ADDRESSBOOK_OBJECT_RE_980b9872 on ADDRESSBOOK_OBJECT_REVISIONS (
+create index ADDRESSBOOK_OBJECT_RE_00fe8288 on ADDRESSBOOK_OBJECT_REVISIONS (
     OWNER_HOME_RESOURCE_ID,
-    RESOURCE_NAME
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
 );
 
 create index ADDRESSBOOK_OBJECT_RE_45004780 on ADDRESSBOOK_OBJECT_REVISIONS (

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/current.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/current.sql	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/current.sql	2013-11-08 03:33:05 UTC (rev 11912)
@@ -529,8 +529,8 @@
 create index CALENDAR_OBJECT_REVISIONS_HOME_RESOURCE_ID_CALENDAR_RESOURCE_ID
   on CALENDAR_OBJECT_REVISIONS(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID);
 
-create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME
-  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME);
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
 
 create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_REVISION
   on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, REVISION);
@@ -552,8 +552,8 @@
 create index ADDRESSBOOK_OBJECT_REVISIONS_HOME_RESOURCE_ID_OWNER_HOME_RESOURCE_ID
   on ADDRESSBOOK_OBJECT_REVISIONS(ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID);
 
-create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME
-  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, RESOURCE_NAME);
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
 
 create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_REVISION
   on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, REVISION);
@@ -698,6 +698,6 @@
   VALUE                         varchar(255)
 );
 
-insert into CALENDARSERVER values ('VERSION', '26');
+insert into CALENDARSERVER values ('VERSION', '27');
 insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '5');
 insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');

Added: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/old/oracle-dialect/v26.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/old/oracle-dialect/v26.sql	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/old/oracle-dialect/v26.sql	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,495 @@
+create sequence RESOURCE_ID_SEQ;
+create sequence INSTANCE_ID_SEQ;
+create sequence ATTACHMENT_ID_SEQ;
+create sequence REVISION_SEQ;
+create sequence WORKITEM_SEQ;
+create table NODE_INFO (
+    "HOSTNAME" nvarchar2(255),
+    "PID" integer not null,
+    "PORT" integer not null,
+    "TIME" timestamp default CURRENT_TIMESTAMP at time zone 'UTC' not null, 
+    primary key("HOSTNAME", "PORT")
+);
+
+create table NAMED_LOCK (
+    "LOCK_NAME" nvarchar2(255) primary key
+);
+
+create table CALENDAR_HOME (
+    "RESOURCE_ID" integer primary key,
+    "OWNER_UID" nvarchar2(255) unique,
+    "DATAVERSION" integer default 0 not null
+);
+
+create table CALENDAR (
+    "RESOURCE_ID" integer primary key
+);
+
+create table CALENDAR_HOME_METADATA (
+    "RESOURCE_ID" integer primary key references CALENDAR_HOME on delete cascade,
+    "QUOTA_USED_BYTES" integer default 0 not null,
+    "DEFAULT_EVENTS" integer default null references CALENDAR on delete set null,
+    "DEFAULT_TASKS" integer default null references CALENDAR on delete set null,
+    "ALARM_VEVENT_TIMED" nclob default null,
+    "ALARM_VEVENT_ALLDAY" nclob default null,
+    "ALARM_VTODO_TIMED" nclob default null,
+    "ALARM_VTODO_ALLDAY" nclob default null,
+    "AVAILABILITY" nclob default null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_METADATA (
+    "RESOURCE_ID" integer primary key references CALENDAR on delete cascade,
+    "SUPPORTED_COMPONENTS" nvarchar2(255) default null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table NOTIFICATION_HOME (
+    "RESOURCE_ID" integer primary key,
+    "OWNER_UID" nvarchar2(255) unique
+);
+
+create table NOTIFICATION (
+    "RESOURCE_ID" integer primary key,
+    "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME,
+    "NOTIFICATION_UID" nvarchar2(255),
+    "XML_TYPE" nvarchar2(255),
+    "XML_DATA" nclob,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique("NOTIFICATION_UID", "NOTIFICATION_HOME_RESOURCE_ID")
+);
+
+create table CALENDAR_BIND (
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "CALENDAR_RESOURCE_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "BIND_REVISION" integer default 0 not null,
+    "MESSAGE" nclob,
+    "TRANSP" integer default 0 not null,
+    "ALARM_VEVENT_TIMED" nclob default null,
+    "ALARM_VEVENT_ALLDAY" nclob default null,
+    "ALARM_VTODO_TIMED" nclob default null,
+    "ALARM_VTODO_ALLDAY" nclob default null,
+    "TIMEZONE" nclob default null, 
+    primary key("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_ID"), 
+    unique("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_NAME")
+);
+
+create table CALENDAR_BIND_MODE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('own', 0);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('write', 2);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('direct', 3);
+create table CALENDAR_BIND_STATUS (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invited', 0);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('accepted', 1);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('declined', 2);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invalid', 3);
+create table CALENDAR_TRANSP (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('opaque', 0);
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('transparent', 1);
+create table CALENDAR_OBJECT (
+    "RESOURCE_ID" integer primary key,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "ICALENDAR_TEXT" nclob,
+    "ICALENDAR_UID" nvarchar2(255),
+    "ICALENDAR_TYPE" nvarchar2(255),
+    "ATTACHMENTS_MODE" integer default 0 not null,
+    "DROPBOX_ID" nvarchar2(255),
+    "ORGANIZER" nvarchar2(255),
+    "RECURRANCE_MIN" date,
+    "RECURRANCE_MAX" date,
+    "ACCESS" integer default 0 not null,
+    "SCHEDULE_OBJECT" integer default 0,
+    "SCHEDULE_TAG" nvarchar2(36) default null,
+    "SCHEDULE_ETAGS" nclob default null,
+    "PRIVATE_COMMENTS" integer default 0 not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique("CALENDAR_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table CALENDAR_OBJECT_ATTACHMENTS_MO (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('none', 0);
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('write', 2);
+create table CALENDAR_ACCESS_TYPE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(32) unique
+);
+
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('', 0);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('public', 1);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('private', 2);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('confidential', 3);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('restricted', 4);
+create table TIME_RANGE (
+    "INSTANCE_ID" integer primary key,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+    "FLOATING" integer not null,
+    "START_DATE" timestamp not null,
+    "END_DATE" timestamp not null,
+    "FBTYPE" integer not null,
+    "TRANSPARENT" integer not null
+);
+
+create table FREE_BUSY_TYPE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('unknown', 0);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('free', 1);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy', 2);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-unavailable', 3);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-tentative', 4);
+create table TRANSPARENCY (
+    "TIME_RANGE_INSTANCE_ID" integer not null references TIME_RANGE on delete cascade,
+    "USER_ID" nvarchar2(255),
+    "TRANSPARENT" integer not null
+);
+
+create table ATTACHMENT (
+    "ATTACHMENT_ID" integer primary key,
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "DROPBOX_ID" nvarchar2(255),
+    "CONTENT_TYPE" nvarchar2(255),
+    "SIZE" integer not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "PATH" nvarchar2(1024)
+);
+
+create table ATTACHMENT_CALENDAR_OBJECT (
+    "ATTACHMENT_ID" integer not null references ATTACHMENT on delete cascade,
+    "MANAGED_ID" nvarchar2(255),
+    "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade, 
+    primary key("ATTACHMENT_ID", "CALENDAR_OBJECT_RESOURCE_ID"), 
+    unique("MANAGED_ID", "CALENDAR_OBJECT_RESOURCE_ID")
+);
+
+create table RESOURCE_PROPERTY (
+    "RESOURCE_ID" integer not null,
+    "NAME" nvarchar2(255),
+    "VALUE" nclob,
+    "VIEWER_UID" nvarchar2(255), 
+    primary key("RESOURCE_ID", "NAME", "VIEWER_UID")
+);
+
+create table ADDRESSBOOK_HOME (
+    "RESOURCE_ID" integer primary key,
+    "ADDRESSBOOK_PROPERTY_STORE_ID" integer not null,
+    "OWNER_UID" nvarchar2(255) unique,
+    "DATAVERSION" integer default 0 not null
+);
+
+create table ADDRESSBOOK_HOME_METADATA (
+    "RESOURCE_ID" integer primary key references ADDRESSBOOK_HOME on delete cascade,
+    "QUOTA_USED_BYTES" integer default 0 not null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table SHARED_ADDRESSBOOK_BIND (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "OWNER_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "ADDRESSBOOK_RESOURCE_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "BIND_REVISION" integer default 0 not null,
+    "MESSAGE" nclob, 
+    primary key("ADDRESSBOOK_HOME_RESOURCE_ID", "OWNER_HOME_RESOURCE_ID"), 
+    unique("ADDRESSBOOK_HOME_RESOURCE_ID", "ADDRESSBOOK_RESOURCE_NAME")
+);
+
+create table ADDRESSBOOK_OBJECT (
+    "RESOURCE_ID" integer primary key,
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "VCARD_TEXT" nclob,
+    "VCARD_UID" nvarchar2(255),
+    "KIND" integer not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique("ADDRESSBOOK_HOME_RESOURCE_ID", "RESOURCE_NAME"), 
+    unique("ADDRESSBOOK_HOME_RESOURCE_ID", "VCARD_UID")
+);
+
+create table ADDRESSBOOK_OBJECT_KIND (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('person', 0);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('group', 1);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('resource', 2);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('location', 3);
+create table ABO_MEMBERS (
+    "GROUP_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+    "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "MEMBER_ID" integer not null references ADDRESSBOOK_OBJECT, 
+    primary key("GROUP_ID", "MEMBER_ID")
+);
+
+create table ABO_FOREIGN_MEMBERS (
+    "GROUP_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+    "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "MEMBER_ADDRESS" nvarchar2(255), 
+    primary key("GROUP_ID", "MEMBER_ADDRESS")
+);
+
+create table SHARED_GROUP_BIND (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "GROUP_RESOURCE_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+    "GROUP_ADDRESSBOOK_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "BIND_REVISION" integer default 0 not null,
+    "MESSAGE" nclob, 
+    primary key("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_RESOURCE_ID"), 
+    unique("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_ADDRESSBOOK_NAME")
+);
+
+create table CALENDAR_OBJECT_REVISIONS (
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "CALENDAR_RESOURCE_ID" integer references CALENDAR,
+    "CALENDAR_NAME" nvarchar2(255) default null,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null
+);
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "OWNER_HOME_RESOURCE_ID" integer references ADDRESSBOOK_HOME,
+    "ADDRESSBOOK_NAME" nvarchar2(255) default null,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null
+);
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+    "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null, 
+    unique("NOTIFICATION_HOME_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table APN_SUBSCRIPTIONS (
+    "TOKEN" nvarchar2(255),
+    "RESOURCE_KEY" nvarchar2(255),
+    "MODIFIED" integer not null,
+    "SUBSCRIBER_GUID" nvarchar2(255),
+    "USER_AGENT" nvarchar2(255) default null,
+    "IP_ADDR" nvarchar2(255) default null, 
+    primary key("TOKEN", "RESOURCE_KEY")
+);
+
+create table IMIP_TOKENS (
+    "TOKEN" nvarchar2(255),
+    "ORGANIZER" nvarchar2(255),
+    "ATTENDEE" nvarchar2(255),
+    "ICALUID" nvarchar2(255),
+    "ACCESSED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    primary key("ORGANIZER", "ATTENDEE", "ICALUID")
+);
+
+create table IMIP_INVITATION_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "FROM_ADDR" nvarchar2(255),
+    "TO_ADDR" nvarchar2(255),
+    "ICALENDAR_TEXT" nclob
+);
+
+create table IMIP_POLLING_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table IMIP_REPLY_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "ORGANIZER" nvarchar2(255),
+    "ATTENDEE" nvarchar2(255),
+    "ICALENDAR_TEXT" nclob
+);
+
+create table PUSH_NOTIFICATION_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "PUSH_ID" nvarchar2(255)
+);
+
+create table GROUP_CACHER_POLLING_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create table CALENDARSERVER (
+    "NAME" nvarchar2(255) primary key,
+    "VALUE" nvarchar2(255)
+);
+
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '26');
+insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '5');
+insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
+create index CALENDAR_HOME_METADAT_3cb9049e on CALENDAR_HOME_METADATA (
+    DEFAULT_EVENTS
+);
+
+create index CALENDAR_HOME_METADAT_d55e5548 on CALENDAR_HOME_METADATA (
+    DEFAULT_TASKS
+);
+
+create index NOTIFICATION_NOTIFICA_f891f5f9 on NOTIFICATION (
+    NOTIFICATION_HOME_RESOURCE_ID
+);
+
+create index CALENDAR_BIND_RESOURC_e57964d4 on CALENDAR_BIND (
+    CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_CALEN_a9a453a9 on CALENDAR_OBJECT (
+    CALENDAR_RESOURCE_ID,
+    ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_CALEN_96e83b73 on CALENDAR_OBJECT (
+    CALENDAR_RESOURCE_ID,
+    RECURRANCE_MAX
+);
+
+create index CALENDAR_OBJECT_ICALE_82e731d5 on CALENDAR_OBJECT (
+    ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_DROPB_de041d80 on CALENDAR_OBJECT (
+    DROPBOX_ID
+);
+
+create index TIME_RANGE_CALENDAR_R_beb6e7eb on TIME_RANGE (
+    CALENDAR_RESOURCE_ID
+);
+
+create index TIME_RANGE_CALENDAR_O_acf37bd1 on TIME_RANGE (
+    CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index TRANSPARENCY_TIME_RAN_5f34467f on TRANSPARENCY (
+    TIME_RANGE_INSTANCE_ID
+);
+
+create index ATTACHMENT_CALENDAR_H_0078845c on ATTACHMENT (
+    CALENDAR_HOME_RESOURCE_ID
+);
+
+create index ATTACHMENT_CALENDAR_O_81508484 on ATTACHMENT_CALENDAR_OBJECT (
+    CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index SHARED_ADDRESSBOOK_BI_e9a2e6d4 on SHARED_ADDRESSBOOK_BIND (
+    OWNER_HOME_RESOURCE_ID
+);
+
+create index ABO_MEMBERS_ADDRESSBO_4effa879 on ABO_MEMBERS (
+    ADDRESSBOOK_ID
+);
+
+create index ABO_MEMBERS_MEMBER_ID_8d66adcf on ABO_MEMBERS (
+    MEMBER_ID
+);
+
+create index ABO_FOREIGN_MEMBERS_A_1fd2c5e9 on ABO_FOREIGN_MEMBERS (
+    ADDRESSBOOK_ID
+);
+
+create index SHARED_GROUP_BIND_RES_cf52f95d on SHARED_GROUP_BIND (
+    GROUP_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_3a3956c4 on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_HOME_RESOURCE_ID,
+    CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_6d9d929c on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
+);
+
+create index CALENDAR_OBJECT_REVIS_265c8acf on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_2bfcf757 on ADDRESSBOOK_OBJECT_REVISIONS (
+    ADDRESSBOOK_HOME_RESOURCE_ID,
+    OWNER_HOME_RESOURCE_ID
+);
+
+create index ADDRESSBOOK_OBJECT_RE_00fe8288 on ADDRESSBOOK_OBJECT_REVISIONS (
+    OWNER_HOME_RESOURCE_ID,
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_45004780 on ADDRESSBOOK_OBJECT_REVISIONS (
+    OWNER_HOME_RESOURCE_ID,
+    REVISION
+);
+
+create index NOTIFICATION_OBJECT_R_036a9cee on NOTIFICATION_OBJECT_REVISIONS (
+    NOTIFICATION_HOME_RESOURCE_ID,
+    REVISION
+);
+
+create index APN_SUBSCRIPTIONS_RES_9610d78e on APN_SUBSCRIPTIONS (
+    RESOURCE_KEY
+);
+
+create index IMIP_TOKENS_TOKEN_e94b918f on IMIP_TOKENS (
+    TOKEN
+);
+
+create index CALENDAR_OBJECT_SPLIT_af71dcda on CALENDAR_OBJECT_SPLITTER_WORK (
+    RESOURCE_ID
+);
+

Added: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/old/postgres-dialect/v26.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/old/postgres-dialect/v26.sql	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/old/postgres-dialect/v26.sql	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,700 @@
+-- -*- test-case-name: txdav.caldav.datastore.test.test_sql,txdav.carddav.datastore.test.test_sql -*-
+
+----
+-- Copyright (c) 2010-2013 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+
+-----------------
+-- Resource ID --
+-----------------
+
+create sequence RESOURCE_ID_SEQ;
+
+
+-------------------------
+-- Cluster Bookkeeping --
+-------------------------
+
+-- Information about a process connected to this database.
+
+-- Note that this must match the node info schema in twext.enterprise.queue.
+create table NODE_INFO (
+  HOSTNAME  varchar(255) not null,
+  PID       integer      not null,
+  PORT      integer      not null,
+  TIME      timestamp    not null default timezone('UTC', CURRENT_TIMESTAMP),
+
+  primary key (HOSTNAME, PORT)
+);
+
+-- Unique named locks.  This table should always be empty, but rows are
+-- temporarily created in order to prevent undesirable concurrency.
+create table NAMED_LOCK (
+    LOCK_NAME varchar(255) primary key
+);
+
+
+-------------------
+-- Calendar Home --
+-------------------
+
+create table CALENDAR_HOME (
+  RESOURCE_ID      integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID        varchar(255) not null unique,                                 -- implicit index
+  DATAVERSION      integer      default 0 not null
+);
+
+--------------
+-- Calendar --
+--------------
+
+create table CALENDAR (
+  RESOURCE_ID integer   primary key default nextval('RESOURCE_ID_SEQ') -- implicit index
+);
+
+----------------------------
+-- Calendar Home Metadata --
+----------------------------
+
+create table CALENDAR_HOME_METADATA (
+  RESOURCE_ID              integer     primary key references CALENDAR_HOME on delete cascade, -- implicit index
+  QUOTA_USED_BYTES         integer     default 0 not null,
+  DEFAULT_EVENTS           integer     default null references CALENDAR on delete set null,
+  DEFAULT_TASKS            integer     default null references CALENDAR on delete set null,
+  ALARM_VEVENT_TIMED       text        default null,
+  ALARM_VEVENT_ALLDAY      text        default null,
+  ALARM_VTODO_TIMED        text        default null,
+  ALARM_VTODO_ALLDAY       text        default null,
+  AVAILABILITY             text        default null,
+  CREATED                  timestamp   default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                 timestamp   default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index CALENDAR_HOME_METADATA_DEFAULT_EVENTS on
+	CALENDAR_HOME_METADATA(DEFAULT_EVENTS);
+create index CALENDAR_HOME_METADATA_DEFAULT_TASKS on
+	CALENDAR_HOME_METADATA(DEFAULT_TASKS);
+
+-----------------------
+-- Calendar Metadata --
+-----------------------
+
+create table CALENDAR_METADATA (
+  RESOURCE_ID           integer      primary key references CALENDAR on delete cascade, -- implicit index
+  SUPPORTED_COMPONENTS  varchar(255) default null,
+  CREATED               timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED              timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------------
+-- Sharing Notifications --
+---------------------------
+
+create table NOTIFICATION_HOME (
+  RESOURCE_ID integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID   varchar(255) not null unique                                 -- implicit index
+);
+
+create table NOTIFICATION (
+  RESOURCE_ID                   integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME,
+  NOTIFICATION_UID              varchar(255) not null,
+  XML_TYPE                      varchar(255) not null,
+  XML_DATA                      text         not null,
+  MD5                           char(32)     not null,
+  CREATED                       timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique(NOTIFICATION_UID, NOTIFICATION_HOME_RESOURCE_ID) -- implicit index
+);
+
+create index NOTIFICATION_NOTIFICATION_HOME_RESOURCE_ID on
+	NOTIFICATION(NOTIFICATION_HOME_RESOURCE_ID);
+
+
+-------------------
+-- Calendar Bind --
+-------------------
+
+-- Joins CALENDAR_HOME and CALENDAR
+
+create table CALENDAR_BIND (
+  CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
+  CALENDAR_RESOURCE_ID      integer      not null references CALENDAR on delete cascade,
+  CALENDAR_RESOURCE_NAME    varchar(255) not null,
+  BIND_MODE                 integer      not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS               integer      not null, -- enum CALENDAR_BIND_STATUS
+  BIND_REVISION				integer      default 0 not null,
+  MESSAGE                   text,
+  TRANSP                    integer      default 0 not null, -- enum CALENDAR_TRANSP
+  ALARM_VEVENT_TIMED        text         default null,
+  ALARM_VEVENT_ALLDAY       text         default null,
+  ALARM_VTODO_TIMED         text         default null,
+  ALARM_VTODO_ALLDAY        text         default null,
+  TIMEZONE                  text         default null,
+
+  primary key(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID), -- implicit index
+  unique(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_NAME)     -- implicit index
+);
+
+create index CALENDAR_BIND_RESOURCE_ID on
+	CALENDAR_BIND(CALENDAR_RESOURCE_ID);
+
+-- Enumeration of calendar bind modes
+
+create table CALENDAR_BIND_MODE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_MODE values (0, 'own'  );
+insert into CALENDAR_BIND_MODE values (1, 'read' );
+insert into CALENDAR_BIND_MODE values (2, 'write');
+insert into CALENDAR_BIND_MODE values (3, 'direct');
+
+-- Enumeration of statuses
+
+create table CALENDAR_BIND_STATUS (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_STATUS values (0, 'invited' );
+insert into CALENDAR_BIND_STATUS values (1, 'accepted');
+insert into CALENDAR_BIND_STATUS values (2, 'declined');
+insert into CALENDAR_BIND_STATUS values (3, 'invalid');
+
+
+-- Enumeration of transparency
+
+create table CALENDAR_TRANSP (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_TRANSP values (0, 'opaque' );
+insert into CALENDAR_TRANSP values (1, 'transparent');
+
+
+---------------------
+-- Calendar Object --
+---------------------
+
+create table CALENDAR_OBJECT (
+  RESOURCE_ID          integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  CALENDAR_RESOURCE_ID integer      not null references CALENDAR on delete cascade,
+  RESOURCE_NAME        varchar(255) not null,
+  ICALENDAR_TEXT       text         not null,
+  ICALENDAR_UID        varchar(255) not null,
+  ICALENDAR_TYPE       varchar(255) not null,
+  ATTACHMENTS_MODE     integer      default 0 not null, -- enum CALENDAR_OBJECT_ATTACHMENTS_MODE
+  DROPBOX_ID           varchar(255),
+  ORGANIZER            varchar(255),
+  RECURRANCE_MIN       date,        -- minimum date that recurrences have been expanded to.
+  RECURRANCE_MAX       date,        -- maximum date that recurrences have been expanded to.
+  ACCESS               integer      default 0 not null,
+  SCHEDULE_OBJECT      boolean      default false,
+  SCHEDULE_TAG         varchar(36)  default null,
+  SCHEDULE_ETAGS       text         default null,
+  PRIVATE_COMMENTS     boolean      default false not null,
+  MD5                  char(32)     not null,
+  CREATED              timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED             timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique (CALENDAR_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+
+  -- since the 'inbox' is a 'calendar resource' for the purpose of storing
+  -- calendar objects, this constraint has to be selectively enforced by the
+  -- application layer.
+
+  -- unique(CALENDAR_RESOURCE_ID, ICALENDAR_UID)
+);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_AND_ICALENDAR_UID on
+  CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_RECURRANCE_MAX on
+  CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, RECURRANCE_MAX);
+
+create index CALENDAR_OBJECT_ICALENDAR_UID on
+  CALENDAR_OBJECT(ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_DROPBOX_ID on
+  CALENDAR_OBJECT(DROPBOX_ID);
+
+-- Enumeration of attachment modes
+
+create table CALENDAR_OBJECT_ATTACHMENTS_MODE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (0, 'none' );
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (1, 'read' );
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (2, 'write');
+
+
+-- Enumeration of calendar access types
+
+create table CALENDAR_ACCESS_TYPE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(32) not null unique
+);
+
+insert into CALENDAR_ACCESS_TYPE values (0, ''             );
+insert into CALENDAR_ACCESS_TYPE values (1, 'public'       );
+insert into CALENDAR_ACCESS_TYPE values (2, 'private'      );
+insert into CALENDAR_ACCESS_TYPE values (3, 'confidential' );
+insert into CALENDAR_ACCESS_TYPE values (4, 'restricted'   );
+
+
+-----------------
+-- Instance ID --
+-----------------
+
+create sequence INSTANCE_ID_SEQ;
+
+
+----------------
+-- Time Range --
+----------------
+
+create table TIME_RANGE (
+  INSTANCE_ID                 integer        primary key default nextval('INSTANCE_ID_SEQ'), -- implicit index
+  CALENDAR_RESOURCE_ID        integer        not null references CALENDAR on delete cascade,
+  CALENDAR_OBJECT_RESOURCE_ID integer        not null references CALENDAR_OBJECT on delete cascade,
+  FLOATING                    boolean        not null,
+  START_DATE                  timestamp      not null,
+  END_DATE                    timestamp      not null,
+  FBTYPE                      integer        not null,
+  TRANSPARENT                 boolean        not null
+);
+
+create index TIME_RANGE_CALENDAR_RESOURCE_ID on
+  TIME_RANGE(CALENDAR_RESOURCE_ID);
+create index TIME_RANGE_CALENDAR_OBJECT_RESOURCE_ID on
+  TIME_RANGE(CALENDAR_OBJECT_RESOURCE_ID);
+
+
+-- Enumeration of free/busy types
+
+create table FREE_BUSY_TYPE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into FREE_BUSY_TYPE values (0, 'unknown'         );
+insert into FREE_BUSY_TYPE values (1, 'free'            );
+insert into FREE_BUSY_TYPE values (2, 'busy'            );
+insert into FREE_BUSY_TYPE values (3, 'busy-unavailable');
+insert into FREE_BUSY_TYPE values (4, 'busy-tentative'  );
+
+
+------------------
+-- Transparency --
+------------------
+
+create table TRANSPARENCY (
+  TIME_RANGE_INSTANCE_ID      integer      not null references TIME_RANGE on delete cascade,
+  USER_ID                     varchar(255) not null,
+  TRANSPARENT                 boolean      not null
+);
+
+create index TRANSPARENCY_TIME_RANGE_INSTANCE_ID on
+  TRANSPARENCY(TIME_RANGE_INSTANCE_ID);
+
+
+----------------
+-- Attachment --
+----------------
+
+create sequence ATTACHMENT_ID_SEQ;
+
+create table ATTACHMENT (
+  ATTACHMENT_ID               integer           primary key default nextval('ATTACHMENT_ID_SEQ'), -- implicit index
+  CALENDAR_HOME_RESOURCE_ID   integer           not null references CALENDAR_HOME,
+  DROPBOX_ID                  varchar(255),
+  CONTENT_TYPE                varchar(255)      not null,
+  SIZE                        integer           not null,
+  MD5                         char(32)          not null,
+  CREATED                     timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                    timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  PATH                        varchar(1024)     not null
+);
+
+create index ATTACHMENT_CALENDAR_HOME_RESOURCE_ID on
+  ATTACHMENT(CALENDAR_HOME_RESOURCE_ID);
+
+-- Many-to-many relationship between attachments and calendar objects
+create table ATTACHMENT_CALENDAR_OBJECT (
+  ATTACHMENT_ID                  integer      not null references ATTACHMENT on delete cascade,
+  MANAGED_ID                     varchar(255) not null,
+  CALENDAR_OBJECT_RESOURCE_ID    integer      not null references CALENDAR_OBJECT on delete cascade,
+
+  primary key (ATTACHMENT_ID, CALENDAR_OBJECT_RESOURCE_ID), -- implicit index
+  unique (MANAGED_ID, CALENDAR_OBJECT_RESOURCE_ID) -- implicit index
+);
+
+create index ATTACHMENT_CALENDAR_OBJECT_CALENDAR_OBJECT_RESOURCE_ID on
+	ATTACHMENT_CALENDAR_OBJECT(CALENDAR_OBJECT_RESOURCE_ID);
+
+-----------------------
+-- Resource Property --
+-----------------------
+
+create table RESOURCE_PROPERTY (
+  RESOURCE_ID integer      not null, -- foreign key: *.RESOURCE_ID
+  NAME        varchar(255) not null,
+  VALUE       text         not null, -- FIXME: xml?
+  VIEWER_UID  varchar(255),
+
+  primary key (RESOURCE_ID, NAME, VIEWER_UID) -- implicit index
+);
+
+
+----------------------
+-- AddressBook Home --
+----------------------
+
+create table ADDRESSBOOK_HOME (
+  RESOURCE_ID      				integer			primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  ADDRESSBOOK_PROPERTY_STORE_ID	integer      	default nextval('RESOURCE_ID_SEQ') not null, 	-- implicit index
+  OWNER_UID        				varchar(255) 	not null unique,                                -- implicit index
+  DATAVERSION      				integer      	default 0 not null
+);
+
+
+-------------------------------
+-- AddressBook Home Metadata --
+-------------------------------
+
+create table ADDRESSBOOK_HOME_METADATA (
+  RESOURCE_ID      integer      primary key references ADDRESSBOOK_HOME on delete cascade, -- implicit index
+  QUOTA_USED_BYTES integer      default 0 not null,
+  CREATED          timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED         timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+-----------------------------
+-- Shared AddressBook Bind --
+-----------------------------
+
+-- Joins sharee ADDRESSBOOK_HOME and owner ADDRESSBOOK_HOME
+
+create table SHARED_ADDRESSBOOK_BIND (
+  ADDRESSBOOK_HOME_RESOURCE_ID			integer			not null references ADDRESSBOOK_HOME,
+  OWNER_HOME_RESOURCE_ID    			integer      	not null references ADDRESSBOOK_HOME on delete cascade,
+  ADDRESSBOOK_RESOURCE_NAME    			varchar(255) 	not null,
+  BIND_MODE                    			integer      	not null,	-- enum CALENDAR_BIND_MODE
+  BIND_STATUS                  			integer      	not null,	-- enum CALENDAR_BIND_STATUS
+  BIND_REVISION				   			integer      	default 0 not null,
+  MESSAGE                      			text,                  		-- FIXME: xml?
+
+  primary key (ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_NAME)     -- implicit index
+);
+
+create index SHARED_ADDRESSBOOK_BIND_RESOURCE_ID on
+  SHARED_ADDRESSBOOK_BIND(OWNER_HOME_RESOURCE_ID);
+
+
+------------------------
+-- AddressBook Object --
+------------------------
+
+create table ADDRESSBOOK_OBJECT (
+  RESOURCE_ID             		integer   		primary key default nextval('RESOURCE_ID_SEQ'),    -- implicit index
+  ADDRESSBOOK_HOME_RESOURCE_ID 	integer      	not null references ADDRESSBOOK_HOME on delete cascade,
+  RESOURCE_NAME           		varchar(255) 	not null,
+  VCARD_TEXT              		text         	not null,
+  VCARD_UID               		varchar(255) 	not null,
+  KIND 			  		  		integer      	not null,  -- enum ADDRESSBOOK_OBJECT_KIND
+  MD5                     		char(32)     	not null,
+  CREATED                 		timestamp    	default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                		timestamp    	default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, RESOURCE_NAME), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, VCARD_UID)      -- implicit index
+);
+
+
+-----------------------------
+-- AddressBook Object kind --
+-----------------------------
+
+create table ADDRESSBOOK_OBJECT_KIND (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND values (0, 'person');
+insert into ADDRESSBOOK_OBJECT_KIND values (1, 'group' );
+insert into ADDRESSBOOK_OBJECT_KIND values (2, 'resource');
+insert into ADDRESSBOOK_OBJECT_KIND values (3, 'location');
+
+
+---------------------------------
+-- Address Book Object Members --
+---------------------------------
+
+create table ABO_MEMBERS (
+    GROUP_ID              integer      not null references ADDRESSBOOK_OBJECT on delete cascade,	-- AddressBook Object's (kind=='group') RESOURCE_ID
+ 	ADDRESSBOOK_ID		  integer      not null references ADDRESSBOOK_HOME on delete cascade,
+    MEMBER_ID             integer      not null references ADDRESSBOOK_OBJECT,						-- member AddressBook Object's RESOURCE_ID
+
+    primary key (GROUP_ID, MEMBER_ID) -- implicit index
+);
+
+create index ABO_MEMBERS_ADDRESSBOOK_ID on
+	ABO_MEMBERS(ADDRESSBOOK_ID);
+create index ABO_MEMBERS_MEMBER_ID on
+	ABO_MEMBERS(MEMBER_ID);
+
+------------------------------------------
+-- Address Book Object Foreign Members  --
+------------------------------------------
+
+create table ABO_FOREIGN_MEMBERS (
+    GROUP_ID              integer      not null references ADDRESSBOOK_OBJECT on delete cascade,	-- AddressBook Object's (kind=='group') RESOURCE_ID
+ 	ADDRESSBOOK_ID		  integer      not null references ADDRESSBOOK_HOME on delete cascade,
+    MEMBER_ADDRESS  	  varchar(255) not null, 													-- member AddressBook Object's 'calendar' address
+
+    primary key (GROUP_ID, MEMBER_ADDRESS) -- implicit index
+);
+
+create index ABO_FOREIGN_MEMBERS_ADDRESSBOOK_ID on
+	ABO_FOREIGN_MEMBERS(ADDRESSBOOK_ID);
+
+-----------------------
+-- Shared Group Bind --
+-----------------------
+
+-- Joins ADDRESSBOOK_HOME and ADDRESSBOOK_OBJECT (kind == group)
+
+create table SHARED_GROUP_BIND (	
+  ADDRESSBOOK_HOME_RESOURCE_ID 		integer      not null references ADDRESSBOOK_HOME,
+  GROUP_RESOURCE_ID      			integer      not null references ADDRESSBOOK_OBJECT on delete cascade,
+  GROUP_ADDRESSBOOK_NAME			varchar(255) not null,
+  BIND_MODE                    		integer      not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS                  		integer      not null, -- enum CALENDAR_BIND_STATUS
+  BIND_REVISION				   		integer      default 0 not null,
+  MESSAGE                      		text,                  -- FIXME: xml?
+
+  primary key (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_RESOURCE_ID), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_ADDRESSBOOK_NAME)     -- implicit index
+);
+
+create index SHARED_GROUP_BIND_RESOURCE_ID on
+  SHARED_GROUP_BIND(GROUP_RESOURCE_ID);
+
+
+---------------
+-- Revisions --
+---------------
+
+create sequence REVISION_SEQ;
+
+
+-------------------------------
+-- Calendar Object Revisions --
+-------------------------------
+
+create table CALENDAR_OBJECT_REVISIONS (
+  CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
+  CALENDAR_RESOURCE_ID      integer      references CALENDAR,
+  CALENDAR_NAME             varchar(255) default null,
+  RESOURCE_NAME             varchar(255),
+  REVISION                  integer      default nextval('REVISION_SEQ') not null,
+  DELETED                   boolean      not null
+);
+
+create index CALENDAR_OBJECT_REVISIONS_HOME_RESOURCE_ID_CALENDAR_RESOURCE_ID
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, REVISION);
+
+
+----------------------------------
+-- AddressBook Object Revisions --
+----------------------------------
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+  ADDRESSBOOK_HOME_RESOURCE_ID 			integer			not null references ADDRESSBOOK_HOME,
+  OWNER_HOME_RESOURCE_ID    			integer     	references ADDRESSBOOK_HOME,
+  ADDRESSBOOK_NAME             			varchar(255) 	default null,
+  RESOURCE_NAME                			varchar(255),
+  REVISION                     			integer     	default nextval('REVISION_SEQ') not null,
+  DELETED                      			boolean      	not null
+);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_HOME_RESOURCE_ID_OWNER_HOME_RESOURCE_ID
+  on ADDRESSBOOK_OBJECT_REVISIONS(ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, REVISION);
+
+
+-----------------------------------
+-- Notification Object Revisions --
+-----------------------------------
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+  NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME on delete cascade,
+  RESOURCE_NAME                 varchar(255),
+  REVISION                      integer      default nextval('REVISION_SEQ') not null,
+  DELETED                       boolean      not null,
+
+  unique(NOTIFICATION_HOME_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+);
+
+create index NOTIFICATION_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on NOTIFICATION_OBJECT_REVISIONS(NOTIFICATION_HOME_RESOURCE_ID, REVISION);
+
+
+-------------------------------------------
+-- Apple Push Notification Subscriptions --
+-------------------------------------------
+
+create table APN_SUBSCRIPTIONS (
+  TOKEN                         varchar(255) not null,
+  RESOURCE_KEY                  varchar(255) not null,
+  MODIFIED                      integer      not null,
+  SUBSCRIBER_GUID               varchar(255) not null,
+  USER_AGENT                    varchar(255) default null,
+  IP_ADDR                       varchar(255) default null,
+
+  primary key (TOKEN, RESOURCE_KEY) -- implicit index
+);
+
+create index APN_SUBSCRIPTIONS_RESOURCE_KEY
+   on APN_SUBSCRIPTIONS(RESOURCE_KEY);
+
+   
+-----------------
+-- IMIP Tokens --
+-----------------
+
+create table IMIP_TOKENS (
+  TOKEN                         varchar(255) not null,
+  ORGANIZER                     varchar(255) not null,
+  ATTENDEE                      varchar(255) not null,
+  ICALUID                       varchar(255) not null,
+  ACCESSED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  primary key (ORGANIZER, ATTENDEE, ICALUID) -- implicit index
+);
+
+create index IMIP_TOKENS_TOKEN
+   on IMIP_TOKENS(TOKEN);
+
+   
+----------------
+-- Work Items --
+----------------
+
+create sequence WORKITEM_SEQ;
+
+
+---------------------------
+-- IMIP Invitation Work --
+---------------------------
+
+create table IMIP_INVITATION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  FROM_ADDR                     varchar(255) not null,
+  TO_ADDR                       varchar(255) not null,
+  ICALENDAR_TEXT                text         not null
+);
+
+
+-----------------------
+-- IMIP Polling Work --
+-----------------------
+
+create table IMIP_POLLING_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------
+-- IMIP Reply Work --
+---------------------
+
+create table IMIP_REPLY_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  ORGANIZER                     varchar(255) not null,
+  ATTENDEE                      varchar(255) not null,
+  ICALENDAR_TEXT                text         not null
+);
+
+
+------------------------
+-- Push Notifications --
+------------------------
+
+create table PUSH_NOTIFICATION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  PUSH_ID                       varchar(255) not null
+);
+
+-----------------
+-- GroupCacher --
+-----------------
+
+create table GROUP_CACHER_POLLING_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+--------------------------
+-- Object Splitter Work --
+--------------------------
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade
+);
+
+create index CALENDAR_OBJECT_SPLITTER_WORK_RESOURCE_ID on
+	CALENDAR_OBJECT_SPLITTER_WORK(RESOURCE_ID);
+
+--------------------
+-- Schema Version --
+--------------------
+
+create table CALENDARSERVER (
+  NAME                          varchar(255) primary key, -- implicit index
+  VALUE                         varchar(255)
+);
+
+insert into CALENDARSERVER values ('VERSION', '26');
+insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '5');
+insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_19_to_20.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_19_to_20.sql	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_19_to_20.sql	2013-11-08 03:33:05 UTC (rev 11912)
@@ -113,8 +113,12 @@
 -- Alter  ADDRESSBOOK_HOME --
 -----------------------------
 
+-- This is tricky as we have to create a new not null column and populate it, but we can't do
+-- not null immediately without a default - which we do not want. So we create the column without not null,
+-- do the updates, then add the constraint.
+
 alter table ADDRESSBOOK_HOME
-	add ("ADDRESSBOOK_PROPERTY_STORE_ID" integer not null);
+	add ("ADDRESSBOOK_PROPERTY_STORE_ID" integer);
 
 update ADDRESSBOOK_HOME
 	set	ADDRESSBOOK_PROPERTY_STORE_ID = (
@@ -133,8 +137,11 @@
 			ADDRESSBOOK_BIND.BIND_MODE = 0 and 	-- CALENDAR_BIND_MODE 'own'
 			ADDRESSBOOK_BIND.ADDRESSBOOK_RESOURCE_NAME = 'addressbook'
   	);
-	
 
+alter table ADDRESSBOOK_HOME
+	modify ("ADDRESSBOOK_PROPERTY_STORE_ID" not null);
+
+
 --------------------------------
 -- change  ADDRESSBOOK_OBJECT --
 --------------------------------
@@ -176,14 +183,15 @@
   	
 -- add non null constraints after update and delete are complete
 alter table ADDRESSBOOK_OBJECT
-	modify ("KIND" not null,
-            "ADDRESSBOOK_HOME_RESOURCE_ID" not null)
-	drop ("ADDRESSBOOK_RESOURCE_ID");
+        modify ("KIND" not null)
+        modify ("ADDRESSBOOK_HOME_RESOURCE_ID" not null);
 
+alter table ADDRESSBOOK_OBJECT
+        drop column ADDRESSBOOK_RESOURCE_ID cascade constraints;
 
 alter table ADDRESSBOOK_OBJECT
 	add unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "RESOURCE_NAME")
-	    unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "VCARD_UID");
+	add unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "VCARD_UID");
 
 ------------------------------------------
 -- change  ADDRESSBOOK_OBJECT_REVISIONS --

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_25_to_26.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_25_to_26.sql	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_25_to_26.sql	2013-11-08 03:33:05 UTC (rev 11912)
@@ -18,13 +18,23 @@
 -- Upgrade database schema from VERSION 25 to 26 --
 ---------------------------------------------------
 
--- Calendar home related updates
+-- Replace index
 
-alter table CALENDAR_HOME_METADATA
- add ("DEFAULT_POLLS" integer default null references CALENDAR on delete set null);
+drop index CALENDAR_OBJECT_REVIS_2643d556;
+create index CALENDAR_OBJECT_REVIS_6d9d929c on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
+);
 
-create index CALENDAR_HOME_METADAT_910264ce on CALENDAR_HOME_METADATA (
-    DEFAULT_POLLS
+
+drop index ADDRESSBOOK_OBJECT_RE_980b9872;
+create index ADDRESSBOOK_OBJECT_RE_00fe8288 on ADDRESSBOOK_OBJECT_REVISIONS (
+    OWNER_HOME_RESOURCE_ID,
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
 );
 
 

Added: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_26_to_27.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_26_to_27.sql	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_26_to_27.sql	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,33 @@
+----
+-- Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 26 to 27 --
+---------------------------------------------------
+
+-- Calendar home related updates
+
+alter table CALENDAR_HOME_METADATA
+ add ("DEFAULT_POLLS" integer default null references CALENDAR on delete set null);
+
+create index CALENDAR_HOME_METADAT_910264ce on CALENDAR_HOME_METADATA (
+    DEFAULT_POLLS
+);
+
+
+-- Now update the version
+-- No data upgrades
+update CALENDARSERVER set VALUE = '27' where NAME = 'VERSION';

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_13_to_14.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_13_to_14.sql	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_13_to_14.sql	2013-11-08 03:33:05 UTC (rev 11912)
@@ -26,6 +26,11 @@
  drop column SEEN_BY_OWNER;
 alter table CALENDAR_BIND
  drop column SEEN_BY_SHAREE;
+
+-- Don't allow nulls in the column we are about to constrain
+update CALENDAR_BIND
+	set CALENDAR_RESOURCE_NAME = 'Shared_' || CALENDAR_RESOURCE_ID || '_' || CALENDAR_HOME_RESOURCE_ID
+	where CALENDAR_RESOURCE_NAME is null;
 alter table CALENDAR_BIND
  alter column CALENDAR_RESOURCE_NAME 
   set not null;
@@ -34,6 +39,11 @@
  drop column SEEN_BY_OWNER;
 alter table ADDRESSBOOK_BIND
  drop column SEEN_BY_SHAREE;
+
+-- Don't allow nulls in the column we are about to constrain
+update ADDRESSBOOK_BIND
+	set ADDRESSBOOK_RESOURCE_NAME = 'Shared_' || ADDRESSBOOK_RESOURCE_ID || '_' || ADDRESSBOOK_HOME_RESOURCE_ID
+	where ADDRESSBOOK_RESOURCE_NAME is null;
 alter table ADDRESSBOOK_BIND
  alter column ADDRESSBOOK_RESOURCE_NAME
   set not null;

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_25_to_26.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_25_to_26.sql	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_25_to_26.sql	2013-11-08 03:33:05 UTC (rev 11912)
@@ -18,14 +18,15 @@
 -- Upgrade database schema from VERSION 25 to 26 --
 ---------------------------------------------------
 
--- Calendar home related updates
+-- Replace index
+drop index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME;
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
 
-alter table CALENDAR_HOME_METADATA
- add column DEFAULT_POLLS integer default null references CALENDAR on delete set null;
+drop index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME;
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
 
-create index CALENDAR_HOME_METADATA_DEFAULT_POLLS on
-	CALENDAR_HOME_METADATA(DEFAULT_POLLS);
-
 -- Now update the version
 -- No data upgrades
 update CALENDARSERVER set VALUE = '26' where NAME = 'VERSION';

Added: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_26_to_27.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_26_to_27.sql	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_26_to_27.sql	2013-11-08 03:33:05 UTC (rev 11912)
@@ -0,0 +1,31 @@
+----
+-- Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 26 to 27 --
+---------------------------------------------------
+
+-- Calendar home related updates
+
+alter table CALENDAR_HOME_METADATA
+ add column DEFAULT_POLLS integer default null references CALENDAR on delete set null;
+
+create index CALENDAR_HOME_METADATA_DEFAULT_POLLS on
+	CALENDAR_HOME_METADATA(DEFAULT_POLLS);
+
+-- Now update the version
+-- No data upgrades
+update CALENDARSERVER set VALUE = '27' where NAME = 'VERSION';

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_tables.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_tables.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql_tables.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -52,55 +52,39 @@
 # Column aliases, defined so that similar tables (such as CALENDAR_OBJECT and
 # ADDRESSBOOK_OBJECT) can be used according to a polymorphic interface.
 
-schema.CALENDAR_BIND.RESOURCE_NAME = \
-    schema.CALENDAR_BIND.CALENDAR_RESOURCE_NAME
-schema.CALENDAR_BIND.RESOURCE_ID = \
-    schema.CALENDAR_BIND.CALENDAR_RESOURCE_ID
-schema.CALENDAR_BIND.HOME_RESOURCE_ID = \
-    schema.CALENDAR_BIND.CALENDAR_HOME_RESOURCE_ID
-schema.SHARED_ADDRESSBOOK_BIND.RESOURCE_NAME = \
-    schema.SHARED_ADDRESSBOOK_BIND.ADDRESSBOOK_RESOURCE_NAME
-schema.SHARED_ADDRESSBOOK_BIND.RESOURCE_ID = \
-    schema.SHARED_ADDRESSBOOK_BIND.OWNER_HOME_RESOURCE_ID
-schema.SHARED_ADDRESSBOOK_BIND.HOME_RESOURCE_ID = \
-    schema.SHARED_ADDRESSBOOK_BIND.ADDRESSBOOK_HOME_RESOURCE_ID
-schema.SHARED_GROUP_BIND.RESOURCE_NAME = \
-    schema.SHARED_GROUP_BIND.GROUP_ADDRESSBOOK_NAME
-schema.SHARED_GROUP_BIND.RESOURCE_ID = \
-    schema.SHARED_GROUP_BIND.GROUP_RESOURCE_ID
-schema.SHARED_GROUP_BIND.HOME_RESOURCE_ID = \
-    schema.SHARED_GROUP_BIND.ADDRESSBOOK_HOME_RESOURCE_ID
-schema.CALENDAR_OBJECT_REVISIONS.RESOURCE_ID = \
-    schema.CALENDAR_OBJECT_REVISIONS.CALENDAR_RESOURCE_ID
-schema.CALENDAR_OBJECT_REVISIONS.HOME_RESOURCE_ID = \
-    schema.CALENDAR_OBJECT_REVISIONS.CALENDAR_HOME_RESOURCE_ID
-schema.CALENDAR_OBJECT_REVISIONS.COLLECTION_NAME = \
-    schema.CALENDAR_OBJECT_REVISIONS.CALENDAR_NAME
-schema.ADDRESSBOOK_OBJECT_REVISIONS.RESOURCE_ID = \
-    schema.ADDRESSBOOK_OBJECT_REVISIONS.OWNER_HOME_RESOURCE_ID
-schema.ADDRESSBOOK_OBJECT_REVISIONS.HOME_RESOURCE_ID = \
-    schema.ADDRESSBOOK_OBJECT_REVISIONS.ADDRESSBOOK_HOME_RESOURCE_ID
-schema.ADDRESSBOOK_OBJECT_REVISIONS.COLLECTION_NAME = \
-    schema.ADDRESSBOOK_OBJECT_REVISIONS.ADDRESSBOOK_NAME
-schema.NOTIFICATION_OBJECT_REVISIONS.HOME_RESOURCE_ID = \
-    schema.NOTIFICATION_OBJECT_REVISIONS.NOTIFICATION_HOME_RESOURCE_ID
-schema.NOTIFICATION_OBJECT_REVISIONS.RESOURCE_ID = \
-    schema.NOTIFICATION_OBJECT_REVISIONS.NOTIFICATION_HOME_RESOURCE_ID
-schema.CALENDAR_OBJECT.TEXT = \
-    schema.CALENDAR_OBJECT.ICALENDAR_TEXT
-schema.CALENDAR_OBJECT.UID = \
-    schema.CALENDAR_OBJECT.ICALENDAR_UID
-schema.CALENDAR_OBJECT.PARENT_RESOURCE_ID = \
-    schema.CALENDAR_OBJECT.CALENDAR_RESOURCE_ID
-schema.ADDRESSBOOK_OBJECT.TEXT = \
-    schema.ADDRESSBOOK_OBJECT.VCARD_TEXT
-schema.ADDRESSBOOK_OBJECT.UID = \
-    schema.ADDRESSBOOK_OBJECT.VCARD_UID
-schema.ADDRESSBOOK_OBJECT.PARENT_RESOURCE_ID = \
-    schema.ADDRESSBOOK_OBJECT.ADDRESSBOOK_HOME_RESOURCE_ID
+schema.CALENDAR_BIND.RESOURCE_NAME = schema.CALENDAR_BIND.CALENDAR_RESOURCE_NAME
+schema.CALENDAR_BIND.RESOURCE_ID = schema.CALENDAR_BIND.CALENDAR_RESOURCE_ID
+schema.CALENDAR_BIND.HOME_RESOURCE_ID = schema.CALENDAR_BIND.CALENDAR_HOME_RESOURCE_ID
 
+schema.SHARED_ADDRESSBOOK_BIND.RESOURCE_NAME = schema.SHARED_ADDRESSBOOK_BIND.ADDRESSBOOK_RESOURCE_NAME
+schema.SHARED_ADDRESSBOOK_BIND.RESOURCE_ID = schema.SHARED_ADDRESSBOOK_BIND.OWNER_HOME_RESOURCE_ID
+schema.SHARED_ADDRESSBOOK_BIND.HOME_RESOURCE_ID = schema.SHARED_ADDRESSBOOK_BIND.ADDRESSBOOK_HOME_RESOURCE_ID
 
+schema.SHARED_GROUP_BIND.RESOURCE_NAME = schema.SHARED_GROUP_BIND.GROUP_ADDRESSBOOK_NAME
+schema.SHARED_GROUP_BIND.RESOURCE_ID = schema.SHARED_GROUP_BIND.GROUP_RESOURCE_ID
+schema.SHARED_GROUP_BIND.HOME_RESOURCE_ID = schema.SHARED_GROUP_BIND.ADDRESSBOOK_HOME_RESOURCE_ID
 
+schema.CALENDAR_OBJECT_REVISIONS.RESOURCE_ID = schema.CALENDAR_OBJECT_REVISIONS.CALENDAR_RESOURCE_ID
+schema.CALENDAR_OBJECT_REVISIONS.HOME_RESOURCE_ID = schema.CALENDAR_OBJECT_REVISIONS.CALENDAR_HOME_RESOURCE_ID
+schema.CALENDAR_OBJECT_REVISIONS.COLLECTION_NAME = schema.CALENDAR_OBJECT_REVISIONS.CALENDAR_NAME
+
+schema.ADDRESSBOOK_OBJECT_REVISIONS.RESOURCE_ID = schema.ADDRESSBOOK_OBJECT_REVISIONS.OWNER_HOME_RESOURCE_ID
+schema.ADDRESSBOOK_OBJECT_REVISIONS.HOME_RESOURCE_ID = schema.ADDRESSBOOK_OBJECT_REVISIONS.ADDRESSBOOK_HOME_RESOURCE_ID
+schema.ADDRESSBOOK_OBJECT_REVISIONS.COLLECTION_NAME = schema.ADDRESSBOOK_OBJECT_REVISIONS.ADDRESSBOOK_NAME
+
+schema.NOTIFICATION_OBJECT_REVISIONS.HOME_RESOURCE_ID = schema.NOTIFICATION_OBJECT_REVISIONS.NOTIFICATION_HOME_RESOURCE_ID
+schema.NOTIFICATION_OBJECT_REVISIONS.RESOURCE_ID = schema.NOTIFICATION_OBJECT_REVISIONS.NOTIFICATION_HOME_RESOURCE_ID
+
+schema.CALENDAR_OBJECT.TEXT = schema.CALENDAR_OBJECT.ICALENDAR_TEXT
+schema.CALENDAR_OBJECT.UID = schema.CALENDAR_OBJECT.ICALENDAR_UID
+schema.CALENDAR_OBJECT.PARENT_RESOURCE_ID = schema.CALENDAR_OBJECT.CALENDAR_RESOURCE_ID
+
+schema.ADDRESSBOOK_OBJECT.TEXT = schema.ADDRESSBOOK_OBJECT.VCARD_TEXT
+schema.ADDRESSBOOK_OBJECT.UID = schema.ADDRESSBOOK_OBJECT.VCARD_UID
+schema.ADDRESSBOOK_OBJECT.PARENT_RESOURCE_ID = schema.ADDRESSBOOK_OBJECT.ADDRESSBOOK_HOME_RESOURCE_ID
+
+
+
 def _combine(**kw):
     """
     Combine two table dictionaries used in a join to produce a single dictionary

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/test/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/test/util.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/test/util.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -726,7 +726,7 @@
         return "/%s/%s/%s/" % (prefix, self.hostname, id)
 
 
-    def send(self, prefix, id):
+    def send(self, prefix, id, txn):
         self.history.append(self.pushKeyForId(prefix, id))
 
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/test/test_upgrade.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/test/test_upgrade.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/test/test_upgrade.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -25,8 +25,8 @@
 from twisted.trial.unittest import TestCase
 from txdav.common.datastore.sql_dump import dumpSchema
 from txdav.common.datastore.test.util import theStoreBuilder, StubNotifierFactory
-from txdav.common.datastore.upgrade.sql.upgrade import UpgradeDatabaseSchemaStep, \
-    UpgradeDatabaseAddressBookDataStep, UpgradeDatabaseCalendarDataStep
+from txdav.common.datastore.upgrade.sql.upgrade import (
+    UpgradeDatabaseSchemaStep, UpgradeDatabaseAddressBookDataStep, UpgradeDatabaseCalendarDataStep, NotAllowedToUpgrade)
 import re
 
 class SchemaUpgradeTests(TestCase):
@@ -215,12 +215,12 @@
         old_version = yield _loadVersion()
         try:
             yield upgrader.databaseUpgrade()
-        except RuntimeError:
+        except NotAllowedToUpgrade:
             pass
         except Exception:
-            self.fail("RuntimeError not raised")
+            self.fail("NotAllowedToUpgrade not raised")
         else:
-            self.fail("RuntimeError not raised")
+            self.fail("NotAllowedToUpgrade not raised")
         new_version = yield _loadVersion()
         yield _unloadOldSchema()
 

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrade.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrade.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrade.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -74,11 +74,15 @@
         yield sqlTxn.commit()
 
 
-    def stepWithFailure(self, failure):
-        return self.stepWithResult(None)
 
+class NotAllowedToUpgrade(Exception):
+    """
+    Exception indicating an upgrade is needed but we're not configured to
+    perform it.
+    """
 
 
+
 class UpgradeDatabaseCoreStep(object):
     """
     Base class for either schema or data upgrades on the database.
@@ -136,8 +140,7 @@
             self.log.error(msg)
             raise RuntimeError(msg)
         elif self.failIfUpgradeNeeded:
-                # TODO: change this exception to be upgrade-specific
-            raise RuntimeError("Database upgrade is needed but not allowed.")
+            raise NotAllowedToUpgrade()
         else:
             self.sqlStore.setUpgrading(True)
             yield self.upgradeVersion(actual_version, required_version, dialect)

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -22,7 +22,8 @@
 from txdav.base.propertystore.base import PropertyName
 from txdav.common.datastore.sql_tables import _ABO_KIND_GROUP, schema
 from txdav.common.datastore.upgrade.sql.upgrades.util import updateAddressBookDataVersion, \
-    doToEachHomeNotAtVersion, removeProperty, cleanPropertyStore
+    doToEachHomeNotAtVersion, removeProperty, cleanPropertyStore, \
+    logUpgradeStatus
 from txdav.xml import element
 
 """
@@ -73,14 +74,20 @@
                 #update rest
                 yield abObject.setComponent(component)
 
+    logUpgradeStatus("Starting Addressbook Populate Members")
+
     # Do this to each calendar home not already at version 2
-    yield doToEachHomeNotAtVersion(sqlStore, schema.ADDRESSBOOK_HOME, UPGRADE_TO_VERSION, doIt)
+    yield doToEachHomeNotAtVersion(sqlStore, schema.ADDRESSBOOK_HOME, UPGRADE_TO_VERSION, doIt, "Populate Members")
 
 
 
 @inlineCallbacks
 def removeResourceType(sqlStore):
+    logUpgradeStatus("Starting Addressbook Remove Resource Type")
+
     sqlTxn = sqlStore.newTransaction()
     yield removeProperty(sqlTxn, PropertyName.fromElement(element.ResourceType))
     yield sqlTxn.commit()
     yield cleanPropertyStore()
+
+    logUpgradeStatus("End Addressbook Remove Resource Type")

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -16,12 +16,16 @@
 ##
 
 from twext.enterprise.dal.syntax import Update
-from txdav.xml.parser import WebDAVDocument
+
 from twisted.internet.defer import inlineCallbacks
+
 from twistedcaldav import caldavxml
+
 from txdav.common.datastore.sql_tables import schema
 from txdav.common.datastore.upgrade.sql.upgrades.util import rowsForProperty,\
-    removeProperty, updateCalendarDataVersion, doToEachHomeNotAtVersion
+    removeProperty, updateCalendarDataVersion, doToEachHomeNotAtVersion, \
+    logUpgradeStatus, logUpgradeError
+from txdav.xml.parser import WebDAVDocument
 
 """
 Calendar data upgrade from database version 1 to 2
@@ -50,9 +54,14 @@
     extracting the new format value from the XML property.
     """
 
+    logUpgradeStatus("Starting Move supported-component-set")
+
     sqlTxn = sqlStore.newTransaction()
     try:
+        calendar_rid = None
         rows = (yield rowsForProperty(sqlTxn, caldavxml.SupportedCalendarComponentSet))
+        total = len(rows)
+        count = 0
         for calendar_rid, value in rows:
             prop = WebDAVDocument.fromString(value).root_element
             supported_components = ",".join(sorted([comp.attributes["name"].upper() for comp in prop.children]))
@@ -63,11 +72,19 @@
                 },
                 Where=(meta.RESOURCE_ID == calendar_rid)
             ).on(sqlTxn)
+            count += 1
+            logUpgradeStatus("Move supported-component-set", count, total)
 
         yield removeProperty(sqlTxn, caldavxml.SupportedCalendarComponentSet)
         yield sqlTxn.commit()
+
+        logUpgradeStatus("End Move supported-component-set")
     except RuntimeError:
         yield sqlTxn.abort()
+        logUpgradeError(
+            "Move supported-component-set",
+            "Last calendar: {}".format(calendar_rid)
+        )
         raise
 
 
@@ -86,5 +103,7 @@
         home = yield txn.calendarHomeWithResourceID(homeResourceID)
         yield home.splitCalendars()
 
+    logUpgradeStatus("Starting Split Calendars")
+
     # Do this to each calendar home not already at version 2
-    yield doToEachHomeNotAtVersion(sqlStore, schema.CALENDAR_HOME, UPGRADE_TO_VERSION, doIt)
+    yield doToEachHomeNotAtVersion(sqlStore, schema.CALENDAR_HOME, UPGRADE_TO_VERSION, doIt, "Split Calendars")

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_3_to_4.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_3_to_4.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_3_to_4.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -15,19 +15,17 @@
 # limitations under the License.
 ##
 
-from twext.enterprise.dal.syntax import Select, Delete, Parameter
-
 from twisted.internet.defer import inlineCallbacks
 
 from twistedcaldav import caldavxml, customxml
 
 from txdav.base.propertystore.base import PropertyName
-from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
-from txdav.common.datastore.upgrade.sql.upgrades.util import rowsForProperty, updateCalendarDataVersion, \
-    updateAllCalendarHomeDataVersions, removeProperty, cleanPropertyStore
-from txdav.xml.parser import WebDAVDocument
+from txdav.caldav.icalendarstore import InvalidDefaultCalendar
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.datastore.upgrade.sql.upgrades.util import updateCalendarDataVersion, \
+    removeProperty, cleanPropertyStore, logUpgradeStatus, doToEachHomeNotAtVersion
 from txdav.xml import element
-from twisted.python.failure import Failure
+from twistedcaldav.config import config
 
 """
 Data upgrade from database version 3 to 4
@@ -41,167 +39,114 @@
     """
     Do the required upgrade steps.
     """
-    yield moveDefaultCalendarProperties(sqlStore)
-    yield moveCalendarTranspProperties(sqlStore)
-    yield moveDefaultAlarmProperties(sqlStore)
-    yield removeResourceType(sqlStore)
+    yield updateCalendarHomes(sqlStore, config.UpgradeHomePrefix)
 
-    # Always bump the DB value
-    yield updateCalendarDataVersion(sqlStore, UPGRADE_TO_VERSION)
-    yield updateAllCalendarHomeDataVersions(sqlStore, UPGRADE_TO_VERSION)
+    # Don't do the remaining upgrade if we are only processing a subset of the homes
+    if not config.UpgradeHomePrefix:
+        yield removeResourceType(sqlStore)
 
+        # Always bump the DB value
+        yield updateCalendarDataVersion(sqlStore, UPGRADE_TO_VERSION)
 
 
+
 @inlineCallbacks
-def moveDefaultCalendarProperties(sqlStore):
+def updateCalendarHomes(sqlStore, prefix=None):
     """
-    Need to move all the CalDAV:default-calendar and CS:default-tasks properties in the
-    RESOURCE_PROPERTY table to the new CALENDAR_HOME_METADATA table columns, extracting
-    the new value from the XML property.
+    For each calendar home, update the associated properties on the home or its owned calendars.
     """
 
-    meta = schema.CALENDAR_HOME_METADATA
-    yield _processDefaultCalendarProperty(sqlStore, caldavxml.ScheduleDefaultCalendarURL, meta.DEFAULT_EVENTS)
-    yield _processDefaultCalendarProperty(sqlStore, customxml.ScheduleDefaultTasksURL, meta.DEFAULT_TASKS)
+    yield doToEachHomeNotAtVersion(sqlStore, schema.CALENDAR_HOME, UPGRADE_TO_VERSION, updateCalendarHome, "Update Calendar Home", filterOwnerUID=prefix)
 
 
 
 @inlineCallbacks
-def _processDefaultCalendarProperty(sqlStore, propname, colname):
+def updateCalendarHome(txn, homeResourceID):
     """
-    Move the specified property value to the matching CALENDAR_HOME_METADATA table column.
-
-    Since the number of calendar homes may well be large, we need to do this in batches.
+    For this calendar home, update the associated properties on the home or its owned calendars.
     """
 
-    cb = schema.CALENDAR_BIND
-    rp = schema.RESOURCE_PROPERTY
+    home = yield txn.calendarHomeWithResourceID(homeResourceID)
+    yield moveDefaultCalendarProperties(home)
+    yield moveCalendarTranspProperties(home)
+    yield moveDefaultAlarmProperties(home)
+    yield cleanPropertyStore()
 
-    try:
-        while True:
-            sqlTxn = sqlStore.newTransaction()
-            rows = (yield rowsForProperty(sqlTxn, propname, batch=BATCH_SIZE))
-            if len(rows) == 0:
-                yield sqlTxn.commit()
-                break
-            delete_ids = []
-            for inbox_rid, value in rows:
-                delete_ids.append(inbox_rid)
-                ids = yield Select(
-                    [cb.CALENDAR_HOME_RESOURCE_ID, ],
-                    From=cb,
-                    Where=cb.CALENDAR_RESOURCE_ID == inbox_rid,
-                ).on(sqlTxn)
-                if len(ids) > 0:
 
-                    calendarHome = (yield sqlTxn.calendarHomeWithResourceID(ids[0][0]))
-                    if calendarHome is not None:
 
-                        prop = WebDAVDocument.fromString(value).root_element
-                        defaultCalendar = str(prop.children[0])
-                        parts = defaultCalendar.split("/")
-                        if len(parts) == 5:
+@inlineCallbacks
+def moveDefaultCalendarProperties(home):
+    """
+    Need to move all the CalDAV:default-calendar and CS:default-tasks properties in the
+    RESOURCE_PROPERTY table to the new CALENDAR_HOME_METADATA table columns, extracting
+    the new value from the XML property.
+    """
 
-                            calendarName = parts[-1]
-                            calendarHomeUID = parts[-2]
-                            expectedHome = (yield sqlTxn.calendarHomeWithUID(calendarHomeUID))
-                            if expectedHome is not None and expectedHome.id() == calendarHome.id():
+    yield _processDefaultCalendarProperty(home, caldavxml.ScheduleDefaultCalendarURL)
+    yield _processDefaultCalendarProperty(home, customxml.ScheduleDefaultTasksURL)
 
-                                calendar = (yield calendarHome.calendarWithName(calendarName))
-                                if calendar is not None:
-                                    if propname == caldavxml.ScheduleDefaultCalendarURL:
-                                        ctype = "VEVENT"
-                                    elif propname == customxml.ScheduleDefaultTasksURL:
-                                        ctype = "VTODO"
-                                    yield calendarHome.setDefaultCalendar(calendar, ctype)
 
-            # Always delete the rows so that batch processing works correctly
-            yield Delete(
-                From=rp,
-                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(delete_ids)))).And
-                      (rp.NAME == PropertyName.fromElement(propname).toString()),
-            ).on(sqlTxn, ids=delete_ids)
 
-            yield sqlTxn.commit()
+@inlineCallbacks
+def _processDefaultCalendarProperty(home, propname):
+    """
+    Move the specified property value to the matching CALENDAR_HOME_METADATA table column.
+    """
 
-        yield cleanPropertyStore()
+    inbox = (yield home.calendarWithName("inbox"))
+    if inbox is not None:
+        prop = inbox.properties().get(PropertyName.fromElement(propname))
+        if prop is not None:
+            defaultCalendar = str(prop.children[0])
+            parts = defaultCalendar.split("/")
+            if len(parts) == 5:
 
-    except RuntimeError:
-        f = Failure()
-        yield sqlTxn.abort()
-        f.raiseException()
+                calendarName = parts[-1]
+                calendarHomeUID = parts[-2]
+                if calendarHomeUID == home.uid():
 
+                    calendar = (yield home.calendarWithName(calendarName))
+                    if calendar is not None:
+                        try:
+                            if propname == caldavxml.ScheduleDefaultCalendarURL:
+                                ctype = "VEVENT"
+                            elif propname == customxml.ScheduleDefaultTasksURL:
+                                ctype = "VTODO"
+                            yield home.setDefaultCalendar(
+                                calendar, ctype
+                            )
+                        except InvalidDefaultCalendar:
+                            # Ignore these - the server will recover
+                            pass
 
+            del inbox.properties()[PropertyName.fromElement(propname)]
 
+
+
 @inlineCallbacks
-def moveCalendarTranspProperties(sqlStore):
+def moveCalendarTranspProperties(home):
     """
     Need to move all the CalDAV:schedule-calendar-transp properties in the
     RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
     the new value from the XML property.
     """
 
-    cb = schema.CALENDAR_BIND
-    rp = schema.RESOURCE_PROPERTY
+    # Iterate over each calendar (both owned and shared)
+    calendars = (yield home.loadChildren())
+    for calendar in calendars:
+        if calendar.isInbox():
+            prop = calendar.properties().get(PropertyName.fromElement(caldavxml.CalendarFreeBusySet))
+            if prop is not None:
+                del calendar.properties()[PropertyName.fromElement(caldavxml.CalendarFreeBusySet)]
+        prop = calendar.properties().get(PropertyName.fromElement(caldavxml.ScheduleCalendarTransp))
+        if prop is not None:
+            yield calendar.setUsedForFreeBusy(prop == caldavxml.ScheduleCalendarTransp(caldavxml.Opaque()))
+            del calendar.properties()[PropertyName.fromElement(caldavxml.ScheduleCalendarTransp)]
 
-    try:
-        calendars_for_id = {}
-        while True:
-            sqlTxn = sqlStore.newTransaction()
-            rows = (yield rowsForProperty(sqlTxn, caldavxml.ScheduleCalendarTransp, with_uid=True, batch=BATCH_SIZE))
-            if len(rows) == 0:
-                yield sqlTxn.commit()
-                break
-            delete_ids = []
-            for calendar_rid, value, viewer in rows:
-                delete_ids.append(calendar_rid)
-                if calendar_rid not in calendars_for_id:
-                    ids = yield Select(
-                        [cb.CALENDAR_HOME_RESOURCE_ID, cb.BIND_MODE, ],
-                        From=cb,
-                        Where=cb.CALENDAR_RESOURCE_ID == calendar_rid,
-                    ).on(sqlTxn)
-                    calendars_for_id[calendar_rid] = ids
 
-                if viewer:
-                    calendarHome = (yield sqlTxn.calendarHomeWithUID(viewer))
-                else:
-                    calendarHome = None
-                    for row in calendars_for_id[calendar_rid]:
-                        home_id, bind_mode = row
-                        if bind_mode == _BIND_MODE_OWN:
-                            calendarHome = (yield sqlTxn.calendarHomeWithResourceID(home_id))
-                            break
 
-                if calendarHome is not None:
-                    prop = WebDAVDocument.fromString(value).root_element
-                    calendar = (yield calendarHome.childWithID(calendar_rid))
-                    if calendar is not None:
-                        yield calendar.setUsedForFreeBusy(prop == caldavxml.ScheduleCalendarTransp(caldavxml.Opaque()))
-
-            # Always delete the rows so that batch processing works correctly
-            yield Delete(
-                From=rp,
-                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(delete_ids)))).And
-                      (rp.NAME == PropertyName.fromElement(caldavxml.ScheduleCalendarTransp).toString()),
-            ).on(sqlTxn, ids=delete_ids)
-
-            yield sqlTxn.commit()
-
-        sqlTxn = sqlStore.newTransaction()
-        yield removeProperty(sqlTxn, PropertyName.fromElement(caldavxml.CalendarFreeBusySet))
-        yield sqlTxn.commit()
-        yield cleanPropertyStore()
-
-    except RuntimeError:
-        f = Failure()
-        yield sqlTxn.abort()
-        f.raiseException()
-
-
-
 @inlineCallbacks
-def moveDefaultAlarmProperties(sqlStore):
+def moveDefaultAlarmProperties(home):
     """
     Need to move all the CalDAV:default-calendar and CS:default-tasks properties in the
     RESOURCE_PROPERTY table to the new CALENDAR_HOME_METADATA table columns, extracting
@@ -209,25 +154,25 @@
     """
 
     yield _processDefaultAlarmProperty(
-        sqlStore,
+        home,
         caldavxml.DefaultAlarmVEventDateTime,
         True,
         True,
     )
     yield _processDefaultAlarmProperty(
-        sqlStore,
+        home,
         caldavxml.DefaultAlarmVEventDate,
         True,
         False,
     )
     yield _processDefaultAlarmProperty(
-        sqlStore,
+        home,
         caldavxml.DefaultAlarmVToDoDateTime,
         False,
         True,
     )
     yield _processDefaultAlarmProperty(
-        sqlStore,
+        home,
         caldavxml.DefaultAlarmVToDoDate,
         False,
         False,
@@ -236,90 +181,40 @@
 
 
 @inlineCallbacks
-def _processDefaultAlarmProperty(sqlStore, propname, vevent, timed):
+def _processDefaultAlarmProperty(home, propname, vevent, timed):
     """
     Move the specified property value to the matching CALENDAR_HOME_METADATA or CALENDAR_BIND table column.
 
     Since the number of properties may well be large, we need to do this in batches.
     """
 
-    hm = schema.CALENDAR_HOME_METADATA
-    cb = schema.CALENDAR_BIND
-    rp = schema.RESOURCE_PROPERTY
+    # Check the home first
+    prop = home.properties().get(PropertyName.fromElement(propname))
+    if prop is not None:
+        alarm = str(prop.children[0]) if prop.children else None
+        yield home.setDefaultAlarm(alarm, vevent, timed)
+        del home.properties()[PropertyName.fromElement(propname)]
 
-    try:
-        calendars_for_id = {}
-        while True:
-            sqlTxn = sqlStore.newTransaction()
-            rows = (yield rowsForProperty(sqlTxn, propname, with_uid=True, batch=BATCH_SIZE))
-            if len(rows) == 0:
-                yield sqlTxn.commit()
-                break
-            delete_ids = []
-            for rid, value, viewer in rows:
-                delete_ids.append(rid)
+    # Now each child
+    calendars = (yield home.loadChildren())
+    for calendar in calendars:
+        if calendar.isInbox():
+            continue
+        prop = calendar.properties().get(PropertyName.fromElement(propname))
+        if prop is not None:
+            alarm = str(prop.children[0]) if prop.children else None
+            yield calendar.setDefaultAlarm(alarm, vevent, timed)
+            del calendar.properties()[PropertyName.fromElement(propname)]
 
-                prop = WebDAVDocument.fromString(value).root_element
-                alarm = str(prop.children[0]) if prop.children else None
 
-                # First check if the rid is a home - this is the most common case
-                ids = yield Select(
-                    [hm.RESOURCE_ID, ],
-                    From=hm,
-                    Where=hm.RESOURCE_ID == rid,
-                ).on(sqlTxn)
 
-                if len(ids) > 0:
-                    # Home object
-                    calendarHome = (yield sqlTxn.calendarHomeWithResourceID(ids[0][0]))
-                    if calendarHome is not None:
-                        yield calendarHome.setDefaultAlarm(alarm, vevent, timed)
-                else:
-                    # rid is a calendar - we need to find the per-user calendar for the resource viewer
-                    if rid not in calendars_for_id:
-                        ids = yield Select(
-                            [cb.CALENDAR_HOME_RESOURCE_ID, cb.BIND_MODE, ],
-                            From=cb,
-                            Where=cb.CALENDAR_RESOURCE_ID == rid,
-                        ).on(sqlTxn)
-                        calendars_for_id[rid] = ids
-
-                    if viewer:
-                        calendarHome = (yield sqlTxn.calendarHomeWithUID(viewer))
-                    else:
-                        calendarHome = None
-                        for row in calendars_for_id[rid]:
-                            home_id, bind_mode = row
-                            if bind_mode == _BIND_MODE_OWN:
-                                calendarHome = (yield sqlTxn.calendarHomeWithResourceID(home_id))
-                                break
-
-                    if calendarHome is not None:
-                        calendar = yield calendarHome.childWithID(rid)
-                        if calendar is not None:
-                            yield calendar.setDefaultAlarm(alarm, vevent, timed)
-
-            # Always delete the rows so that batch processing works correctly
-            yield Delete(
-                From=rp,
-                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(delete_ids)))).And
-                      (rp.NAME == PropertyName.fromElement(propname).toString()),
-            ).on(sqlTxn, ids=delete_ids)
-
-            yield sqlTxn.commit()
-
-        yield cleanPropertyStore()
-
-    except RuntimeError:
-        f = Failure()
-        yield sqlTxn.abort()
-        f.raiseException()
-
-
-
 @inlineCallbacks
 def removeResourceType(sqlStore):
+    logUpgradeStatus("Starting Calendar Remove Resource Type")
+
     sqlTxn = sqlStore.newTransaction()
     yield removeProperty(sqlTxn, PropertyName.fromElement(element.ResourceType))
     yield sqlTxn.commit()
     yield cleanPropertyStore()
+
+    logUpgradeStatus("End Calendar Remove Resource Type")

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_4_to_5.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_4_to_5.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_4_to_5.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -15,21 +15,18 @@
 # limitations under the License.
 ##
 
-from twext.enterprise.dal.syntax import Select, Delete, Parameter
+from twext.web2.dav.resource import TwistedQuotaUsedProperty, TwistedGETContentMD5
 
 from twisted.internet.defer import inlineCallbacks
-from twisted.python.failure import Failure
 
 from twistedcaldav import caldavxml, customxml
+from twistedcaldav.config import config
 
 from txdav.base.propertystore.base import PropertyName
-from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
-from txdav.common.datastore.upgrade.sql.upgrades.util import rowsForProperty, updateCalendarDataVersion, \
-    updateAllCalendarHomeDataVersions, removeProperty, cleanPropertyStore
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.datastore.upgrade.sql.upgrades.util import updateCalendarDataVersion, \
+    removeProperty, cleanPropertyStore, logUpgradeStatus, doToEachHomeNotAtVersion
 from txdav.xml import element
-from txdav.xml.parser import WebDAVDocument
-from twext.web2.dav.resource import TwistedQuotaUsedProperty, \
-    TwistedGETContentMD5
 
 """
 Data upgrade from database version 4 to 5
@@ -43,136 +40,76 @@
     """
     Do the required upgrade steps.
     """
-    yield moveCalendarTimezoneProperties(sqlStore)
-    yield moveCalendarAvailabilityProperties(sqlStore)
-    yield removeOtherProperties(sqlStore)
+    yield updateCalendarHomes(sqlStore, config.UpgradeHomePrefix)
 
-    # Always bump the DB value
-    yield updateCalendarDataVersion(sqlStore, UPGRADE_TO_VERSION)
-    yield updateAllCalendarHomeDataVersions(sqlStore, UPGRADE_TO_VERSION)
+    # Don't do the remaining upgrade if we are only processing a subset of the homes
+    if not config.UpgradeHomePrefix:
+        yield removeOtherProperties(sqlStore)
 
+        # Always bump the DB value
+        yield updateCalendarDataVersion(sqlStore, UPGRADE_TO_VERSION)
 
 
+
 @inlineCallbacks
-def moveCalendarTimezoneProperties(sqlStore):
+def updateCalendarHomes(sqlStore, prefix=None):
     """
-    Need to move all the CalDAV:calendar-timezone properties in the
-    RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
-    the new value from the XML property.
+    For each calendar home, update the associated properties on the home or its owned calendars.
     """
 
-    cb = schema.CALENDAR_BIND
-    rp = schema.RESOURCE_PROPERTY
+    yield doToEachHomeNotAtVersion(sqlStore, schema.CALENDAR_HOME, UPGRADE_TO_VERSION, updateCalendarHome, "Update Calendar Home", filterOwnerUID=prefix)
 
-    try:
-        calendars_for_id = {}
-        while True:
-            sqlTxn = sqlStore.newTransaction()
-            rows = (yield rowsForProperty(sqlTxn, caldavxml.CalendarTimeZone, with_uid=True, batch=BATCH_SIZE))
-            if len(rows) == 0:
-                yield sqlTxn.commit()
-                break
-            delete_ids = []
-            for calendar_rid, value, viewer in rows:
-                delete_ids.append(calendar_rid)
-                if calendar_rid not in calendars_for_id:
-                    ids = yield Select(
-                        [cb.CALENDAR_HOME_RESOURCE_ID, cb.BIND_MODE, ],
-                        From=cb,
-                        Where=cb.CALENDAR_RESOURCE_ID == calendar_rid,
-                    ).on(sqlTxn)
-                    calendars_for_id[calendar_rid] = ids
 
-                if viewer:
-                    calendarHome = (yield sqlTxn.calendarHomeWithUID(viewer))
-                else:
-                    calendarHome = None
-                    for row in calendars_for_id[calendar_rid]:
-                        home_id, bind_mode = row
-                        if bind_mode == _BIND_MODE_OWN:
-                            calendarHome = (yield sqlTxn.calendarHomeWithResourceID(home_id))
-                            break
 
-                if calendarHome is not None:
-                    prop = WebDAVDocument.fromString(value).root_element
-                    calendar = (yield calendarHome.childWithID(calendar_rid))
-                    if calendar is not None:
-                        yield calendar.setTimezone(prop.calendar())
+@inlineCallbacks
+def updateCalendarHome(txn, homeResourceID):
+    """
+    For this calendar home, update the associated properties on the home or its owned calendars.
+    """
 
-            # Always delete the rows so that batch processing works correctly
-            yield Delete(
-                From=rp,
-                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(delete_ids)))).And
-                      (rp.NAME == PropertyName.fromElement(caldavxml.CalendarTimeZone).toString()),
-            ).on(sqlTxn, ids=delete_ids)
+    home = yield txn.calendarHomeWithResourceID(homeResourceID)
+    yield moveCalendarTimezoneProperties(home)
+    yield moveCalendarAvailabilityProperties(home)
+    yield cleanPropertyStore()
 
-            yield sqlTxn.commit()
 
-        yield cleanPropertyStore()
 
-    except RuntimeError:
-        f = Failure()
-        yield sqlTxn.abort()
-        f.raiseException()
+@inlineCallbacks
+def moveCalendarTimezoneProperties(home):
+    """
+    Need to move all the CalDAV:calendar-timezone properties in the
+    RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
+    the new value from the XML property.
+    """
 
+    # Iterate over each calendar (both owned and shared)
+    calendars = (yield home.loadChildren())
+    for calendar in calendars:
+        if calendar.isInbox():
+            continue
+        prop = calendar.properties().get(PropertyName.fromElement(caldavxml.CalendarTimeZone))
+        if prop is not None:
+            yield calendar.setTimezone(prop.calendar())
+            del calendar.properties()[PropertyName.fromElement(caldavxml.CalendarTimeZone)]
 
 
+
 @inlineCallbacks
-def moveCalendarAvailabilityProperties(sqlStore):
+def moveCalendarAvailabilityProperties(home):
     """
     Need to move all the CS:calendar-availability properties in the
     RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
     the new value from the XML property.
     """
+    inbox = (yield home.calendarWithName("inbox"))
+    if inbox is not None:
+        prop = inbox.properties().get(PropertyName.fromElement(customxml.CalendarAvailability))
+        if prop is not None:
+            yield home.setAvailability(prop.calendar())
+            del inbox.properties()[PropertyName.fromElement(customxml.CalendarAvailability)]
 
-    cb = schema.CALENDAR_BIND
-    rp = schema.RESOURCE_PROPERTY
 
-    try:
-        while True:
-            sqlTxn = sqlStore.newTransaction()
-            rows = (yield rowsForProperty(sqlTxn, customxml.CalendarAvailability, batch=BATCH_SIZE))
-            if len(rows) == 0:
-                yield sqlTxn.commit()
-                break
 
-            # Map each calendar to a home id using a single query for efficiency
-            calendar_ids = [row[0] for row in rows]
-
-            home_map = yield Select(
-                [cb.CALENDAR_RESOURCE_ID, cb.CALENDAR_HOME_RESOURCE_ID, ],
-                From=cb,
-                Where=(cb.CALENDAR_RESOURCE_ID.In(Parameter("ids", len(calendar_ids)))).And(cb.BIND_MODE == _BIND_MODE_OWN),
-            ).on(sqlTxn, ids=calendar_ids)
-            calendar_to_home = dict(home_map)
-
-            # Move property to each home
-            for calendar_rid, value in rows:
-                if calendar_rid in calendar_to_home:
-                    calendarHome = (yield sqlTxn.calendarHomeWithResourceID(calendar_to_home[calendar_rid]))
-
-                    if calendarHome is not None:
-                        prop = WebDAVDocument.fromString(value).root_element
-                        yield calendarHome.setAvailability(prop.calendar())
-
-            # Always delete the rows so that batch processing works correctly
-            yield Delete(
-                From=rp,
-                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(calendar_ids)))).And
-                      (rp.NAME == PropertyName.fromElement(customxml.CalendarAvailability).toString()),
-            ).on(sqlTxn, ids=calendar_ids)
-
-            yield sqlTxn.commit()
-
-        yield cleanPropertyStore()
-
-    except RuntimeError:
-        f = Failure()
-        yield sqlTxn.abort()
-        f.raiseException()
-
-
-
 @inlineCallbacks
 def removeOtherProperties(sqlStore):
     """
@@ -190,6 +127,8 @@
     {http://twistedmatrix.com/xml_namespace/dav/}schedule-auto-respond
 
     """
+    logUpgradeStatus("Starting Calendar Remove Other Properties")
+
     sqlTxn = sqlStore.newTransaction()
 
     yield removeProperty(sqlTxn, PropertyName.fromElement(element.ACL))
@@ -205,3 +144,5 @@
 
     yield sqlTxn.commit()
     yield cleanPropertyStore()
+
+    logUpgradeStatus("End Calendar Remove Other Properties")

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_3_to_4.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_3_to_4.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_3_to_4.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -13,23 +13,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ##
+
+from twext.enterprise.dal.syntax import Update, Insert
+
+from twistedcaldav import caldavxml
 from twistedcaldav.caldavxml import ScheduleDefaultCalendarURL, \
-    CalendarFreeBusySet, Opaque, ScheduleCalendarTransp
+    CalendarFreeBusySet, Opaque, ScheduleCalendarTransp, Transparent
+
 from txdav.base.propertystore.base import PropertyName
 from txdav.caldav.datastore.test.util import CommonStoreTests
+from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE, schema
+from txdav.common.datastore.upgrade.sql.upgrades.calendar_upgrade_from_3_to_4 import updateCalendarHomes, \
+    doUpgrade
+from txdav.xml import element
 from txdav.xml.element import HRef
-from twext.enterprise.dal.syntax import Update, Insert
-from txdav.common.datastore.upgrade.sql.upgrades.calendar_upgrade_from_3_to_4 import moveDefaultCalendarProperties, \
-    moveCalendarTranspProperties, removeResourceType, moveDefaultAlarmProperties
-from txdav.xml import element
-from twistedcaldav import caldavxml
-from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE, schema
+from twistedcaldav.config import config
 
 """
 Tests for L{txdav.common.datastore.upgrade.sql.upgrade}.
 """
 
-from twisted.internet.defer import inlineCallbacks
+from twisted.internet.defer import inlineCallbacks, returnValue
 
 class Upgrade_from_3_to_4(CommonStoreTests):
     """
@@ -37,7 +41,7 @@
     """
 
     @inlineCallbacks
-    def test_defaultCalendarUpgrade(self):
+    def _defaultCalendarUpgrade_setup(self):
 
         # Set dead property on inbox
         for user in ("user01", "user02",):
@@ -52,39 +56,132 @@
                 Where=chm.RESOURCE_ID == home._resourceID,
             ).on(self.transactionUnderTest())
 
-        # Force data version to previous
-        ch = home._homeSchema
-        yield Update(
-            {ch.DATAVERSION: 3},
-            Where=ch.RESOURCE_ID == home._resourceID,
-        ).on(self.transactionUnderTest())
+            # Force data version to previous
+            ch = home._homeSchema
+            yield Update(
+                {ch.DATAVERSION: 3},
+                Where=ch.RESOURCE_ID == home._resourceID,
+            ).on(self.transactionUnderTest())
 
         yield self.commit()
 
-        # Trigger upgrade
-        yield moveDefaultCalendarProperties(self._sqlCalendarStore)
 
+    @inlineCallbacks
+    def _defaultCalendarUpgrade_check(self, changed_users, unchanged_users):
+
         # Test results
-        for user in ("user01", "user02",):
+        for user in changed_users:
             home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 4)
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
             self.assertTrue(home.isDefaultCalendar(calendar))
             inbox = (yield self.calendarUnderTest(name="inbox", home=user))
             self.assertTrue(PropertyName.fromElement(ScheduleDefaultCalendarURL) not in inbox.properties())
 
+        for user in unchanged_users:
+            home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 3)
+            calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+            self.assertFalse(home.isDefaultCalendar(calendar))
+            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
+            self.assertTrue(PropertyName.fromElement(ScheduleDefaultCalendarURL) in inbox.properties())
 
+
     @inlineCallbacks
-    def test_calendarTranspUpgrade(self):
+    def test_defaultCalendarUpgrade(self):
+        yield self._defaultCalendarUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._defaultCalendarUpgrade_check(("user01", "user02",), ())
 
+
+    @inlineCallbacks
+    def test_partialDefaultCalendarUpgrade(self):
+        yield self._defaultCalendarUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._defaultCalendarUpgrade_check(("user01",), ("user02",))
+
+
+    @inlineCallbacks
+    def _invalidDefaultCalendarUpgrade_setup(self):
+
         # Set dead property on inbox
         for user in ("user01", "user02",):
             inbox = (yield self.calendarUnderTest(name="inbox", home=user))
+            inbox.properties()[PropertyName.fromElement(ScheduleDefaultCalendarURL)] = ScheduleDefaultCalendarURL(HRef.fromString("/calendars/__uids__/%s/tasks_1" % (user,)))
+
+            # Force current default to null
+            home = (yield self.homeUnderTest(name=user))
+            chm = home._homeMetaDataSchema
+            yield Update(
+                {chm.DEFAULT_EVENTS: None},
+                Where=chm.RESOURCE_ID == home._resourceID,
+            ).on(self.transactionUnderTest())
+
+            # Create tasks only calendar
+            tasks = (yield home.createCalendarWithName("tasks_1"))
+            yield tasks.setSupportedComponents("VTODO")
+
+            # Force data version to previous
+            ch = home._homeSchema
+            yield Update(
+                {ch.DATAVERSION: 3},
+                Where=ch.RESOURCE_ID == home._resourceID,
+            ).on(self.transactionUnderTest())
+
+        yield self.commit()
+
+
+    @inlineCallbacks
+    def _invalidDefaultCalendarUpgrade_check(self, changed_users, unchanged_users):
+
+        # Test results
+        for user in changed_users:
+            home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 4)
+            calendar = (yield self.calendarUnderTest(name="tasks_1", home=user))
+            self.assertFalse(home.isDefaultCalendar(calendar))
+            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
+            self.assertTrue(PropertyName.fromElement(ScheduleDefaultCalendarURL) not in inbox.properties())
+
+        for user in unchanged_users:
+            home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 3)
+            calendar = (yield self.calendarUnderTest(name="tasks_1", home=user))
+            self.assertFalse(home.isDefaultCalendar(calendar))
+            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
+            self.assertTrue(PropertyName.fromElement(ScheduleDefaultCalendarURL) in inbox.properties())
+
+
+    @inlineCallbacks
+    def test_invalidDefaultCalendarUpgrade(self):
+        yield self._invalidDefaultCalendarUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._invalidDefaultCalendarUpgrade_check(("user01", "user02",), ())
+
+
+    @inlineCallbacks
+    def test_partialInvalidDefaultCalendarUpgrade(self):
+        yield self._invalidDefaultCalendarUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._invalidDefaultCalendarUpgrade_check(("user01",), ("user02",))
+
+
+    @inlineCallbacks
+    def _calendarTranspUpgrade_setup(self):
+
+        # Set dead property on inbox
+        for user in ("user01", "user02",):
+            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
             inbox.properties()[PropertyName.fromElement(CalendarFreeBusySet)] = CalendarFreeBusySet(HRef.fromString("/calendars/__uids__/%s/calendar_1" % (user,)))
 
             # Force current to transparent
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
             yield calendar.setUsedForFreeBusy(False)
-            calendar.properties()[PropertyName.fromElement(ScheduleCalendarTransp)] = ScheduleCalendarTransp(Opaque())
+            calendar.properties()[PropertyName.fromElement(ScheduleCalendarTransp)] = ScheduleCalendarTransp(Opaque() if user == "user01" else Transparent())
 
             # Force data version to previous
             home = (yield self.homeUnderTest(name=user))
@@ -118,21 +215,55 @@
         ).on(txn)
         yield self.commit()
 
-        # Trigger upgrade
-        yield moveCalendarTranspProperties(self._sqlCalendarStore)
 
+    @inlineCallbacks
+    def _calendarTranspUpgrade_check(self, changed_users, unchanged_users):
+
         # Test results
-        for user in ("user01", "user02",):
+        for user in changed_users:
             home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 4)
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
-            self.assertTrue(calendar.isUsedForFreeBusy())
+            if user == "user01":
+                self.assertTrue(calendar.isUsedForFreeBusy())
+            else:
+                self.assertFalse(calendar.isUsedForFreeBusy())
+            self.assertTrue(PropertyName.fromElement(caldavxml.ScheduleCalendarTransp) not in calendar.properties())
             inbox = (yield self.calendarUnderTest(name="inbox", home=user))
             self.assertTrue(PropertyName.fromElement(CalendarFreeBusySet) not in inbox.properties())
 
+        for user in unchanged_users:
+            home = (yield self.homeUnderTest(name=user))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 3)
+            calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+            if user == "user01":
+                self.assertFalse(calendar.isUsedForFreeBusy())
+            else:
+                self.assertFalse(calendar.isUsedForFreeBusy())
+            self.assertTrue(PropertyName.fromElement(caldavxml.ScheduleCalendarTransp) in calendar.properties())
+            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
+            self.assertTrue(PropertyName.fromElement(CalendarFreeBusySet) in inbox.properties())
 
+
     @inlineCallbacks
-    def test_defaultAlarmUpgrade(self):
+    def test_calendarTranspUpgrade(self):
+        yield self._calendarTranspUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._calendarTranspUpgrade_check(("user01", "user02",), ())
 
+
+    @inlineCallbacks
+    def test_partialCalendarTranspUpgrade(self):
+        yield self._calendarTranspUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._calendarTranspUpgrade_check(("user01",), ("user02",))
+
+
+    @inlineCallbacks
+    def _defaultAlarmUpgrade_setup(self):
+
         alarmhome1 = """BEGIN:VALARM
 ACTION:AUDIO
 TRIGGER;RELATED=START:-PT1M
@@ -236,13 +367,28 @@
         shared = yield self.calendarUnderTest(name=shared_name, home="user02")
         for _ignore_vevent, _ignore_timed, alarm, prop in detailsshared:
             shared.properties()[PropertyName.fromElement(prop)] = prop(alarm)
+
+        for user in ("user01", "user02",):
+            # Force data version to previous
+            home = (yield self.homeUnderTest(name=user))
+            ch = home._homeSchema
+            yield Update(
+                {ch.DATAVERSION: 3},
+                Where=ch.RESOURCE_ID == home._resourceID,
+            ).on(self.transactionUnderTest())
+
         yield self.commit()
 
-        # Trigger upgrade
-        yield moveDefaultAlarmProperties(self._sqlCalendarStore)
+        returnValue((detailshome, detailscalendar, detailsshared, shared_name,))
 
+
+    @inlineCallbacks
+    def _defaultAlarmUpgrade_check(self, changed_users, unchanged_users, detailshome, detailscalendar, detailsshared, shared_name):
+
         # Check each type of collection
         home = yield self.homeUnderTest(name="user01")
+        version = (yield home.dataVersion())
+        self.assertEqual(version, 4)
         for vevent, timed, alarm, prop in detailshome:
             alarm_result = (yield home.getDefaultAlarm(vevent, timed))
             self.assertEquals(alarm_result, alarm)
@@ -252,18 +398,67 @@
         for vevent, timed, alarm, prop in detailscalendar:
             alarm_result = (yield calendar.getDefaultAlarm(vevent, timed))
             self.assertEquals(alarm_result, alarm)
-            self.assertTrue(PropertyName.fromElement(prop) not in home.properties())
+            self.assertTrue(PropertyName.fromElement(prop) not in calendar.properties())
 
-        shared = yield self.calendarUnderTest(name=shared_name, home="user02")
-        for vevent, timed, alarm, prop in detailsshared:
-            alarm_result = (yield shared.getDefaultAlarm(vevent, timed))
-            self.assertEquals(alarm_result, alarm)
-            self.assertTrue(PropertyName.fromElement(prop) not in home.properties())
+        if "user02" in changed_users:
+            home = (yield self.homeUnderTest(name="user02"))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 4)
+            shared = yield self.calendarUnderTest(name=shared_name, home="user02")
+            for vevent, timed, alarm, prop in detailsshared:
+                alarm_result = (yield shared.getDefaultAlarm(vevent, timed))
+                self.assertEquals(alarm_result, alarm)
+                self.assertTrue(PropertyName.fromElement(prop) not in shared.properties())
+        else:
+            home = (yield self.homeUnderTest(name="user02"))
+            version = (yield home.dataVersion())
+            self.assertEqual(version, 3)
+            shared = yield self.calendarUnderTest(name=shared_name, home="user02")
+            for vevent, timed, alarm, prop in detailsshared:
+                alarm_result = (yield shared.getDefaultAlarm(vevent, timed))
+                self.assertEquals(alarm_result, None)
+                self.assertTrue(PropertyName.fromElement(prop) in shared.properties())
 
 
     @inlineCallbacks
-    def test_resourceTypeUpgrade(self):
+    def test_defaultAlarmUpgrade(self):
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._defaultAlarmUpgrade_check(("user01", "user02",), (), detailshome, detailscalendar, detailsshared, shared_name)
 
+
+    @inlineCallbacks
+    def test_partialDefaultAlarmUpgrade(self):
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._defaultAlarmUpgrade_check(("user01",), ("user02",), detailshome, detailscalendar, detailsshared, shared_name)
+
+
+    @inlineCallbacks
+    def test_combinedUpgrade(self):
+        yield self._defaultCalendarUpgrade_setup()
+        yield self._calendarTranspUpgrade_setup()
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._defaultCalendarUpgrade_check(("user01", "user02",), ())
+        yield self._calendarTranspUpgrade_check(("user01", "user02",), ())
+        yield self._defaultAlarmUpgrade_check(("user01", "user02",), (), detailshome, detailscalendar, detailsshared, shared_name)
+
+
+    @inlineCallbacks
+    def test_partialCombinedUpgrade(self):
+        yield self._defaultCalendarUpgrade_setup()
+        yield self._calendarTranspUpgrade_setup()
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._defaultCalendarUpgrade_check(("user01",), ("user02",))
+        yield self._calendarTranspUpgrade_check(("user01",), ("user02",))
+        yield self._defaultAlarmUpgrade_check(("user01",), ("user02",), detailshome, detailscalendar, detailsshared, shared_name)
+
+
+    @inlineCallbacks
+    def _resourceTypeUpgrade_setup(self):
+
         # Set dead property on calendar
         for user in ("user01", "user02",):
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
@@ -273,12 +468,60 @@
         for user in ("user01", "user02",):
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
             self.assertTrue(PropertyName.fromElement(element.ResourceType) in calendar.properties())
+
+        yield self.transactionUnderTest().updateCalendarserverValue("CALENDAR-DATAVERSION", "3")
+
         yield self.commit()
 
-        # Trigger upgrade
-        yield removeResourceType(self._sqlCalendarStore)
 
+    @inlineCallbacks
+    def _resourceTypeUpgrade_check(self, full=True):
+
         # Test results
-        for user in ("user01", "user02",):
-            calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
-            self.assertTrue(PropertyName.fromElement(element.ResourceType) not in calendar.properties())
+        if full:
+            for user in ("user01", "user02",):
+                calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+                self.assertTrue(PropertyName.fromElement(element.ResourceType) not in calendar.properties())
+            version = yield self.transactionUnderTest().calendarserverValue("CALENDAR-DATAVERSION")
+            self.assertEqual(int(version), 4)
+        else:
+            for user in ("user01", "user02",):
+                calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+                self.assertTrue(PropertyName.fromElement(element.ResourceType) in calendar.properties())
+            version = yield self.transactionUnderTest().calendarserverValue("CALENDAR-DATAVERSION")
+            self.assertEqual(int(version), 3)
+
+
+    @inlineCallbacks
+    def test_resourceTypeUpgrade(self):
+        yield self._resourceTypeUpgrade_setup()
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._resourceTypeUpgrade_check()
+
+
+    @inlineCallbacks
+    def test_fullUpgrade(self):
+        self.patch(config, "UpgradeHomePrefix", "")
+        yield self._defaultCalendarUpgrade_setup()
+        yield self._calendarTranspUpgrade_setup()
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield self._resourceTypeUpgrade_setup()
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._defaultCalendarUpgrade_check(("user01", "user02",), ())
+        yield self._calendarTranspUpgrade_check(("user01", "user02",), ())
+        yield self._defaultAlarmUpgrade_check(("user01", "user02",), (), detailshome, detailscalendar, detailsshared, shared_name)
+        yield self._resourceTypeUpgrade_check()
+
+
+    @inlineCallbacks
+    def test_partialFullUpgrade(self):
+        self.patch(config, "UpgradeHomePrefix", "user01")
+        yield self._defaultCalendarUpgrade_setup()
+        yield self._calendarTranspUpgrade_setup()
+        yield self._resourceTypeUpgrade_setup()
+        detailshome, detailscalendar, detailsshared, shared_name = (yield self._defaultAlarmUpgrade_setup())
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._defaultCalendarUpgrade_check(("user01",), ("user02",))
+        yield self._calendarTranspUpgrade_check(("user01",), ("user02",))
+        yield self._defaultAlarmUpgrade_check(("user01",), ("user02",), detailshome, detailscalendar, detailsshared, shared_name)
+        yield self._resourceTypeUpgrade_check(False)

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_4_to_5.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_4_to_5.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/test/test_upgrade_from_4_to_5.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -13,21 +13,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ##
-from twistedcaldav import caldavxml, customxml
-from txdav.common.datastore.upgrade.sql.upgrades.calendar_upgrade_from_4_to_5 import moveCalendarTimezoneProperties, \
-    removeOtherProperties, moveCalendarAvailabilityProperties
-from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE, schema
-from txdav.xml import element
 
 """
 Tests for L{txdav.common.datastore.upgrade.sql.upgrade}.
 """
 
 from twext.enterprise.dal.syntax import Update, Insert
-from twisted.internet.defer import inlineCallbacks
+
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+from twistedcaldav import caldavxml, customxml
+from twistedcaldav.config import config
 from twistedcaldav.ical import Component
+
 from txdav.base.propertystore.base import PropertyName
 from txdav.caldav.datastore.test.util import CommonStoreTests
+from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE, schema
+from txdav.common.datastore.upgrade.sql.upgrades.calendar_upgrade_from_4_to_5 import updateCalendarHomes, doUpgrade
+from txdav.xml import element
 
 class Upgrade_from_4_to_5(CommonStoreTests):
     """
@@ -35,7 +38,7 @@
     """
 
     @inlineCallbacks
-    def test_calendarTimezoneUpgrade(self):
+    def _calendarTimezoneUpgrade_setup(self):
 
         tz1 = Component.fromString("""BEGIN:VCALENDAR
 VERSION:2.0
@@ -137,19 +140,50 @@
         ).on(txn)
         yield self.commit()
 
-        # Trigger upgrade
-        yield moveCalendarTimezoneProperties(self._sqlCalendarStore)
+        returnValue(user_details)
 
+
+    @inlineCallbacks
+    def _calendarTimezoneUpgrade_check(self, changed_users, unchanged_users, user_details):
+
         # Test results
         for user, calname, tz in user_details:
-            calendar = (yield self.calendarUnderTest(name=calname, home=user))
-            self.assertEqual(calendar.getTimezone(), tz)
-            self.assertTrue(PropertyName.fromElement(caldavxml.CalendarTimeZone) not in calendar.properties())
+            if user in changed_users:
+                home = (yield self.homeUnderTest(name=user))
+                version = (yield home.dataVersion())
+                self.assertEqual(version, 5)
+                calendar = (yield self.calendarUnderTest(name=calname, home=user))
+                self.assertEqual(calendar.getTimezone(), tz)
+                self.assertTrue(PropertyName.fromElement(caldavxml.CalendarTimeZone) not in calendar.properties())
+            else:
+                home = (yield self.homeUnderTest(name=user))
+                version = (yield home.dataVersion())
+                self.assertEqual(version, 4)
+                calendar = (yield self.calendarUnderTest(name=calname, home=user))
+                self.assertEqual(calendar.getTimezone(), None)
+                if tz:
+                    self.assertTrue(PropertyName.fromElement(caldavxml.CalendarTimeZone) in calendar.properties())
+                else:
+                    self.assertTrue(PropertyName.fromElement(caldavxml.CalendarTimeZone) not in calendar.properties())
 
 
     @inlineCallbacks
-    def test_calendarAvailabilityUpgrade(self):
+    def test_calendarTimezoneUpgrade(self):
+        user_details = yield self._calendarTimezoneUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._calendarTimezoneUpgrade_check(("user01", "user02", "user03",), (), user_details)
 
+
+    @inlineCallbacks
+    def test_partialCalendarTimezoneUpgrade(self):
+        user_details = yield self._calendarTimezoneUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._calendarTimezoneUpgrade_check(("user01",), ("user02", "user03",), user_details)
+
+
+    @inlineCallbacks
+    def _calendarAvailabilityUpgrade_setup(self):
+
         av1 = Component.fromString("""BEGIN:VCALENDAR
 VERSION:2.0
 CALSCALE:GREGORIAN
@@ -220,20 +254,68 @@
             self.assertEqual(PropertyName.fromElement(customxml.CalendarAvailability) in calendar.properties(), av is not None)
         yield self.commit()
 
-        # Trigger upgrade
-        yield moveCalendarAvailabilityProperties(self._sqlCalendarStore)
+        returnValue(user_details)
 
+
+    @inlineCallbacks
+    def _calendarAvailabilityUpgrade_check(self, changed_users, unchanged_users, user_details):
+
         # Test results
         for user, av in user_details:
-            home = (yield self.homeUnderTest(name=user))
-            calendar = (yield self.calendarUnderTest(name="inbox", home=user))
-            self.assertEqual(home.getAvailability(), av)
-            self.assertTrue(PropertyName.fromElement(customxml.CalendarAvailability) not in calendar.properties())
+            if user in changed_users:
+                home = (yield self.homeUnderTest(name=user))
+                version = (yield home.dataVersion())
+                self.assertEqual(version, 5)
+                calendar = (yield self.calendarUnderTest(name="inbox", home=user))
+                self.assertEqual(home.getAvailability(), av)
+                self.assertTrue(PropertyName.fromElement(customxml.CalendarAvailability) not in calendar.properties())
+            else:
+                home = (yield self.homeUnderTest(name=user))
+                version = (yield home.dataVersion())
+                self.assertEqual(version, 4)
+                calendar = (yield self.calendarUnderTest(name="inbox", home=user))
+                self.assertEqual(home.getAvailability(), None)
+                if av:
+                    self.assertTrue(PropertyName.fromElement(customxml.CalendarAvailability) in calendar.properties())
+                else:
+                    self.assertTrue(PropertyName.fromElement(customxml.CalendarAvailability) not in calendar.properties())
 
 
     @inlineCallbacks
-    def test_removeOtherPropertiesUpgrade(self):
+    def test_calendarAvailabilityUpgrade(self):
+        user_details = yield self._calendarAvailabilityUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._calendarAvailabilityUpgrade_check(("user01", "user02", "user03",), (), user_details)
 
+
+    @inlineCallbacks
+    def test_partialCalendarAvailabilityUpgrade(self):
+        user_details = yield self._calendarAvailabilityUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._calendarAvailabilityUpgrade_check(("user01",), ("user02", "user03",), user_details)
+
+
+    @inlineCallbacks
+    def test_combinedUpgrade(self):
+        user_details1 = yield self._calendarTimezoneUpgrade_setup()
+        user_details2 = yield self._calendarAvailabilityUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore)
+        yield self._calendarTimezoneUpgrade_check(("user01", "user02", "user03",), (), user_details1)
+        yield self._calendarAvailabilityUpgrade_check(("user01", "user02", "user03",), (), user_details2)
+
+
+    @inlineCallbacks
+    def test_partialCombinedUpgrade(self):
+        user_details1 = yield self._calendarTimezoneUpgrade_setup()
+        user_details2 = yield self._calendarAvailabilityUpgrade_setup()
+        yield updateCalendarHomes(self._sqlCalendarStore, "user01")
+        yield self._calendarTimezoneUpgrade_check(("user01",), ("user02", "user03",), user_details1)
+        yield self._calendarAvailabilityUpgrade_check(("user01",), ("user02", "user03",), user_details2)
+
+
+    @inlineCallbacks
+    def _removeOtherPropertiesUpgrade_setup(self):
+
         # Set dead property on calendar
         for user in ("user01", "user02",):
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
@@ -243,12 +325,55 @@
         for user in ("user01", "user02",):
             calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
             self.assertTrue(PropertyName.fromElement(element.ResourceID) in calendar.properties())
+
+        yield self.transactionUnderTest().updateCalendarserverValue("CALENDAR-DATAVERSION", "4")
+
         yield self.commit()
 
-        # Trigger upgrade
-        yield removeOtherProperties(self._sqlCalendarStore)
 
+    @inlineCallbacks
+    def _removeOtherPropertiesUpgrade_check(self, full=True):
+
         # Test results
         for user in ("user01", "user02",):
-            calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
-            self.assertTrue(PropertyName.fromElement(element.ResourceID) not in calendar.properties())
+            if full:
+                calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+                self.assertTrue(PropertyName.fromElement(element.ResourceID) not in calendar.properties())
+                version = yield self.transactionUnderTest().calendarserverValue("CALENDAR-DATAVERSION")
+                self.assertEqual(int(version), 5)
+            else:
+                calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
+                self.assertTrue(PropertyName.fromElement(element.ResourceID) in calendar.properties())
+                version = yield self.transactionUnderTest().calendarserverValue("CALENDAR-DATAVERSION")
+                self.assertEqual(int(version), 4)
+
+
+    @inlineCallbacks
+    def test_removeOtherPropertiesUpgrade(self):
+        yield self._removeOtherPropertiesUpgrade_setup()
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._removeOtherPropertiesUpgrade_check()
+
+
+    @inlineCallbacks
+    def test_fullUpgrade(self):
+        self.patch(config, "UpgradeHomePrefix", "")
+        user_details1 = yield self._calendarTimezoneUpgrade_setup()
+        user_details2 = yield self._calendarAvailabilityUpgrade_setup()
+        yield self._removeOtherPropertiesUpgrade_setup()
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._calendarTimezoneUpgrade_check(("user01", "user02", "user03",), (), user_details1)
+        yield self._calendarAvailabilityUpgrade_check(("user01", "user02", "user03",), (), user_details2)
+        yield self._removeOtherPropertiesUpgrade_check()
+
+
+    @inlineCallbacks
+    def test_partialFullUpgrade(self):
+        self.patch(config, "UpgradeHomePrefix", "user01")
+        user_details1 = yield self._calendarTimezoneUpgrade_setup()
+        user_details2 = yield self._calendarAvailabilityUpgrade_setup()
+        yield self._removeOtherPropertiesUpgrade_setup()
+        yield doUpgrade(self._sqlCalendarStore)
+        yield self._calendarTimezoneUpgrade_check(("user01",), ("user02", "user03",), user_details1)
+        yield self._calendarAvailabilityUpgrade_check(("user01",), ("user02", "user03",), user_details2)
+        yield self._removeOtherPropertiesUpgrade_check(False)

Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/util.py	2013-11-08 02:59:43 UTC (rev 11911)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/upgrade/sql/upgrades/util.py	2013-11-08 03:33:05 UTC (rev 11912)
@@ -14,7 +14,7 @@
 # limitations under the License.
 ##
 
-from twext.enterprise.dal.syntax import Select, Delete, Update
+from twext.enterprise.dal.syntax import Select, Delete, Update, Count
 from twext.python.log import Logger
 from twisted.internet.defer import inlineCallbacks, returnValue
 from txdav.base.propertystore.base import PropertyName
@@ -44,6 +44,21 @@
 
 
 @inlineCallbacks
+def countProperty(txn, propelement):
+    pname = PropertyName.fromElement(propelement)
+
+    rp = schema.RESOURCE_PROPERTY
+    count = (yield Select(
+        [Count(rp.RESOURCE_ID), ],
+        From=rp,
+        Where=rp.NAME == pname.toString(),
+    ).on(txn))[0][0]
+
+    returnValue(count)
+
+
+
+@inlineCallbacks
 def cleanPropertyStore():
     """
     We have manually manipulated the SQL property store by-passing the underlying implementation's caching
@@ -114,27 +129,43 @@
 
 
 @inlineCallbacks
-def doToEachHomeNotAtVersion(store, homeSchema, version, doIt):
+def doToEachHomeNotAtVersion(store, homeSchema, version, doIt, logStr, filterOwnerUID=None):
     """
     Do something to each home whose version column indicates it is older
-    than the specified version. Do this in batches as there may be a lot of work to do.
+    than the specified version. Do this in batches as there may be a lot of work to do. Also,
+    allow the GUID to be filtered to support a parallel mode of operation.
     """
 
+    txn = store.newTransaction("updateDataVersion")
+    where = homeSchema.DATAVERSION < version
+    if filterOwnerUID:
+        where = where.And(homeSchema.OWNER_UID.StartsWith(filterOwnerUID))
+    total = (yield Select(
+        [Count(homeSchema.RESOURCE_ID), ],
+        From=homeSchema,
+        Where=where,
+    ).on(txn))[0][0]
+    yield txn.commit()
+    count = 0
+
     while True:
 
+        logUpgradeStatus(logStr, count, total)
+
         # Get the next home with an old version
         txn = store.newTransaction("updateDataVersion")
         try:
             rows = yield Select(
                 [homeSchema.RESOURCE_ID, homeSchema.OWNER_UID, ],
                 From=homeSchema,
-                Where=homeSchema.DATAVERSION < version,
+                Where=where,
                 OrderBy=homeSchema.OWNER_UID,
                 Limit=1,
             ).on(txn)
 
             if len(rows) == 0:
                 yield txn.commit()
+                logUpgradeStatus("End {}".format(logStr), count, total)
                 returnValue(None)
 
             # Apply to the home
@@ -149,6 +180,26 @@
             yield txn.commit()
         except RuntimeError, e:
             f = Failure()
-            log.error("Failed to upgrade %s to %s: %s" % (homeSchema, version, e))
+            logUpgradeError(
+                logStr,
+                "Failed to upgrade {} to {}: {}".format(homeSchema, version, e)
+            )
             yield txn.abort()
             f.raiseException()
+
+        count += 1
+
+
+
+def logUpgradeStatus(title, count=None, total=None):
+    if total is None:
+        log.info("Database upgrade {title}", title=title)
+    else:
+        divisor = 1000 if total > 1000 else 100
+        if (divmod(count, divisor)[1] == 0) or (count == total):
+            log.info("Database upgrade {title}: {count} of {total}", title=title, count=count, total=total)
+
+
+
+def logUpgradeError(title, details):
+    log.error("Database upgrade {title} failed: {details}", title=title, details=details)
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20140312/bfcc47f9/attachment.html>


More information about the calendarserver-changes mailing list