[CalendarServer-changes] [13158] CalendarServer/branches/users/sagen/move2who-5

source_changes at macosforge.org
Fri Apr 4 10:20:27 PDT 2014


Revision: 13158
          http://trac.calendarserver.org//changeset/13158
Author:   sagen at apple.com
Date:     2014-04-04 10:20:27 -0700 (Fri, 04 Apr 2014)
Log Message:
-----------
pull up from trunk

Modified Paths:
--------------
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/accesslog.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/provision/root.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/provision/test/test_root.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/push/applepush.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/caldav.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/test/test_caldav.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/test/test_util.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/util.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/agent.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/calverify.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/export.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/gateway.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/migrate.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/principals.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/purge.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/push.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/resources.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/directory.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/terminal.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/test/test_vfs.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/vfs.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/augments.xml
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/caldavd.plist
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/resources-locations.xml
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/users-groups.xml
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/caldavd.plist
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/resources-locations.xml
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/users-groups.xml
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_agent.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_calverify.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_gateway.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_principals.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_purge.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_purge_old_events.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_resources.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/util.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/webadmin/principals.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/webcal/resource.py
    CalendarServer/branches/users/sagen/move2who-5/conf/auth/accounts-test.xml
    CalendarServer/branches/users/sagen/move2who-5/conf/auth/augments-test.xml
    CalendarServer/branches/users/sagen/move2who-5/conf/auth/proxies-test.xml
    CalendarServer/branches/users/sagen/move2who-5/conf/auth/resources-test.xml
    CalendarServer/branches/users/sagen/move2who-5/conf/caldavd-test.plist
    CalendarServer/branches/users/sagen/move2who-5/contrib/performance/loadtest/test_sim.py
    CalendarServer/branches/users/sagen/move2who-5/requirements/py_develop.txt
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/cache.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/customxml.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/addressbook.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/augment.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendar.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendaruserproxy.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendaruserproxyloader.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/common.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/directory-principal-resource.html
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/principal.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/accounts.xml
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/augments.xml
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/resources.xml
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_augment.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_principal.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/util.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/util.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directorybackedaddressbook.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/extensions.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/freebusyurl.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/ical.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/method/report.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/method/report_addressbook_query.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/method/report_common.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/method/report_multiget_common.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/resource.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/scheduling_store/caldav/resource.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/sharing.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/stdconfig.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/storebridge.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_addressbookmultiget.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_addressbookquery.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_cache.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_calendarquery.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_collectioncontents.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_config.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_icalendar.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_mkcalendar.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_multiget.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_props.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_resource.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_sharing.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_upgrade.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/test_wrapping.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/test/util.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/timezoneservice.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/timezonestdservice.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/upgrade.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/util.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/caldav/scheduler.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/freebusy.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/imip/inbound.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/implicit.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/ischedule/delivery.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/ischedule/resource.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/ischedule/scheduler.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/itip.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/processing.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/scheduler.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/scheduling/work.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/sql.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/test/attachments/accounts.xml
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/test/attachments/resources.xml
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/test/test_attachments.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/test/util.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/datastore/util.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/caldav/icalendardirectoryservice.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/carddav/datastore/query/filter.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/common/datastore/file.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/common/datastore/podding/conduit.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/common/datastore/podding/resource.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/common/datastore/podding/test/test_conduit.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/common/datastore/sql.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/common/datastore/test/util.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/dps/client.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/dps/commands.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/dps/server.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/dps/test/test.xml
    CalendarServer/branches/users/sagen/move2who-5/txdav/dps/test/test_client.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/delegates.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/groups.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/idirectory.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/test/accounts/accounts.xml
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/test/accounts/resources.xml
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/test/test_delegates.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/test/test_groups.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/xml.py
    CalendarServer/branches/users/sagen/move2who-5/txweb2/channel/http.py
    CalendarServer/branches/users/sagen/move2who-5/txweb2/dav/method/report_expand.py
    CalendarServer/branches/users/sagen/move2who-5/txweb2/dav/resource.py
    CalendarServer/branches/users/sagen/move2who-5/txweb2/dav/util.py
    CalendarServer/branches/users/sagen/move2who-5/txweb2/server.py

Added Paths:
-----------
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/platform/darwin/od/
    CalendarServer/branches/users/sagen/move2who-5/conf/auth/generate_test_accounts.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/dps/json.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/augment.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/directory.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/test/accounts/augments.xml
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/test/test_augment.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/test/test_directory.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/test/test_util.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/test/test_wiki.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/util.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/vcard.py
    CalendarServer/branches/users/sagen/move2who-5/txdav/who/wiki.py

Removed Paths:
-------------
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/platform/darwin/od/
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/platform/darwin/wiki.py
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/calverify/
    CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/purge/
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/aggregate.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/appleopendirectory.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/cachingdirectory.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/directory.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/idirectory.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/ldapdirectory.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/opendirectorybacker.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_aggregate.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_buildquery.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_cachedirectory.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_directory.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_guidchange.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_ldapdirectory.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_livedirectory.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_modify.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_opendirectory.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_opendirectorybacker.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_proxyprincipalmembers.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_resources.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_wiki.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/test/test_xmlfile.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/wiki.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/xmlaccountsparser.py
    CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/xmlfile.py

Property Changed:
----------------
    CalendarServer/branches/users/sagen/move2who-5/


Property changes on: CalendarServer/branches/users/sagen/move2who-5
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
/CalendarServer/branches/release/CalendarServer-5.2-dev:11972,12357-12358,12794,12814
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/cross-pod-sharing:12038-12191
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/json:11622-11912
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
/CalendarServer/branches/users/cdaboo/scheduling-queue-refresh:11783-12557
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/cleanrevisions:12152-12334
/CalendarServer/branches/users/gaya/sharedgroupfixes:12120-12142
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
   + /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
/CalendarServer/branches/release/CalendarServer-5.2-dev:11972,12357-12358,12794,12814
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/cross-pod-sharing:12038-12191
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/json:11622-11912
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
/CalendarServer/branches/users/cdaboo/scheduling-queue-refresh:11783-12557
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/cleanrevisions:12152-12334
/CalendarServer/branches/users/gaya/sharedgroupfixes:12120-12142
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/move2who:12819-12860
/CalendarServer/branches/users/sagen/move2who-2:12861-12898
/CalendarServer/branches/users/sagen/move2who-3:12899-12913
/CalendarServer/branches/users/sagen/move2who-4:12914-13157
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/accesslog.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/accesslog.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/accesslog.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -48,7 +48,6 @@
 from twisted.protocols import amp
 
 from twistedcaldav.config import config
-from twistedcaldav.directory.directory import DirectoryService
 
 from txdav.xml import element as davxml
 
@@ -91,22 +90,27 @@
                     if hasattr(request, "authzUser") and str(request.authzUser.children[0]) != uidn:
                         uidz = str(request.authzUser.children[0])
 
-                    def convertUIDtoShortName(uid):
-                        uid = uid.rstrip("/")
-                        uid = uid[uid.rfind("/") + 1:]
-                        record = request.site.resource.getDirectory().recordWithUID(uid)
-                        if record:
-                            if record.recordType == DirectoryService.recordType_users:
-                                return record.shortNames[0]
-                            else:
-                                return "(%s)%s" % (record.recordType, record.shortNames[0],)
-                        else:
-                            return uid
+                    # def convertUIDtoShortName(uid):
+                    #     uid = uid.rstrip("/")
+                    #     uid = uid[uid.rfind("/") + 1:]
+                    #     record = request.site.resource.getDirectory().recordWithUID(uid)
+                    #     if record:
+                    #         if record.recordType == DirectoryService.recordType_users:
+                    #             return record.shortNames[0]
+                    #         else:
+                    #             return "(%s)%s" % (record.recordType, record.shortNames[0],)
+                    #     else:
+                    #         return uid
 
-                    uidn = convertUIDtoShortName(uidn)
-                    if uidz:
-                        uidz = convertUIDtoShortName(uidz)
+                    # MOVE2WHO
+                    # Better to stick the records directly on the request at
+                    # an earlier point, since we can't do anything deferred
+                    # in here.
 
+                    # uidn = convertUIDtoShortName(uidn)
+                    # if uidz:
+                    #     uidz = convertUIDtoShortName(uidz)
+
                     if uidn and uidz:
                         uid = '"%s as %s"' % (uidn, uidz,)
                     else:
@@ -151,8 +155,9 @@
                 format += ' i=%(serverInstance)s'
                 formatArgs["serverInstance"] = config.LogID if config.LogID else "0"
 
-                format += ' or=%(outstandingRequests)s'
-                formatArgs["outstandingRequests"] = request.chanRequest.channel.factory.outstandingRequests
+                if request.chanRequest:  # This can be None during tests
+                    format += ' or=%(outstandingRequests)s'
+                    formatArgs["outstandingRequests"] = request.chanRequest.channel.factory.outstandingRequests
 
                 # Tags for time stamps collected along the way - the first one in the list is the initial
                 # time for request creation - we use that to track the entire request/response time

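The accesslog.py hunk above only emits the or= (outstanding requests) field when request.chanRequest is present, since it can be None for requests constructed directly by the test suite. A minimal standalone sketch of that guard in isolation (the function and argument names are illustrative, not part of the commit):

    def appendOutstandingRequests(request, format, formatArgs):
        # request.chanRequest can be None when a request is built outside a
        # real HTTP channel (e.g. in unit tests), so only read through it
        # when it is set.
        if request.chanRequest:
            format += ' or=%(outstandingRequests)s'
            formatArgs["outstandingRequests"] = (
                request.chanRequest.channel.factory.outstandingRequests
            )
        return format, formatArgs
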
Deleted: CalendarServer/branches/users/sagen/move2who-5/calendarserver/platform/darwin/wiki.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/platform/darwin/wiki.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/platform/darwin/wiki.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,102 +0,0 @@
-##
-# Copyright (c) 2012-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-
-from twext.python.log import Logger
-from twext.internet.gaiendpoint import GAIEndpoint
-from twext.internet.adaptendpoint import connect
-
-from twisted.web.client import HTTPPageGetter, HTTPClientFactory
-from twisted.internet import reactor
-from twisted.internet.defer import inlineCallbacks, returnValue
-
-import json
-
-log = Logger()
-
- at inlineCallbacks
-def guidForAuthToken(token, host="localhost", port=80):
-    """
-    Send a GET request to the web auth service to retrieve the user record
-    guid associated with the provided auth token.
-
-    @param token: An auth token, usually passed in via cookie when webcal
-        makes a request.
-    @type token: C{str}
-    @return: deferred returning a guid (C{str}) if successful, or
-        will raise WebAuthError otherwise.
-    """
-    url = "http://%s:%d/auth/verify?auth_token=%s" % (host, port, token,)
-    jsonResponse = (yield _getPage(url, host, port))
-    try:
-        response = json.loads(jsonResponse)
-    except Exception, e:
-        log.error("Error parsing JSON response from webauth: %s (%s)" %
-            (jsonResponse, str(e)))
-        raise WebAuthError("Could not look up token: %s" % (token,))
-    if response["succeeded"]:
-        returnValue(response["generated_uid"])
-    else:
-        raise WebAuthError("Could not look up token: %s" % (token,))
-
-
-
-def accessForUserToWiki(user, wiki, host="localhost", port=4444):
-    """
-    Send a GET request to the wiki collabd service to retrieve the access level
-    the given user (in GUID form) has to the given wiki (in wiki short-name
-    form).
-
-    @param user: The GUID of the user
-    @type user: C{str}
-    @param wiki: The short name of the wiki
-    @type wiki: C{str}
-    @return: deferred returning a access level (C{str}) if successful, or
-        if the user is not recognized a twisted.web.error.Error with
-        status FORBIDDEN will errBack; an unknown wiki will have a status
-        of NOT_FOUND
-    """
-    url = "http://%s:%s/cal/accessLevelForUserWikiCalendar/%s/%s" % (host, port,
-        user, wiki)
-    return _getPage(url, host, port)
-
-
-
-def _getPage(url, host, port):
-    """
-    Fetch the body of the given url via HTTP, connecting to the given host
-    and port.
-
-    @param url: The URL to GET
-    @type url: C{str}
-    @param host: The hostname to connect to
-    @type host: C{str}
-    @param port: The port number to connect to
-    @type port: C{int}
-    @return: A deferred; upon 200 success the body of the response is returned,
-        otherwise a twisted.web.error.Error is the result.
-    """
-    factory = HTTPClientFactory(url)
-    factory.protocol = HTTPPageGetter
-    connect(GAIEndpoint(reactor, host, port), factory)
-    return factory.deferred
-
-
-
-class WebAuthError(RuntimeError):
-    """
-    Error in web auth
-    """

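The deleted guidForAuthToken() helper issued a plain HTTP GET against the web auth service and expected a JSON body carrying "succeeded" and "generated_uid" keys; its replacement now lives in txdav.who.wiki as uidForAuthToken (see the root.py changes below). A minimal, self-contained sketch of just the response parsing, mirroring the deleted logic (the standalone function name is illustrative):

    import json


    class WebAuthError(RuntimeError):
        """
        Error in web auth
        """


    def parseAuthTokenResponse(jsonResponse, token):
        # Mirror of the deleted guidForAuthToken() parsing: decode the JSON
        # reply and return the generated UID, raising WebAuthError otherwise.
        try:
            response = json.loads(jsonResponse)
        except ValueError as e:
            raise WebAuthError(
                "Could not look up token: %s (%s)" % (token, e)
            )
        if response.get("succeeded"):
            return response["generated_uid"]
        raise WebAuthError("Could not look up token: %s" % (token,))
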
Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/provision/root.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/provision/root.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/provision/root.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -20,33 +20,34 @@
 ]
 
 from twext.python.log import Logger
-from txweb2 import responsecode
-from txweb2.auth.wrapper import UnauthorizedResponse
-from txdav.xml import element as davxml
-from txweb2.dav.xattrprops import xattrPropertyStore
-from txweb2.http import HTTPError, StatusResponse, RedirectResponse
-
 from twisted.cred.error import LoginFailed, UnauthorizedLogin
-from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
 from twisted.python.reflect import namedClass
-from twisted.web.xmlrpc import Proxy
 from twisted.web.error import Error as WebError
-
+from twistedcaldav.cache import DisabledCache
+from twistedcaldav.cache import MemcacheResponseCache, MemcacheChangeNotifier
 from twistedcaldav.cache import _CachedResponseResource
-from twistedcaldav.cache import MemcacheResponseCache, MemcacheChangeNotifier
-from twistedcaldav.cache import DisabledCache
 from twistedcaldav.config import config
+from twistedcaldav.directory.principal import DirectoryPrincipalResource
 from twistedcaldav.extensions import DAVFile, CachingPropertyStore
 from twistedcaldav.extensions import DirectoryPrincipalPropertySearchMixIn
 from twistedcaldav.extensions import ReadOnlyResourceMixIn
 from twistedcaldav.resource import CalDAVComplianceMixIn
-from twistedcaldav.directory.principal import DirectoryPrincipalResource
-from calendarserver.platform.darwin.wiki import guidForAuthToken
+from txdav.who.wiki import DirectoryService as WikiDirectoryService
+from txdav.who.wiki import uidForAuthToken
+from txdav.xml import element as davxml
+from txweb2 import responsecode
+from txweb2.auth.wrapper import UnauthorizedResponse
+from txweb2.dav.xattrprops import xattrPropertyStore
+from txweb2.http import HTTPError, StatusResponse, RedirectResponse
 
 log = Logger()
 
 
-class RootResource (ReadOnlyResourceMixIn, DirectoryPrincipalPropertySearchMixIn, CalDAVComplianceMixIn, DAVFile):
+class RootResource(
+    ReadOnlyResourceMixIn, DirectoryPrincipalPropertySearchMixIn,
+    CalDAVComplianceMixIn, DAVFile
+):
     """
     A special root resource that contains support checking SACLs
     as well as adding responseFilters.
@@ -58,17 +59,17 @@
     # starts with any of these, then the list of SACLs are checked.  If the
     # request path does not start with any of these, then no SACLs are checked.
     saclMap = {
-        "addressbooks" : ("addressbook",),
-        "calendars" : ("calendar",),
-        "directory" : ("addressbook",),
-        "principals" : ("addressbook", "calendar"),
-        "webcal" : ("calendar",),
+        "addressbooks": ("addressbook",),
+        "calendars": ("calendar",),
+        "directory": ("addressbook",),
+        "principals": ("addressbook", "calendar"),
+        "webcal": ("calendar",),
     }
 
     # If a top-level resource path starts with any of these, an unauthenticated
     # request is redirected to the auth url (config.WebCalendarAuthPath)
     authServiceMap = {
-        "webcal" : True,
+        "webcal": True,
     }
 
     def __init__(self, path, *args, **kwargs):
@@ -82,11 +83,17 @@
 
         self.contentFilters = []
 
-        if config.EnableResponseCache and config.Memcached.Pools.Default.ClientEnabled:
+        if (
+            config.EnableResponseCache and
+            config.Memcached.Pools.Default.ClientEnabled
+        ):
             self.responseCache = MemcacheResponseCache(self.fp)
 
-            # These class attributes need to be setup with our memcache notifier
-            DirectoryPrincipalResource.cacheNotifierFactory = MemcacheChangeNotifier
+            # These class attributes need to be setup with our memcache\
+            # notifier
+            DirectoryPrincipalResource.cacheNotifierFactory = (
+                MemcacheChangeNotifier
+            )
         else:
             self.responseCache = DisabledCache()
 
@@ -98,7 +105,9 @@
     def deadProperties(self):
         if not hasattr(self, "_dead_properties"):
             # Get the property store from super
-            deadProperties = namedClass(config.RootResourcePropStoreClass)(self)
+            deadProperties = (
+                namedClass(config.RootResourcePropStoreClass)(self)
+            )
 
             # Wrap the property store in a memory store
             if isinstance(deadProperties, xattrPropertyStore):
@@ -110,7 +119,7 @@
 
 
     def defaultAccessControlList(self):
-        return config.RootResourceACL
+        return succeed(config.RootResourceACL)
 
 
     @inlineCallbacks
@@ -160,7 +169,9 @@
         request.checkingSACL = True
 
         for collection in self.principalCollections():
-            principal = collection._principalForURI(authzUser.children[0].children[0].data)
+            principal = yield collection._principalForURI(
+                authzUser.children[0].children[0].data
+            )
             if principal is None:
                 response = (yield UnauthorizedResponse.makeResponse(
                     request.credentialFactories,
@@ -185,7 +196,10 @@
         if access:
             returnValue(True)
 
-        log.warn("User %r is not enabled with the %r SACL(s)" % (username, saclServices,))
+        log.warn(
+            "User {user!r} is not enabled with the {sacl!r} SACL(s)",
+            user=username, sacl=saclServices
+        )
         raise HTTPError(responsecode.FORBIDDEN)
 
 
@@ -229,54 +243,71 @@
                     token = None
 
                 if token is not None and token != "unauthenticated":
-                    log.debug("Wiki sessionID cookie value: %s" % (token,))
+                    log.debug(
+                        "Wiki sessionID cookie value: {token}", token=token
+                    )
 
                     record = None
                     try:
-                        if wikiConfig.LionCompatibility:
-                            guid = None
-                            proxy = Proxy(wikiConfig["URL"])
-                            username = (yield proxy.callRemote(wikiConfig["UserMethod"], token))
-                            directory = request.site.resource.getDirectory()
-                            record = directory.recordWithShortName("users", username)
-                            if record is not None:
-                                guid = record.guid
-                        else:
-                            guid = (yield guidForAuthToken(token))
-                            if guid == "unauthenticated":
-                                guid = None
+                        uid = yield uidForAuthToken(token)
+                        if uid == "unauthenticated":
+                            uid = None
 
-                    except WebError, w:
-                        guid = None
+                    except WebError as w:
+                        uid = None
                         # FORBIDDEN status means it's an unknown token
                         if int(w.status) == responsecode.NOT_FOUND:
-                            log.debug("Unknown wiki token: %s" % (token,))
+                            log.debug(
+                                "Unknown wiki token: {token}", token=token
+                            )
                         else:
-                            log.error("Failed to look up wiki token %s: %s" %
-                                (token, w.message,))
+                            log.error(
+                                "Failed to look up wiki token {token}: "
+                                "{message}",
+                                token=token, message=w.message
+                            )
 
-                    except Exception, e:
-                        log.error("Failed to look up wiki token (%s)" % (e,))
-                        guid = None
+                    except Exception as e:
+                        log.error(
+                            "Failed to look up wiki token: {error}",
+                            error=e
+                        )
+                        uid = None
 
-                    if guid is not None:
-                        log.debug("Wiki lookup returned guid: %s" % (guid,))
+                    if uid is not None:
+                        log.debug(
+                            "Wiki lookup returned uid: {uid}", uid=uid
+                        )
                         principal = None
                         directory = request.site.resource.getDirectory()
-                        record = directory.recordWithGUID(guid)
+                        record = yield directory.recordWithUID(uid)
                         if record is not None:
                             username = record.shortNames[0]
-                            log.debug("Wiki user record for user %s : %s" % (username, record))
+                            log.debug(
+                                "Wiki user record for user {user}: {record}",
+                                user=username, record=record
+                            )
                             for collection in self.principalCollections():
-                                principal = collection.principalForRecord(record)
+                                principal = (
+                                    yield collection.principalForRecord(record)
+                                )
                                 if principal is not None:
                                     break
 
                         if principal:
-                            log.debug("Wiki-authenticated principal %s being assigned to authnUser and authzUser" % (record.uid,))
-                            request.authzUser = request.authnUser = davxml.Principal(
-                                davxml.HRef.fromString("/principals/__uids__/%s/" % (record.uid,))
+                            log.debug(
+                                "Wiki-authenticated principal {record.uid} "
+                                "being assigned to authnUser and authzUser",
+                                record=record
                             )
+                            request.authzUser = request.authnUser = (
+                                davxml.Principal(
+                                    davxml.HRef.fromString(
+                                        "/principals/__uids__/{}/"
+                                        .format(record.uid)
+                                    )
+                                )
+                            )
 
         if not hasattr(request, "authzUser") and config.WebCalendarAuthPath:
             topLevel = request.path.strip("/").split("/")[0]
@@ -286,25 +317,27 @@
 
                 # Use config.ServerHostName if no x-forwarded-host header,
                 # otherwise use the final hostname in x-forwarded-host.
-                host = request.headers.getRawHeaders("x-forwarded-host",
-                    [config.ServerHostName])[-1].split(",")[-1].strip()
+                host = request.headers.getRawHeaders(
+                    "x-forwarded-host",
+                    [config.ServerHostName]
+                )[-1].split(",")[-1].strip()
                 port = 443 if config.EnableSSL else 80
                 scheme = "https" if config.EnableSSL else "http"
 
                 response = RedirectResponse(
-                        request.unparseURL(
-                            host=host,
-                            port=port,
-                            scheme=scheme,
-                            path=config.WebCalendarAuthPath,
-                            querystring="redirect=%s://%s%s" % (
-                                scheme,
-                                host,
-                                request.path
-                            )
-                        ),
-                        temporary=True
-                    )
+                    request.unparseURL(
+                        host=host,
+                        port=port,
+                        scheme=scheme,
+                        path=config.WebCalendarAuthPath,
+                        querystring="redirect={}://{}{}".format(
+                            scheme,
+                            host,
+                            request.path
+                        )
+                    ),
+                    temporary=True
+                )
                 raise HTTPError(response)
 
         # We don't want the /inbox resource to pay attention to SACLs because
@@ -314,10 +347,17 @@
         if segments[0] in ("inbox", "timezones"):
             request.checkedSACL = True
 
-        elif (len(segments) > 2 and segments[0] in ("calendars", "principals") and
+        elif (
             (
-                segments[1] == "wikis" or
-                (segments[1] == "__uids__" and segments[2].startswith("wiki-"))
+                len(segments) > 2 and
+                segments[0] in ("calendars", "principals") and
+                (
+                    segments[1] == "wikis" or
+                    (
+                        segments[1] == "__uids__" and
+                        segments[2].startswith(WikiDirectoryService.uidPrefix)
+                    )
+                )
             )
         ):
             # This is a wiki-related calendar resource. SACLs are not checked.
@@ -332,12 +372,21 @@
                 else:
                     wikiName = segments[2][5:]
                 if wikiName:
-                    log.debug("Wiki principal %s being assigned to authzUser" % (wikiName,))
+                    log.debug(
+                        "Wiki principal {name} being assigned to authzUser",
+                        name=wikiName
+                    )
                     request.authzUser = davxml.Principal(
-                        davxml.HRef.fromString("/principals/wikis/%s/" % (wikiName,))
+                        davxml.HRef.fromString(
+                            "/principals/wikis/{}/".format(wikiName)
+                        )
                     )
 
-        elif self.useSacls and not hasattr(request, "checkedSACL") and not hasattr(request, "checkingSACL"):
+        elif (
+            self.useSacls and
+            not hasattr(request, "checkedSACL") and
+            not hasattr(request, "checkingSACL")
+        ):
             yield self.checkSacl(request)
 
         if config.RejectClients:
@@ -348,28 +397,37 @@
             if agent is not None:
                 for reject in config.RejectClients:
                     if reject.search(agent) is not None:
-                        log.info("Rejecting user-agent: %s" % (agent,))
+                        log.info("Rejecting user-agent: {agent}", agent=agent)
                         raise HTTPError(StatusResponse(
                             responsecode.FORBIDDEN,
-                            "Your client software (%s) is not allowed to access this service." % (agent,)
+                            "Your client software ({}) is not allowed to "
+                            "access this service."
+                            .format(agent)
                         ))
 
-        if config.EnableResponseCache and request.method == "PROPFIND" and not getattr(request, "notInCache", False) and len(segments) > 1:
+        if (
+            config.EnableResponseCache and
+            request.method == "PROPFIND" and
+            not getattr(request, "notInCache", False) and
+            len(segments) > 1
+        ):
             try:
-                authnUser, authzUser = (yield self.authenticate(request))
+                authnUser, authzUser = yield self.authenticate(request)
                 request.authnUser = authnUser
                 request.authzUser = authzUser
             except (UnauthorizedLogin, LoginFailed):
-                response = (yield UnauthorizedResponse.makeResponse(
+                response = yield UnauthorizedResponse.makeResponse(
                     request.credentialFactories,
                     request.remoteAddr
-                ))
+                )
                 raise HTTPError(response)
 
             try:
                 if not getattr(request, "checkingCache", False):
                     request.checkingCache = True
-                    response = (yield self.responseCache.getResponseForRequest(request))
+                    response = yield self.responseCache.getResponseForRequest(
+                        request
+                    )
                     if response is None:
                         request.notInCache = True
                         raise KeyError("Not found in cache.")
@@ -378,7 +436,9 @@
             except KeyError:
                 pass
 
-        child = (yield super(RootResource, self).locateChild(request, segments))
+        child = yield super(RootResource, self).locateChild(
+            request, segments
+        )
         returnValue(child)
 
 

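Alongside the directory API changes, the root.py hunks above also convert logging calls from %-interpolation to the format-field style of twext.python.log, where values are passed as keyword arguments and interpolated by the logging system when the event is rendered. A minimal sketch of the two styles side by side (the message text is taken from the diff; the wrapper function is illustrative):

    from twext.python.log import Logger

    log = Logger()


    def logWikiToken(token):
        # Old style, removed above: eager %-formatting at the call site.
        # log.debug("Wiki sessionID cookie value: %s" % (token,))

        # New style, added above: named fields, formatted by the log
        # system when the event is actually rendered by an observer.
        log.debug("Wiki sessionID cookie value: {token}", token=token)
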
Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/provision/test/test_root.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/provision/test/test_root.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/provision/test/test_root.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -14,29 +14,22 @@
 # limitations under the License.
 ##
 
-import os
 
-from twisted.cred.portal import Portal
 from twisted.internet.defer import inlineCallbacks, maybeDeferred, returnValue
 
+from twext.who.idirectory import RecordType
 from txweb2 import http_headers
 from txweb2 import responsecode
-from txweb2 import server
-from txweb2.auth import basic
-from txweb2.dav import auth
 from txdav.xml import element as davxml
 from txweb2.http import HTTPError
 from txweb2.iweb import IResponse
-from txweb2.test.test_server import SimpleRequest
 
-from twistedcaldav.test.util import TestCase
+from twistedcaldav.test.util import StoreTestCase, SimpleStoreRequest
 from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
-from twistedcaldav.directory.xmlfile import XMLDirectoryService
-from twistedcaldav.directory.test.test_xmlfile import xmlFile, augmentsFile
 
 from calendarserver.provision.root import RootResource
-from twistedcaldav.directory import augment
 
+
 class FakeCheckSACL(object):
     def __init__(self, sacls=None):
         self.sacls = sacls or {}
@@ -53,48 +46,16 @@
 
 
 
-class RootTests(TestCase):
+class RootTests(StoreTestCase):
 
+    @inlineCallbacks
     def setUp(self):
-        super(RootTests, self).setUp()
+        yield super(RootTests, self).setUp()
 
-        self.docroot = self.mktemp()
-        os.mkdir(self.docroot)
-
         RootResource.CheckSACL = FakeCheckSACL(sacls={"calendar": ["dreid"]})
 
-        directory = XMLDirectoryService(
-            {
-                "xmlFile" : xmlFile,
-                "augmentService" :
-                    augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,))
-            }
-        )
 
-        principals = DirectoryPrincipalProvisioningResource(
-            "/principals/",
-            directory
-        )
 
-        root = RootResource(self.docroot, principalCollections=[principals])
-
-        root.putChild("principals",
-                      principals)
-
-        portal = Portal(auth.DavRealm())
-        portal.registerChecker(directory)
-
-        self.root = auth.AuthenticationWrapper(
-            root,
-            portal,
-            (basic.BasicCredentialFactory("Test realm"),),
-            (basic.BasicCredentialFactory("Test realm"),),
-            loginInterfaces=(auth.IPrincipal,))
-
-        self.site = server.Site(self.root)
-
-
-
 class ComplianceTests(RootTests):
     """
     Tests to verify CalDAV compliance of the root resource.
@@ -107,8 +68,8 @@
         Deferred which will fire with (something adaptable to) an HTTP response
         object.
         """
-        request = SimpleRequest(self.site, method, ("/".join([""] + segments)))
-        rsrc = self.root
+        request = SimpleStoreRequest(self, method, ("/".join([""] + segments)))
+        rsrc = self.actualRoot
         while segments:
             rsrc, segments = (yield maybeDeferred(
                 rsrc.locateChild, request, segments
@@ -138,18 +99,12 @@
 
         should return a valid resource
         """
-        self.root.resource.useSacls = False
+        self.actualRoot.useSacls = False
 
-        request = SimpleRequest(self.site,
-                                "GET",
-                                "/principals/")
+        request = SimpleStoreRequest(self, "GET", "/principals/")
 
-        resrc, _ignore_segments = (yield maybeDeferred(
-            self.root.locateChild, request, ["principals"]
-        ))
-
         resrc, segments = (yield maybeDeferred(
-            resrc.locateChild, request, ["principals"]
+            self.actualRoot.locateChild, request, ["principals"]
         ))
 
         self.failUnless(
@@ -169,26 +124,21 @@
 
         should return a valid resource
         """
-        self.root.resource.useSacls = True
+        self.actualRoot.useSacls = True
 
-        request = SimpleRequest(
-            self.site,
+        record = yield self.directory.recordWithShortName(
+            RecordType.user,
+            u"dreid"
+        )
+        request = SimpleStoreRequest(
+            self,
             "GET",
             "/principals/",
-            headers=http_headers.Headers({
-                "Authorization": [
-                    "basic",
-                    "%s" % ("dreid:dierd".encode("base64"),)
-                ]
-            })
+            authRecord=record
         )
 
-        resrc, _ignore_segments = (yield maybeDeferred(
-            self.root.locateChild, request, ["principals"]
-        ))
-
         resrc, segments = (yield maybeDeferred(
-            resrc.locateChild, request, ["principals"]
+            self.actualRoot.locateChild, request, ["principals"]
         ))
 
         self.failUnless(
@@ -218,28 +168,27 @@
 
         should return a 403 forbidden response
         """
-        self.root.resource.useSacls = True
+        self.actualRoot.useSacls = True
 
-        request = SimpleRequest(
-            self.site,
+        record = yield self.directory.recordWithShortName(
+            RecordType.user,
+            u"wsanchez"
+        )
+
+        request = SimpleStoreRequest(
+            self,
             "GET",
             "/principals/",
-            headers=http_headers.Headers({
-                "Authorization": [
-                    "basic",
-                    "%s" % ("wsanchez:zehcnasw".encode("base64"),)
-                ]
-            })
+            authRecord=record
         )
 
-        resrc, _ignore_segments = (yield maybeDeferred(
-            self.root.locateChild, request, ["principals"]
-        ))
-
         try:
             resrc, _ignore_segments = (yield maybeDeferred(
-                resrc.locateChild, request, ["principals"]
+                self.actualRoot.locateChild, request, ["principals"]
             ))
+            raise AssertionError(
+                "RootResource.locateChild did not return an error"
+            )
         except HTTPError, e:
             self.assertEquals(e.response.code, 403)
 
@@ -253,20 +202,16 @@
         should return a 401 UnauthorizedResponse
         """
 
-        self.root.resource.useSacls = True
-        request = SimpleRequest(
-            self.site,
+        self.actualRoot.useSacls = True
+        request = SimpleStoreRequest(
+            self,
             "GET",
             "/principals/"
         )
 
-        resrc, _ignore_segments = (yield maybeDeferred(
-            self.root.locateChild, request, ["principals"]
-        ))
-
         try:
             resrc, _ignore_segments = (yield maybeDeferred(
-                resrc.locateChild, request, ["principals"]
+                self.actualRoot.locateChild, request, ["principals"]
             ))
             raise AssertionError(
                 "RootResource.locateChild did not return an error"
@@ -283,24 +228,28 @@
 
         should return a 401 UnauthorizedResponse
         """
-        self.root.resource.useSacls = True
+        self.actualRoot.useSacls = True
 
-        request = SimpleRequest(
-            self.site,
+        request = SimpleStoreRequest(
+            self,
             "GET",
             "/principals/",
-            headers=http_headers.Headers({
-                    "Authorization": ["basic", "%s" % (
-                            "dreid:dreid".encode("base64"),)]}))
+            headers=http_headers.Headers(
+                {
+                    "Authorization": [
+                        "basic", "%s" % ("dreid:dreid".encode("base64"),)
+                    ]
+                }
+            )
+        )
 
-        resrc, _ignore_segments = (yield maybeDeferred(
-            self.root.locateChild, request, ["principals"]
-        ))
-
         try:
             resrc, _ignore_segments = (yield maybeDeferred(
-                resrc.locateChild, request, ["principals"]
+                self.actualRoot.locateChild, request, ["principals"]
             ))
+            raise AssertionError(
+                "RootResource.locateChild did not return an error"
+            )
         except HTTPError, e:
             self.assertEquals(e.response.code, 401)
 
@@ -313,7 +262,7 @@
                 self.fail("Incorrect response for DELETE /: %s"
                           % (response.code,))
 
-        request = SimpleRequest(self.site, "DELETE", "/")
+        request = SimpleStoreRequest(self, "DELETE", "/")
         return self.send(request, do_test)
 
 
@@ -325,8 +274,8 @@
                 self.fail("Incorrect response for COPY /: %s"
                           % (response.code,))
 
-        request = SimpleRequest(
-            self.site,
+        request = SimpleStoreRequest(
+            self,
             "COPY",
             "/",
             headers=http_headers.Headers({"Destination": "/copy/"})
@@ -342,8 +291,8 @@
                 self.fail("Incorrect response for MOVE /: %s"
                           % (response.code,))
 
-        request = SimpleRequest(
-            self.site,
+        request = SimpleStoreRequest(
+            self,
             "MOVE",
             "/",
             headers=http_headers.Headers({"Destination": "/copy/"})
@@ -371,13 +320,15 @@
             return response
 
 
+    @inlineCallbacks
     def setUp(self):
-        super(SACLCacheTests, self).setUp()
-        self.root.resource.responseCache = SACLCacheTests.StubResponseCacheResource()
+        yield super(SACLCacheTests, self).setUp()
+        self.actualRoot.responseCache = SACLCacheTests.StubResponseCacheResource()
 
 
+    @inlineCallbacks
     def test_PROPFIND(self):
-        self.root.resource.useSacls = True
+        self.actualRoot.useSacls = True
 
         body = """<?xml version="1.0" encoding="utf-8" ?>
 <D:propfind xmlns:D="DAV:">
@@ -387,48 +338,46 @@
 </D:prop>
 </D:propfind>
 """
+        record = yield self.directory.recordWithShortName(
+            RecordType.user,
+            u"dreid"
+        )
 
-        request = SimpleRequest(
-            self.site,
+        request = SimpleStoreRequest(
+            self,
             "PROPFIND",
             "/principals/users/dreid/",
             headers=http_headers.Headers({
-                    'Authorization': ['basic', '%s' % ('dreid:dierd'.encode('base64'),)],
-                    'Content-Type': 'application/xml; charset="utf-8"',
                     'Depth': '1',
             }),
+            authRecord=record,
             content=body
         )
+        response = yield self.send(request)
+        response = IResponse(response)
 
-        def gotResponse1(response):
-            if response.code != responsecode.MULTI_STATUS:
-                self.fail("Incorrect response for PROPFIND /principals/: %s" % (response.code,))
+        if response.code != responsecode.MULTI_STATUS:
+            self.fail("Incorrect response for PROPFIND /principals/: %s" % (response.code,))
 
-            request = SimpleRequest(
-                self.site,
-                "PROPFIND",
-                "/principals/users/dreid/",
-                headers=http_headers.Headers({
-                        'Authorization': ['basic', '%s' % ('dreid:dierd'.encode('base64'),)],
-                        'Content-Type': 'application/xml; charset="utf-8"',
-                        'Depth': '1',
-                }),
-                content=body
-            )
+        request = SimpleStoreRequest(
+            self,
+            "PROPFIND",
+            "/principals/users/dreid/",
+            headers=http_headers.Headers({
+                    'Depth': '1',
+            }),
+            authRecord=record,
+            content=body
+        )
+        response = yield self.send(request)
+        response = IResponse(response)
 
-            d = self.send(request, gotResponse2)
-            return d
+        if response.code != responsecode.MULTI_STATUS:
+            self.fail("Incorrect response for PROPFIND /principals/: %s" % (response.code,))
+        self.assertEqual(self.actualRoot.responseCache.cacheHitCount, 1)
 
-        def gotResponse2(response):
-            if response.code != responsecode.MULTI_STATUS:
-                self.fail("Incorrect response for PROPFIND /principals/: %s" % (response.code,))
-            self.assertEqual(self.root.resource.responseCache.cacheHitCount, 1)
 
-        d = self.send(request, gotResponse1)
-        return d
 
-
-
 class WikiTests(RootTests):
 
     @inlineCallbacks
@@ -438,12 +387,9 @@
         request.checkedWiki will be set to True
         """
 
-        request = SimpleRequest(self.site, "GET", "/principals/")
+        request = SimpleStoreRequest(self, "GET", "/principals/")
 
         resrc, _ignore_segments = (yield maybeDeferred(
-            self.root.locateChild, request, ["principals"]
+            self.actualRoot.locateChild, request, ["principals"]
         ))
-        resrc, _ignore_segments = (yield maybeDeferred(
-            resrc.locateChild, request, ["principals"]
-        ))
         self.assertTrue(request.checkedWiki)
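
The rewritten tests above drop hand-built basic-auth headers in favor of handing a directory record to SimpleStoreRequest via authRecord. A minimal sketch of that pattern, reusing only names that appear in the diff (StoreTestCase, SimpleStoreRequest, RecordType, self.directory, self.actualRoot); the class name, test name, and final assertion are illustrative only:

    from twisted.internet.defer import inlineCallbacks, maybeDeferred
    from twext.who.idirectory import RecordType
    from twistedcaldav.test.util import StoreTestCase, SimpleStoreRequest

    class AuthRecordPatternTests(StoreTestCase):

        @inlineCallbacks
        def test_principals_with_auth_record(self):
            # Look the user up in the shared directory instead of encoding
            # a basic-auth header by hand.
            record = yield self.directory.recordWithShortName(
                RecordType.user, u"dreid"
            )
            request = SimpleStoreRequest(
                self, "GET", "/principals/", authRecord=record
            )
            # locateChild may return a value or a Deferred, so wrap it.
            resrc, _segments = yield maybeDeferred(
                self.actualRoot.locateChild, request, ["principals"]
            )
            self.assertNotEqual(resrc, None)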

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/push/applepush.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/push/applepush.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/push/applepush.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -820,23 +820,25 @@
 
 
     def defaultAccessControlList(self):
-        return davxml.ACL(
-            # DAV:Read for authenticated principals
-            davxml.ACE(
-                davxml.Principal(davxml.Authenticated()),
-                davxml.Grant(
-                    davxml.Privilege(davxml.Read()),
+        return succeed(
+            davxml.ACL(
+                # DAV:Read for authenticated principals
+                davxml.ACE(
+                    davxml.Principal(davxml.Authenticated()),
+                    davxml.Grant(
+                        davxml.Privilege(davxml.Read()),
+                    ),
+                    davxml.Protected(),
                 ),
-                davxml.Protected(),
-            ),
-            # DAV:Write for authenticated principals
-            davxml.ACE(
-                davxml.Principal(davxml.Authenticated()),
-                davxml.Grant(
-                    davxml.Privilege(davxml.Write()),
+                # DAV:Write for authenticated principals
+                davxml.ACE(
+                    davxml.Principal(davxml.Authenticated()),
+                    davxml.Grant(
+                        davxml.Privilege(davxml.Write()),
+                    ),
+                    davxml.Protected(),
                 ),
-                davxml.Protected(),
-            ),
+            )
         )
 
 
@@ -869,6 +871,7 @@
 
     http_GET = http_POST
 
+    @inlineCallbacks
     def principalFromRequest(self, request):
         """
         Given an authenticated request, return the principal based on
@@ -877,9 +880,9 @@
         principal = None
         for collection in self.principalCollections():
             data = request.authnUser.children[0].children[0].data
-            principal = collection._principalForURI(data)
+            principal = yield collection._principalForURI(data)
             if principal is not None:
-                return principal
+                returnValue(principal)
 
 
     @inlineCallbacks
@@ -910,7 +913,7 @@
             msg = "Invalid request: bad 'token' %s" % (token,)
 
         else:
-            principal = self.principalFromRequest(request)
+            principal = yield self.principalFromRequest(request)
             uid = principal.record.uid
             try:
                 yield self.addSubscription(token, key, uid, userAgent, host)
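
The applepush.py change above turns principalFromRequest into an inlineCallbacks generator because _principalForURI now returns a Deferred, and its caller yields the result. As a minimal, self-contained illustration of that conversion, with hypothetical names (lookupDeferred and find_principal stand in for the real lookup and caller; only the inlineCallbacks/returnValue mechanics mirror the diff):

    from twisted.internet.defer import inlineCallbacks, returnValue, succeed

    def lookupDeferred(collection, uri):
        # Hypothetical stand-in for a lookup that now returns a Deferred.
        return succeed(collection.get(uri))

    @inlineCallbacks
    def find_principal(collections, uri):
        # Yield each Deferred lookup; stop at the first non-None result.
        for collection in collections:
            principal = yield lookupDeferred(collection, uri)
            if principal is not None:
                returnValue(principal)

Callers then yield find_principal(...) themselves, just as the POST handler in the diff now yields principalFromRequest(request).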

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/caldav.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/caldav.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -90,17 +90,17 @@
 from txdav.common.datastore.work.revision_cleanup import (
     scheduleFirstFindMinRevision
 )
-from txdav.dps.server import DirectoryProxyServiceMaker
+from txdav.who.util import directoryFromConfig
 from txdav.dps.client import DirectoryService as DirectoryProxyClientService
-from txdav.who.groups import GroupCacher as NewGroupCacher
+from txdav.who.groups import GroupCacher
 
 from twistedcaldav import memcachepool
 from twistedcaldav.config import config, ConfigurationError
-from twistedcaldav.directory import calendaruserproxy
-from twistedcaldav.directory.directory import GroupMembershipCacheUpdater
 from twistedcaldav.localization import processLocalizationFiles
 from twistedcaldav.stdconfig import DEFAULT_CONFIG, DEFAULT_CONFIG_FILE
-from twistedcaldav.upgrade import UpgradeFileSystemFormatStep, PostDBImportStep
+from twistedcaldav.upgrade import (
+    UpgradeFileSystemFormatStep, PostDBImportStep,
+)
 
 try:
     from twistedcaldav.authkerb import NegotiateCredentialFactory
@@ -124,6 +124,8 @@
     pgServiceFromConfig, getDBPool, MemoryLimitService,
     storeFromConfig
 )
+from twisted.application.strports import service as strPortsService
+from txdav.dps.server import DirectoryProxyAMPFactory
 
 try:
     from calendarserver.version import version
@@ -536,10 +538,7 @@
             )
             self.monitor.addProcessObject(process, PARENT_ENVIRONMENT)
 
-        if (
-           config.DirectoryProxy.Enabled and
-           config.DirectoryProxy.SocketPath != ""
-        ):
+        if config.DirectoryProxy.Enabled:
             log.info("Adding directory proxy service")
 
             dpsArgv = [
@@ -863,10 +862,10 @@
         CalDAV and CardDAV requests.
         """
         pool, txnFactory = getDBPool(config)
-        store = storeFromConfig(config, txnFactory)
+        directory = DirectoryProxyClientService(config.DirectoryRealmName)
+        store = storeFromConfig(config, txnFactory, directory)
         logObserver = AMPCommonAccessLoggingObserver()
         result = self.requestProcessingService(options, store, logObserver)
-        directory = store.directoryService()
 
         if pool is not None:
             pool.setServiceParent(result)
@@ -938,14 +937,9 @@
 
         # Optionally set up group cacher
         if config.GroupCaching.Enabled:
-            groupCacher = GroupMembershipCacheUpdater(
-                calendaruserproxy.ProxyDBService,
+            groupCacher = GroupCacher(
                 directory,
-                config.GroupCaching.UpdateSeconds,
-                config.GroupCaching.ExpireSeconds,
-                config.GroupCaching.LockSeconds,
-                namespace=config.GroupCaching.MemcachedPool,
-                useExternalProxies=config.GroupCaching.UseExternalProxies,
+                updateSeconds=config.GroupCaching.UpdateSeconds
             )
         else:
             groupCacher = None
@@ -1281,21 +1275,12 @@
 
             # Optionally set up group cacher
             if config.GroupCaching.Enabled:
-                groupCacher = GroupMembershipCacheUpdater(
-                    calendaruserproxy.ProxyDBService,
+                groupCacher = GroupCacher(
                     directory,
-                    config.GroupCaching.UpdateSeconds,
-                    config.GroupCaching.ExpireSeconds,
-                    config.GroupCaching.LockSeconds,
-                    namespace=config.GroupCaching.MemcachedPool,
-                    useExternalProxies=config.GroupCaching.UseExternalProxies
+                    updateSeconds=config.GroupCaching.UpdateSeconds
                 )
-                newGroupCacher = NewGroupCacher(
-                    DirectoryProxyClientService(None)
-                )
             else:
                 groupCacher = None
-                newGroupCacher = None
 
             # Optionally enable Manhole access
             if config.Manhole.Enabled:
@@ -1326,17 +1311,11 @@
                         "manhole_tap could not be imported"
                     )
 
-            # Optionally enable Directory Proxy
-            if config.DirectoryProxy.Enabled:
-                dps = DirectoryProxyServiceMaker().makeService(None)
-                dps.setServiceParent(result)
-
             def decorateTransaction(txn):
                 txn._pushDistributor = pushDistributor
                 txn._rootResource = result.rootResource
                 txn._mailRetriever = mailRetriever
                 txn._groupCacher = groupCacher
-                txn._newGroupCacher = newGroupCacher
 
             store.callWithNewTransactions(decorateTransaction)
 
@@ -1376,7 +1355,7 @@
                 Popen(memcachedArgv)
 
         return self.storageService(
-            slaveSvcCreator, logObserver, uid=uid, gid=gid
+            slaveSvcCreator, logObserver, uid=uid, gid=gid, directory=None
         )
 
 
@@ -1392,7 +1371,8 @@
             return config.UtilityServiceClass(store)
 
         uid, gid = getSystemIDs(config.UserName, config.GroupName)
-        return self.storageService(toolServiceCreator, None, uid=uid, gid=gid)
+        return self.storageService(toolServiceCreator, None, uid=uid, gid=gid,
+                                   directory=None)
 
 
     def makeService_Agent(self, options):
@@ -1440,7 +1420,7 @@
 
 
     def storageService(
-        self, createMainService, logObserver, uid=None, gid=None
+        self, createMainService, logObserver, uid=None, gid=None, directory=None
     ):
         """
         If necessary, create a service to be started used for storage; for
@@ -1466,9 +1446,13 @@
             running as root (also the gid to chown Attachments to).
         @type gid: C{int}
 
+        @param directory: The directory service to use.
+        @type directory: L{IStoreDirectoryService} or None
+
         @return: the appropriate service to start.
         @rtype: L{IService}
         """
+
         def createSubServiceFactory(
             dialect=POSTGRES_DIALECT, paramstyle='pyformat'
         ):
@@ -1480,7 +1464,14 @@
                     maxConnections=config.MaxDBConnectionsPerPool
                 )
                 cp.setServiceParent(ms)
-                store = storeFromConfig(config, cp.connection)
+                store = storeFromConfig(config, cp.connection, directory)
+                if directory is None:
+                    # Create a Directory Proxy "Server" service and hand it to
+                    # the store.
+                    # FIXME: right now the store passed *to* the directory is the
+                    # calendar/contacts data store, but for a multi-server deployment
+                    # it will need its own separate store.
+                    store.setDirectoryService(directoryFromConfig(config, store=store))
 
                 pps = PreProcessingService(
                     createMainService, cp, store, logObserver, storageService
@@ -1497,7 +1488,7 @@
 
                 # Still need this for Snow Leopard support
                 pps.addStep(
-                    UpgradeFileSystemFormatStep(config)
+                    UpgradeFileSystemFormatStep(config, store)
                 )
 
                 pps.addStep(
@@ -1605,7 +1596,7 @@
                     "Unknown database type {}".format(config.DBType)
                 )
         else:
-            store = storeFromConfig(config, None)
+            store = storeFromConfig(config, None, directory)
             return createMainService(None, store, logObserver, None)
 
 
@@ -1867,14 +1858,9 @@
 
             # Optionally set up group cacher
             if config.GroupCaching.Enabled:
-                groupCacher = GroupMembershipCacheUpdater(
-                    calendaruserproxy.ProxyDBService,
+                groupCacher = GroupCacher(
                     directory,
-                    config.GroupCaching.UpdateSeconds,
-                    config.GroupCaching.ExpireSeconds,
-                    config.GroupCaching.LockSeconds,
-                    namespace=config.GroupCaching.MemcachedPool,
-                    useExternalProxies=config.GroupCaching.UseExternalProxies
+                    updateSeconds=config.GroupCaching.UpdateSeconds
                 )
             else:
                 groupCacher = None
@@ -1887,9 +1873,30 @@
 
             store.callWithNewTransactions(decorateTransaction)
 
+            # Set up AMP for DPS Server in the master instead of sidecar
+            if not config.DirectoryProxy.Enabled:
+                strPortsService(
+                    "unix:{path}:mode=660".format(
+                        path=config.DirectoryProxy.SocketPath
+                    ),
+                    DirectoryProxyAMPFactory(store.directoryService())
+                ).setServiceParent(multi)
+
             return multi
 
-        ssvc = self.storageService(spawnerSvcCreator, None, uid, gid)
+        if config.DirectoryProxy.Enabled:
+            # If the master is to act as a DPS client, and talk to the
+            # DPS sidecar:
+            directory = DirectoryProxyClientService(
+                config.DirectoryRealmName
+            )
+        else:
+            # If the master is to act as the DPS server:
+            directory = None
+
+        ssvc = self.storageService(
+            spawnerSvcCreator, None, uid, gid, directory=directory
+        )
         ssvc.setServiceParent(s)
         return s
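
The caldav.py hunks above fold the directory proxy into the master when config.DirectoryProxy.Enabled is false, exposing it over a UNIX socket via twisted.application.strports. A rough sketch of that wiring under stated assumptions: the socket path is hypothetical and StandInAMPFactory merely stands in for DirectoryProxyAMPFactory wrapping the store's directory service:

    from twisted.application.service import MultiService
    from twisted.application.strports import service as strPortsService
    from twisted.internet.protocol import Factory
    from twisted.protocols.amp import AMP

    class StandInAMPFactory(Factory):
        # Builds bare AMP connections; the real factory speaks the DPS commands.
        protocol = AMP

    parent = MultiService()
    ampListener = strPortsService(
        "unix:/var/run/caldavd/dps.sock:mode=660",  # hypothetical socket path
        StandInAMPFactory(),
    )
    ampListener.setServiceParent(parent)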
 

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/test/test_caldav.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/test/test_caldav.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/test/test_caldav.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -28,12 +28,11 @@
 from twisted.python.threadable import isInIOThread
 from twisted.internet.reactor import callFromThread
 from twisted.python.usage import Options, UsageError
-from twisted.python.reflect import namedAny
 from twisted.python.procutils import which
 
 from twisted.internet.interfaces import IProcessTransport, IReactorProcess
 from twisted.internet.protocol import ServerFactory
-from twisted.internet.defer import Deferred, inlineCallbacks, passthru, succeed
+from twisted.internet.defer import Deferred, inlineCallbacks, succeed
 from twisted.internet.task import Clock
 from twisted.internet import reactor
 from twisted.application.service import (IService, IServiceCollection,
@@ -42,15 +41,15 @@
 
 from twext.python.log import Logger
 from twext.python.filepath import CachingFilePath as FilePath
-from plistlib import writePlist #@UnresolvedImport
+from plistlib import writePlist  # @UnresolvedImport
 from txweb2.dav import auth
 from txweb2.log import LogWrapperResource
 from twext.internet.tcp import MaxAcceptTCPServer, MaxAcceptSSLServer
 
 from twistedcaldav.config import config, ConfigDict, ConfigurationError
+from twistedcaldav.resource import AuthenticationWrapper
 from twistedcaldav.stdconfig import DEFAULT_CONFIG
 
-from twistedcaldav.directory.aggregate import AggregateDirectoryService
 from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
 from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
 
@@ -178,7 +177,7 @@
 
 
 
-class CalDAVOptionsTest (StoreTestCase):
+class CalDAVOptionsTest(StoreTestCase):
     """
     Test various parameters of our usage.Options subclass
     """
@@ -304,111 +303,6 @@
 
 
 
-class BaseServiceMakerTests(StoreTestCase):
-    """
-    Utility class for ServiceMaker tests.
-    """
-    configOptions = None
-
-    @inlineCallbacks
-    def setUp(self):
-        yield super(BaseServiceMakerTests, self).setUp()
-        self.options = TestCalDAVOptions()
-        self.options.parent = Options()
-        self.options.parent["gid"] = None
-        self.options.parent["uid"] = None
-        self.options.parent["nodaemon"] = None
-
-        self.config = ConfigDict(DEFAULT_CONFIG)
-
-        accountsFile = os.path.join(sourceRoot, "twistedcaldav/directory/test/accounts.xml")
-        resourcesFile = os.path.join(sourceRoot, "twistedcaldav/directory/test/resources.xml")
-        augmentsFile = os.path.join(sourceRoot, "twistedcaldav/directory/test/augments.xml")
-        pemFile = os.path.join(sourceRoot, "twistedcaldav/test/data/server.pem")
-
-        self.config["DirectoryService"] = {
-            "params": {"xmlFile": accountsFile},
-            "type": "twistedcaldav.directory.xmlfile.XMLDirectoryService"
-        }
-
-        self.config["ResourceService"] = {
-            "params": {"xmlFile": resourcesFile},
-        }
-
-        self.config["AugmentService"] = {
-            "params": {"xmlFiles": [augmentsFile]},
-            "type": "twistedcaldav.directory.augment.AugmentXMLDB"
-        }
-
-        self.config.UseDatabase = False
-        self.config.ServerRoot = self.mktemp()
-        self.config.ConfigRoot = "config"
-        self.config.ProcessType = "Single"
-        self.config.SSLPrivateKey = pemFile
-        self.config.SSLCertificate = pemFile
-        self.config.EnableSSL = True
-        self.config.Memcached.Pools.Default.ClientEnabled = False
-        self.config.Memcached.Pools.Default.ServerEnabled = False
-        self.config.DirectoryAddressBook.Enabled = False
-        self.config.UsePackageTimezones = True
-
-        if self.configOptions:
-            self.config.update(self.configOptions)
-
-        os.mkdir(self.config.ServerRoot)
-        os.mkdir(os.path.join(self.config.ServerRoot, self.config.DocumentRoot))
-        os.mkdir(os.path.join(self.config.ServerRoot, self.config.DataRoot))
-        os.mkdir(os.path.join(self.config.ServerRoot, self.config.ConfigRoot))
-
-        self.configFile = self.mktemp()
-
-        self.writeConfig()
-
-
-    def tearDown(self):
-        config.setDefaults(DEFAULT_CONFIG)
-        config.reset()
-
-
-    def writeConfig(self):
-        """
-        Flush self.config out to self.configFile
-        """
-        writePlist(self.config, self.configFile)
-
-
-    def makeService(self, patcher=passthru):
-        """
-        Create a service by calling into CalDAVServiceMaker with
-        self.configFile
-        """
-        self.options.parseOptions(["-f", self.configFile])
-
-        maker = CalDAVServiceMaker()
-        maker = patcher(maker)
-        return maker.makeService(self.options)
-
-
-    def getSite(self):
-        """
-        Get the server.Site from the service by finding the HTTPFactory.
-        """
-        service = self.makeService()
-        for listeningService in inServiceHierarchy(
-                service,
-                # FIXME: need a better predicate for 'is this really an HTTP
-                # factory' but this works for now.
-                # NOTE: in a database 'single' configuration, PostgresService
-                # will prevent the HTTP services from actually getting added to
-                # the hierarchy until the hierarchy has started.
-                # 'underlyingSite' assigned in caldav.py
-                lambda x: hasattr(x, 'underlyingSite')
-            ):
-            return listeningService.underlyingSite
-        raise RuntimeError("No site found.")
-
-
-
 def inServiceHierarchy(svc, predicate):
     """
     Find services in the service collection which satisfy the given predicate.
@@ -452,44 +346,72 @@
 
 
 
-class CalDAVServiceMakerTests(BaseServiceMakerTests):
-    """
-    Test the service maker's behavior
-    """
+# Tests for the various makeService_ flavors:
 
-    def test_makeServiceDispatcher(self):
-        """
-        Test the default options of the dispatching makeService
-        """
-        validServices = ["Slave", "Combined"]
+class CalDAVServiceMakerTestBase(StoreTestCase):
 
-        self.config["HTTPPort"] = 0
+    @inlineCallbacks
+    def setUp(self):
+        yield super(CalDAVServiceMakerTestBase, self).setUp()
+        self.options = TestCalDAVOptions()
+        self.options.parent = Options()
+        self.options.parent["gid"] = None
+        self.options.parent["uid"] = None
+        self.options.parent["nodaemon"] = None
 
-        for service in validServices:
-            self.config["ProcessType"] = service
-            self.writeConfig()
-            self.makeService()
 
-        self.config["ProcessType"] = "Unknown Service"
-        self.writeConfig()
-        self.assertRaises(UsageError, self.makeService)
+class CalDAVServiceMakerTestSingle(CalDAVServiceMakerTestBase):
 
+    def configure(self):
+        super(CalDAVServiceMakerTestSingle, self).configure()
+        config.ProcessType = "Single"
 
+    def test_makeService(self):
+        CalDAVServiceMaker().makeService(self.options)
+        # No error
+
+
+class CalDAVServiceMakerTestSlave(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(CalDAVServiceMakerTestSlave, self).configure()
+        config.ProcessType = "Slave"
+
+    def test_makeService(self):
+        CalDAVServiceMaker().makeService(self.options)
+        # No error
+
+
+class CalDAVServiceMakerTestUnknown(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(CalDAVServiceMakerTestUnknown, self).configure()
+        config.ProcessType = "Unknown"
+
+    def test_makeService(self):
+        self.assertRaises(UsageError, CalDAVServiceMaker().makeService, self.options)
+        # error
+
+
+
+class ModesOnUNIXSocketsTests(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(ModesOnUNIXSocketsTests, self).configure()
+        config.ProcessType = "Combined"
+        config.HTTPPort = 0
+        self.alternateGroup = determineAppropriateGroupID()
+        config.GroupName = grp.getgrgid(self.alternateGroup).gr_name
+        config.Stats.EnableUnixStatsSocket = True
+
+
     def test_modesOnUNIXSockets(self):
         """
         The logging and stats UNIX sockets that are bound as part of the
         'Combined' service hierarchy should have a secure mode specified: only
         the executing user should be able to open and send to them.
         """
-
-        self.config["HTTPPort"] = 0 # Don't conflict with the test above.
-        alternateGroup = determineAppropriateGroupID()
-        self.config.GroupName = grp.getgrgid(alternateGroup).gr_name
-
-        self.config["ProcessType"] = "Combined"
-        self.config.Stats.EnableUnixStatsSocket = True
-        self.writeConfig()
-        svc = self.makeService()
+        svc = CalDAVServiceMaker().makeService(self.options)
         for serviceName in [_CONTROL_SERVICE_NAME]:
             socketService = svc.getServiceNamed(serviceName)
             self.assertIsInstance(socketService, GroupOwnedUNIXServer)
@@ -498,7 +420,7 @@
                 m, int("660", 8),
                 "Wrong mode on %s: %s" % (serviceName, oct(m))
             )
-            self.assertEquals(socketService.gid, alternateGroup)
+            self.assertEquals(socketService.gid, self.alternateGroup)
         for serviceName in ["unix-stats"]:
             socketService = svc.getServiceNamed(serviceName)
             self.assertIsInstance(socketService, GroupOwnedUNIXServer)
@@ -507,84 +429,119 @@
                 m, int("660", 8),
                 "Wrong mode on %s: %s" % (serviceName, oct(m))
             )
-            self.assertEquals(socketService.gid, alternateGroup)
+            self.assertEquals(socketService.gid, self.alternateGroup)
 
 
+class ProcessMonitorTests(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(ProcessMonitorTests, self).configure()
+        config.ProcessType = "Combined"
+
     def test_processMonitor(self):
         """
         In the master, there should be exactly one
         L{DelayedStartupProcessMonitor} in the service hierarchy so that it
         will be started by startup.
         """
-        self.config["ProcessType"] = "Combined"
-        self.writeConfig()
         self.assertEquals(
             1,
             len(
                 list(inServiceHierarchy(
-                    self.makeService(),
+                    CalDAVServiceMaker().makeService(self.options),
                     lambda x: isinstance(x, DelayedStartupProcessMonitor)))
             )
         )
 
 
+
+class StoreQueuerSetInMasterTests(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(StoreQueuerSetInMasterTests, self).configure()
+        config.ProcessType = "Combined"
+
+
     def test_storeQueuerSetInMaster(self):
         """
         In the master, the store's queuer should be set to a
         L{PeerConnectionPool}, so that work can be distributed to other
         processes.
         """
-        self.config["ProcessType"] = "Combined"
-        self.writeConfig()
         class NotAStore(object):
             queuer = LocalQueuer(None)
+
             def __init__(self, directory):
                 self.directory = directory
+
             def newTransaction(self):
                 return None
+
             def callWithNewTransactions(self, x):
                 pass
+
             def directoryService(self):
                 return self.directory
+
+
         store = NotAStore(self.directory)
+
+
         def something(proposal):
             pass
+
         store.queuer.callWithNewProposals(something)
+
+
         def patch(maker):
             def storageServiceStandIn(createMainService, logObserver,
-                                      uid=None, gid=None):
+                                      uid=None, gid=None, directory=None):
                 pool = None
                 logObserver = None
                 storageService = None
-                svc = createMainService(pool, store, logObserver,
-                    storageService)
+                svc = createMainService(
+                    pool, store, logObserver, storageService
+                )
                 multi = MultiService()
                 svc.setServiceParent(multi)
                 return multi
             self.patch(maker, "storageService", storageServiceStandIn)
             return maker
-        self.makeService(patch)
+
+        maker = CalDAVServiceMaker()
+        maker = patch(maker)
+        maker.makeService(self.options)
         self.assertIsInstance(store.queuer, PeerConnectionPool)
         self.assertIn(something, store.queuer.proposalCallbacks)
 
 
 
-class SlaveServiceTest(BaseServiceMakerTests):
+
+
+
+
+class SlaveServiceTests(CalDAVServiceMakerTestBase):
     """
     Test various configurations of the Slave service
     """
 
-    configOptions = {
-        "HTTPPort": 8008,
-        "SSLPort": 8443,
-    }
+    def configure(self):
+        super(SlaveServiceTests, self).configure()
+        config.ProcessType = "Slave"
+        config.HTTPPort = 8008
+        config.SSLPort = 8443
+        pemFile = os.path.join(sourceRoot, "twistedcaldav/test/data/server.pem")
+        config.SSLPrivateKey = pemFile
+        config.SSLCertificate = pemFile
+        config.EnableSSL = True
 
+
     def test_defaultService(self):
         """
         Test the value of a Slave service in its simplest
         configuration.
         """
-        service = self.makeService()
+        service = CalDAVServiceMaker().makeService(self.options)
 
         self.failUnless(
             IService(service),
@@ -606,11 +563,12 @@
         default TCP and SSL configuration
         """
         # Note: the listeners are bundled within a MultiService named "ConnectionService"
-        service = self.makeService().getServiceNamed(CalDAVService.connectionServiceName)
+        service = CalDAVServiceMaker().makeService(self.options)
+        service = service.getServiceNamed(CalDAVService.connectionServiceName)
 
         expectedSubServices = dict((
-            (MaxAcceptTCPServer, self.config["HTTPPort"]),
-            (MaxAcceptSSLServer, self.config["SSLPort"]),
+            (MaxAcceptTCPServer, config.HTTPPort),
+            (MaxAcceptSSLServer, config.SSLPort),
         ))
 
         configuredSubServices = [(s.__class__, getattr(s, 'args', None))
@@ -632,7 +590,9 @@
         Test that the configuration of the SSLServer reflect the config file's
         SSL Private Key and SSL Certificate
         """
-        service = self.makeService().getServiceNamed(CalDAVService.connectionServiceName)
+        # Note: the listeners are bundled within a MultiService named "ConnectionService"
+        service = CalDAVServiceMaker().makeService(self.options)
+        service = service.getServiceNamed(CalDAVService.connectionServiceName)
 
         sslService = None
         for s in service.services:
@@ -645,75 +605,115 @@
         context = sslService.args[2]
 
         self.assertEquals(
-            self.config["SSLPrivateKey"],
+            config.SSLPrivateKey,
             context.privateKeyFileName
         )
         self.assertEquals(
-            self.config["SSLCertificate"],
+            config.SSLCertificate,
             context.certificateFileName,
         )
 
 
+
+class NoSSLTests(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(NoSSLTests, self).configure()
+        config.ProcessType = "Slave"
+        config.HTTPPort = 8008
+        # pemFile = os.path.join(sourceRoot, "twistedcaldav/test/data/server.pem")
+        # config.SSLPrivateKey = pemFile
+        # config.SSLCertificate = pemFile
+        # config.EnableSSL = True
+
     def test_noSSL(self):
         """
         Test the single service to make sure there is no SSL Service when SSL
         is disabled
         """
-        del self.config["SSLPort"]
-        self.writeConfig()
+        # Note: the listeners are bundled within a MultiService named "ConnectionService"
+        service = CalDAVServiceMaker().makeService(self.options)
+        service = service.getServiceNamed(CalDAVService.connectionServiceName)
 
-        service = self.makeService().getServiceNamed(CalDAVService.connectionServiceName)
-
         self.assertNotIn(
             internet.SSLServer,
             [s.__class__ for s in service.services]
         )
 
 
+class NoHTTPTests(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(NoHTTPTests, self).configure()
+        config.ProcessType = "Slave"
+        config.SSLPort = 8443
+        pemFile = os.path.join(sourceRoot, "twistedcaldav/test/data/server.pem")
+        config.SSLPrivateKey = pemFile
+        config.SSLCertificate = pemFile
+        config.EnableSSL = True
+
     def test_noHTTP(self):
         """
         Test the single service to make sure there is no TCPServer when
         HTTPPort is not configured
         """
-        del self.config["HTTPPort"]
-        self.writeConfig()
+        # Note: the listeners are bundled within a MultiService named "ConnectionService"
+        service = CalDAVServiceMaker().makeService(self.options)
+        service = service.getServiceNamed(CalDAVService.connectionServiceName)
 
-        service = self.makeService().getServiceNamed(CalDAVService.connectionServiceName)
-
         self.assertNotIn(
             internet.TCPServer,
             [s.__class__ for s in service.services]
         )
 
 
+class SingleBindAddressesTests(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(SingleBindAddressesTests, self).configure()
+        config.ProcessType = "Slave"
+        config.HTTPPort = 8008
+        config.BindAddresses = ["127.0.0.1"]
+
     def test_singleBindAddresses(self):
         """
         Test that the TCPServer and SSLServers are bound to the proper address
         """
-        self.config.BindAddresses = ["127.0.0.1"]
-        self.writeConfig()
+        # Note: the listeners are bundled within a MultiService named "ConnectionService"
+        service = CalDAVServiceMaker().makeService(self.options)
+        service = service.getServiceNamed(CalDAVService.connectionServiceName)
 
-        service = self.makeService().getServiceNamed(CalDAVService.connectionServiceName)
-
         for s in service.services:
             if isinstance(s, (internet.TCPServer, internet.SSLServer)):
                 self.assertEquals(s.kwargs["interface"], "127.0.0.1")
 
 
+class MultipleBindAddressesTests(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(MultipleBindAddressesTests, self).configure()
+        config.ProcessType = "Slave"
+        config.HTTPPort = 8008
+        config.SSLPort = 8443
+        pemFile = os.path.join(sourceRoot, "twistedcaldav/test/data/server.pem")
+        config.SSLPrivateKey = pemFile
+        config.SSLCertificate = pemFile
+        config.EnableSSL = True
+        config.BindAddresses = [
+            "127.0.0.1",
+            "10.0.0.2",
+            "172.53.13.123",
+        ]
+
     def test_multipleBindAddresses(self):
         """
         Test that the TCPServer and SSLServers are bound to the proper
         addresses.
         """
-        self.config.BindAddresses = [
-            "127.0.0.1",
-            "10.0.0.2",
-            "172.53.13.123",
-        ]
+        # Note: the listeners are bundled within a MultiService named "ConnectionService"
+        service = CalDAVServiceMaker().makeService(self.options)
+        service = service.getServiceNamed(CalDAVService.connectionServiceName)
 
-        self.writeConfig()
-        service = self.makeService().getServiceNamed(CalDAVService.connectionServiceName)
-
         tcpServers = []
         sslServers = []
 
@@ -723,10 +723,10 @@
             elif isinstance(s, internet.SSLServer):
                 sslServers.append(s)
 
-        self.assertEquals(len(tcpServers), len(self.config.BindAddresses))
-        self.assertEquals(len(sslServers), len(self.config.BindAddresses))
+        self.assertEquals(len(tcpServers), len(config.BindAddresses))
+        self.assertEquals(len(sslServers), len(config.BindAddresses))
 
-        for addr in self.config.BindAddresses:
+        for addr in config.BindAddresses:
             for s in tcpServers:
                 if s.kwargs["interface"] == addr:
                     tcpServers.remove(s)
@@ -739,13 +739,32 @@
         self.assertEquals(len(sslServers), 0)
 
 
+
+class ListenBacklogTests(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(ListenBacklogTests, self).configure()
+        config.ProcessType = "Slave"
+        config.ListenBacklog = 1024
+        config.HTTPPort = 8008
+        config.SSLPort = 8443
+        pemFile = os.path.join(sourceRoot, "twistedcaldav/test/data/server.pem")
+        config.SSLPrivateKey = pemFile
+        config.SSLCertificate = pemFile
+        config.EnableSSL = True
+        config.BindAddresses = [
+            "127.0.0.1",
+            "10.0.0.2",
+            "172.53.13.123",
+        ]
+
     def test_listenBacklog(self):
         """
         Test that the backlog argument is set in TCPServer and SSLServers
         """
-        self.config.ListenBacklog = 1024
-        self.writeConfig()
-        service = self.makeService().getServiceNamed(CalDAVService.connectionServiceName)
+        # Note: the listeners are bundled within a MultiService named "ConnectionService"
+        service = CalDAVServiceMaker().makeService(self.options)
+        service = service.getServiceNamed(CalDAVService.connectionServiceName)
 
         for s in service.services:
             if isinstance(s, (internet.TCPServer, internet.SSLServer)):
@@ -753,32 +772,31 @@
 
 
 
-class ServiceHTTPFactoryTests(BaseServiceMakerTests):
-    """
-    Test the configuration of the initial resource hierarchy of the
-    single service
-    """
-    configOptions = {"HTTPPort": 8008}
+class AuthWrapperAllEnabledTests(CalDAVServiceMakerTestBase):
 
+    def configure(self):
+        super(AuthWrapperAllEnabledTests, self).configure()
+        config.HTTPPort = 8008
+        config.Authentication.Digest.Enabled = True
+        config.Authentication.Kerberos.Enabled = True
+        config.Authentication.Kerberos.ServicePrincipal = "http/hello@bob"
+        config.Authentication.Basic.Enabled = True
+
+
     def test_AuthWrapperAllEnabled(self):
         """
         Test the configuration of the authentication wrapper
         when all schemes are enabled.
         """
-        self.config.Authentication.Digest.Enabled = True
-        self.config.Authentication.Kerberos.Enabled = True
-        self.config.Authentication.Kerberos.ServicePrincipal = "http/hello@bob"
-        self.config.Authentication.Basic.Enabled = True
 
-        self.writeConfig()
-        site = self.getSite()
+        authWrapper = self.rootResource.resource
+        self.failUnless(
+            isinstance(
+                authWrapper,
+                auth.AuthenticationWrapper
+            )
+        )
 
-        self.failUnless(isinstance(
-                site.resource.resource,
-                auth.AuthenticationWrapper))
-
-        authWrapper = site.resource.resource
-
         expectedSchemes = ["negotiate", "digest", "basic"]
 
         for scheme in authWrapper.credentialFactories:
@@ -787,36 +805,39 @@
         self.assertEquals(len(expectedSchemes),
                           len(authWrapper.credentialFactories))
 
+        ncf = authWrapper.credentialFactories["negotiate"]
 
+        self.assertEquals(ncf.service, "http@HELLO")
+        self.assertEquals(ncf.realm, "bob")
+
+
+
+class ServicePrincipalNoneTests(CalDAVServiceMakerTestBase):
+
+    def configure(self):
+        super(ServicePrincipalNoneTests, self).configure()
+        config.HTTPPort = 8008
+        config.Authentication.Digest.Enabled = True
+        config.Authentication.Kerberos.Enabled = True
+        config.Authentication.Kerberos.ServicePrincipal = ""
+        config.Authentication.Basic.Enabled = True
+
     def test_servicePrincipalNone(self):
         """
         Test that the Kerberos principal lookup is attempted if the principal is empty.
         """
-        self.config.Authentication.Kerberos.ServicePrincipal = ""
-        self.config.Authentication.Kerberos.Enabled = True
-        self.writeConfig()
-        site = self.getSite()
-
-        authWrapper = site.resource.resource
-
+        authWrapper = self.rootResource.resource
         self.assertFalse("negotiate" in authWrapper.credentialFactories)
 
 
-    def test_servicePrincipal(self):
-        """
-        Test that the kerberos realm is the realm portion of a principal
-        in the form proto/host at realm
-        """
-        self.config.Authentication.Kerberos.ServicePrincipal = "http/hello@bob"
-        self.config.Authentication.Kerberos.Enabled = True
-        self.writeConfig()
-        site = self.getSite()
 
-        authWrapper = site.resource.resource
-        ncf = authWrapper.credentialFactories["negotiate"]
+class AuthWrapperPartialEnabledTests(CalDAVServiceMakerTestBase):
 
-        self.assertEquals(ncf.service, "http@HELLO")
-        self.assertEquals(ncf.realm, "bob")
+    def configure(self):
+        super(AuthWrapperPartialEnabledTests, self).configure()
+        config.Authentication.Digest.Enabled = True
+        config.Authentication.Kerberos.Enabled = False
+        config.Authentication.Basic.Enabled = False
 
 
     def test_AuthWrapperPartialEnabled(self):
@@ -826,14 +847,7 @@
         enabled.
         """
 
-        self.config.Authentication.Basic.Enabled = False
-        self.config.Authentication.Kerberos.Enabled = False
-
-        self.writeConfig()
-        site = self.getSite()
-
-        authWrapper = site.resource.resource
-
+        authWrapper = self.rootResource.resource
         expectedSchemes = ["digest"]
 
         for scheme in authWrapper.credentialFactories:
@@ -845,102 +859,67 @@
         )
 
 
+
+
+
+class ResourceTests(CalDAVServiceMakerTestBase):
+
     def test_LogWrapper(self):
         """
         Test the configuration of the log wrapper
         """
-        site = self.getSite()
+        self.failUnless(isinstance(self.rootResource, LogWrapperResource))
 
-        self.failUnless(isinstance(
-                site.resource,
-                LogWrapperResource))
 
+    def test_AuthWrapper(self):
+        """
+        Test the configuration of the auth wrapper
+        """
+        self.failUnless(isinstance(self.rootResource.resource, AuthenticationWrapper))
 
+
     def test_rootResource(self):
         """
         Test the root resource
         """
-        site = self.getSite()
-        root = site.resource.resource.resource
+        self.failUnless(isinstance(self.rootResource.resource.resource, RootResource))
 
-        self.failUnless(isinstance(root, RootResource))
 
-
+    @inlineCallbacks
     def test_principalResource(self):
         """
         Test the principal resource
         """
-        site = self.getSite()
-        root = site.resource.resource.resource
-
         self.failUnless(isinstance(
-            root.getChild("principals"),
+            (yield self.actualRoot.getChild("principals")),
             DirectoryPrincipalProvisioningResource
         ))
 
 
+    @inlineCallbacks
     def test_calendarResource(self):
         """
         Test the calendar resource
         """
-        site = self.getSite()
-        root = site.resource.resource.resource
-
         self.failUnless(isinstance(
-            root.getChild("calendars"),
+            (yield self.actualRoot.getChild("calendars")),
             DirectoryCalendarHomeProvisioningResource
         ))
 
 
-
-class DirectoryServiceTest(BaseServiceMakerTests):
-    """
-    Tests of the directory service
-    """
-
-    configOptions = {"HTTPPort": 8008}
-
+    @inlineCallbacks
     def test_sameDirectory(self):
         """
         Test that the principal hierarchy has a reference
         to the same DirectoryService as the calendar hierarchy
         """
-        site = self.getSite()
-        principals = site.resource.resource.resource.getChild("principals")
-        calendars = site.resource.resource.resource.getChild("calendars")
+        principals = yield self.actualRoot.getChild("principals")
+        calendars = yield self.actualRoot.getChild("calendars")
 
         self.assertEquals(principals.directory, calendars.directory)
 
 
-    def test_aggregateDirectory(self):
-        """
-        Assert that the base directory service is actually
-        an AggregateDirectoryService
-        """
-        site = self.getSite()
-        principals = site.resource.resource.resource.getChild("principals")
-        directory = principals.directory
 
-        self.failUnless(isinstance(directory, AggregateDirectoryService))
-
-
-    def test_configuredDirectoryService(self):
-        """
-        Test that the real directory service is the directory service
-        set in the configuration file.
-        """
-        site = self.getSite()
-        principals = site.resource.resource.resource.getChild("principals")
-        directory = principals.directory
-
-        realDirectory = directory.serviceForRecordType("users")
-
-        configuredDirectory = namedAny(self.config.DirectoryService.type)
-
-        self.failUnless(isinstance(realDirectory, configuredDirectory))
-
-
-
 class DummyProcessObject(object):
     """
     Simple stub for Process Object API which just has an executable and some
@@ -1012,9 +991,13 @@
         at once, to avoid resource exhaustion.
         """
         dspm = DelayedStartupProcessMonitor()
-        dspm.addProcessObject(ScriptProcessObject(
-                'longlines.py', str(DelayedStartupLineLogger.MAX_LENGTH)),
-                          os.environ)
+        dspm.addProcessObject(
+            ScriptProcessObject(
+                'longlines.py',
+                str(DelayedStartupLineLogger.MAX_LENGTH)
+            ),
+            os.environ
+        )
         dspm.startService()
         self.addCleanup(dspm.stopService)
 
@@ -1038,10 +1021,11 @@
         logging.addObserver(tempObserver)
         self.addCleanup(logging.removeObserver, tempObserver)
         d = Deferred()
+
         def assertions(result):
             self.assertEquals(["[Dummy] x",
                                "[Dummy] y",
-                               "[Dummy] y", # final segment
+                               "[Dummy] y",  # final segment
                                "[Dummy] z"],
                               [''.join(evt['message'])[:len('[Dummy]') + 2]
                                for evt in logged])
@@ -1066,20 +1050,27 @@
             ("a", ["a"]),
             ("abcde", ["abcde"]),
             ("abcdefghij", ["abcdefghij"]),
-            ("abcdefghijk",
-                ["abcdefghij (truncated, continued)",
-                 "k"
+            (
+                "abcdefghijk",
+                [
+                    "abcdefghij (truncated, continued)",
+                    "k"
                 ]
             ),
-            ("abcdefghijklmnopqrst",
-                ["abcdefghij (truncated, continued)",
-                 "klmnopqrst"
+            (
+                "abcdefghijklmnopqrst",
+                [
+                    "abcdefghij (truncated, continued)",
+                    "klmnopqrst"
                 ]
             ),
-            ("abcdefghijklmnopqrstuv",
-                ["abcdefghij (truncated, continued)",
-                 "klmnopqrst (truncated, continued)",
-                 "uv"]
+            (
+                "abcdefghijklmnopqrstuv",
+                [
+                    "abcdefghij (truncated, continued)",
+                    "klmnopqrst (truncated, continued)",
+                    "uv"
+                ]
             ),
         ]:
             self.assertEquals(output, testLogger._breakLineIntoSegments(input))
@@ -1282,9 +1273,10 @@
         twistd = which("twistd")[0]
         deferred = Deferred()
         proc = reactor.spawnProcess(
-            CapturingProcessProtocol(deferred, None), twistd,
-                [twistd, reactorArg, '-n', '-y', tacFilePath],
-                env=os.environ
+            CapturingProcessProtocol(deferred, None),
+            twistd,
+            [twistd, reactorArg, '-n', '-y', tacFilePath],
+            env=os.environ
         )
         reactor.callLater(3, proc.signalProcess, "HUP")
         reactor.callLater(6, proc.signalProcess, "TERM")
@@ -1325,14 +1317,15 @@
         def _getgid():
             return 45
 
-        return type(getSystemIDs)(getSystemIDs.func_code,
+        return type(getSystemIDs)(
+            getSystemIDs.func_code,
             {
-                "getpwnam" : _getpwnam,
-                "getgrnam" : _getgrnam,
-                "getuid" : _getuid,
-                "getgid" : _getgid,
-                "KeyError" : KeyError,
-                "ConfigurationError" : ConfigurationError,
+                "getpwnam": _getpwnam,
+                "getgrnam": _getgrnam,
+                "getuid": _getuid,
+                "getgid": _getgid,
+                "KeyError": KeyError,
+                "ConfigurationError": ConfigurationError,
             }
         )
 
@@ -1342,8 +1335,10 @@
         If userName is passed in but is not found on the system, raise a
         ConfigurationError
         """
-        self.assertRaises(ConfigurationError, self._wrappedFunction(),
-            "nonexistent", "exists")
+        self.assertRaises(
+            ConfigurationError, self._wrappedFunction(),
+            "nonexistent", "exists"
+        )
 
 
     def test_getSystemIDs_GroupNameNotFound(self):
@@ -1351,8 +1346,10 @@
         If groupName is passed in but is not found on the system, raise a
         ConfigurationError
         """
-        self.assertRaises(ConfigurationError, self._wrappedFunction(),
-            "exists", "nonexistent")
+        self.assertRaises(
+            ConfigurationError, self._wrappedFunction(),
+            "exists", "nonexistent"
+        )
 
 
     def test_getSystemIDs_NamesNotSpecified(self):
@@ -1428,8 +1425,10 @@
     def setUp(self):
         self.history = []
         self.clock = Clock()
-        self.pps = PreProcessingService(self.fakeServiceCreator, None, "store",
-            None, "storageService", reactor=self.clock)
+        self.pps = PreProcessingService(
+            self.fakeServiceCreator, None, "store",
+            None, "storageService", reactor=self.clock
+        )
 
 
     def _record(self, value, failure):
@@ -1447,9 +1446,13 @@
             StepFour(self._record, False)
         )
         self.pps.startService()
-        self.assertEquals(self.history,
-            ['one success', 'two success', 'three success', 'four success',
-            ('serviceCreator', 'store', 'storageService')])
+        self.assertEquals(
+            self.history,
+            [
+                'one success', 'two success', 'three success', 'four success',
+                ('serviceCreator', 'store', 'storageService')
+            ]
+        )
 
 
     def test_allFailure(self):
@@ -1463,9 +1466,13 @@
             StepFour(self._record, True)
         )
         self.pps.startService()
-        self.assertEquals(self.history,
-            ['one success', 'two failure', 'three failure', 'four failure',
-            ('serviceCreator', None, 'storageService')])
+        self.assertEquals(
+            self.history,
+            [
+                'one success', 'two failure', 'three failure', 'four failure',
+                ('serviceCreator', None, 'storageService')
+            ]
+        )
 
 
     def test_partialFailure(self):
@@ -1479,9 +1486,13 @@
             StepFour(self._record, False)
         )
         self.pps.startService()
-        self.assertEquals(self.history,
-            ['one success', 'two failure', 'three success', 'four failure',
-            ('serviceCreator', 'store', 'storageService')])
+        self.assertEquals(
+            self.history,
+            [
+                'one success', 'two failure', 'three success', 'four failure',
+                ('serviceCreator', 'store', 'storageService')
+            ]
+        )
 
 
     def test_quitAfterUpgradeStep(self):
@@ -1498,9 +1509,13 @@
         )
         triggerFile.setContent("")
         self.pps.startService()
-        self.assertEquals(self.history,
-            ['one success', 'two success', 'four failure',
-            ('serviceCreator', None, 'storageService')])
+        self.assertEquals(
+            self.history,
+            [
+                'one success', 'two success', 'four failure',
+                ('serviceCreator', None, 'storageService')
+            ]
+        )
         self.assertFalse(triggerFile.exists())
 
 

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/test/test_util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/test/test_util.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/test/test_util.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -14,11 +14,9 @@
 # limitations under the License.
 ##
 
-from calendarserver.tap.util import directoryFromConfig, MemoryLimitService, Stepper
+from calendarserver.tap.util import MemoryLimitService, Stepper
 from twistedcaldav.util import computeProcessCount
 from twistedcaldav.test.util import TestCase
-from twistedcaldav.config import config
-from twistedcaldav.directory.augment import AugmentXMLDB
 from twisted.internet.task import Clock
 from twisted.internet.defer import succeed, inlineCallbacks
 
@@ -55,21 +53,7 @@
 
 
 
-class UtilTestCase(TestCase):
 
-    def test_directoryFromConfig(self):
-        """
-        Ensure augments service is on by default
-        """
-        dir = directoryFromConfig(config)
-        for service in dir._recordTypes.values():
-            # all directory services belonging to the aggregate have
-            # augmentService set to AugmentXMLDB
-            if hasattr(service, "augmentService"):
-                self.assertTrue(isinstance(service.augmentService, AugmentXMLDB))
-
-
-
 # Stub classes for MemoryLimitServiceTestCase
 
 class StubProtocol(object):

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/util.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tap/util.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -28,7 +28,6 @@
 
 import errno
 import os
-from time import sleep
 from socket import fromfd, AF_UNIX, SOCK_STREAM, socketpair
 import psutil
 
@@ -36,6 +35,7 @@
 from twext.python.log import Logger
 from txweb2.auth.basic import BasicCredentialFactory
 from txweb2.dav import auth
+from txweb2.dav.util import joinURL
 from txweb2.http_headers import Headers
 from txweb2.resource import Resource
 from txweb2.static import File as FileResource
@@ -46,32 +46,26 @@
 from twisted.internet import reactor as _reactor
 from twisted.internet.reactor import addSystemEventTrigger
 from twisted.internet.tcp import Connection
-from twisted.python.reflect import namedClass
-# from twisted.python.failure import Failure
 
+from calendarserver.push.applepush import APNSubscriptionResource
+from calendarserver.push.notifier import NotifierFactory
+from twext.enterprise.adbapi2 import ConnectionPool, ConnectionPoolConnection
+from twext.enterprise.ienterprise import ORACLE_DIALECT
+from twext.enterprise.ienterprise import POSTGRES_DIALECT
 from twistedcaldav.bind import doBind
 from twistedcaldav.cache import CacheStoreNotifierFactory
-from twistedcaldav.directory import calendaruserproxy
 from twistedcaldav.directory.addressbook import DirectoryAddressBookHomeProvisioningResource
-from twistedcaldav.directory.aggregate import AggregateDirectoryService
 from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
 from twistedcaldav.directory.digest import QopDigestCredentialFactory
-from twistedcaldav.directory.directory import GroupMembershipCache
 from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
-from twistedcaldav.directory.wiki import WikiDirectoryService
-from calendarserver.push.notifier import NotifierFactory
-from calendarserver.push.applepush import APNSubscriptionResource
 from twistedcaldav.directorybackedaddressbook import DirectoryBackedAddressBookResource
 from twistedcaldav.resource import AuthenticationWrapper
-from txdav.caldav.datastore.scheduling.ischedule.dkim import DKIMUtils, DomainKeyResource
-from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
 from twistedcaldav.simpleresource import SimpleResource, SimpleRedirectResource
 from twistedcaldav.timezones import TimezoneCache
 from twistedcaldav.timezoneservice import TimezoneServiceResource
 from twistedcaldav.timezonestdservice import TimezoneStdServiceResource
-from twext.enterprise.ienterprise import POSTGRES_DIALECT
-from twext.enterprise.ienterprise import ORACLE_DIALECT
-from twext.enterprise.adbapi2 import ConnectionPool, ConnectionPoolConnection
+from txdav.caldav.datastore.scheduling.ischedule.dkim import DKIMUtils, DomainKeyResource
+from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
 
 
 try:
@@ -100,7 +94,10 @@
 from urllib import quote
 from twisted.python.usage import UsageError
 
-
+from twext.who.checker import UsernamePasswordCredentialChecker
+from twext.who.checker import HTTPDigestCredentialChecker
+from twisted.cred.error import UnauthorizedLogin
+from txweb2.dav.auth import IPrincipalCredentials
 log = Logger()
 
 
@@ -218,7 +215,7 @@
 
 
 
-def storeFromConfig(config, txnFactory, directoryService=None):
+def storeFromConfig(config, txnFactory, directoryService):
     """
     Produce an L{IDataStore} from the given configuration, transaction factory,
     and notifier factory.
@@ -236,17 +233,14 @@
     if config.EnableResponseCache and config.Memcached.Pools.Default.ClientEnabled:
         notifierFactories["cache"] = CacheStoreNotifierFactory()
 
-    if directoryService is None:
-        directoryService = directoryFromConfig(config)
-
     quota = config.UserQuota
     if quota == 0:
         quota = None
     if txnFactory is not None:
         if config.EnableSSL:
-            uri = "https://%s:%s" % (config.ServerHostName, config.SSLPort,)
+            uri = "https://{config.ServerHostName}:{config.SSLPort}".format(config=config)
         else:
-            uri = "http://%s:%s" % (config.ServerHostName, config.HTTPPort,)
+            uri = "https://{config.ServerHostName}:{config.HTTPPort}".format(config=config)
         attachments_uri = uri + "/calendars/__uids__/%(home)s/dropbox/%(dropbox_id)s/%(name)s"
         store = CommonSQLDataStore(
             txnFactory, notifierFactories,
@@ -281,96 +275,65 @@
 
 
 
-def directoryFromConfig(config):
-    """
-    Create an L{AggregateDirectoryService} from the given configuration.
-    """
-    #
-    # Setup the Augment Service
-    #
-    if config.AugmentService.type:
-        augmentClass = namedClass(config.AugmentService.type)
-        log.info("Configuring augment service of type: {augmentClass}",
-            augmentClass=augmentClass)
-        try:
-            augmentService = augmentClass(**config.AugmentService.params)
-        except IOError:
-            log.error("Could not start augment service")
-            raise
-    else:
-        augmentService = None
+# MOVE2WHO -- should we move this class somewhere else?
+class PrincipalCredentialChecker(object):
+    credentialInterfaces = (IPrincipalCredentials,)
 
-    #
-    # Setup the group membership cacher
-    #
-    if config.GroupCaching.Enabled:
-        groupMembershipCache = GroupMembershipCache(
-            config.GroupCaching.MemcachedPool,
-            expireSeconds=config.GroupCaching.ExpireSeconds)
-    else:
-        groupMembershipCache = None
+    @inlineCallbacks
+    def requestAvatarId(self, credentials):
+        credentials = IPrincipalCredentials(credentials)
 
-    #
-    # Setup the Directory
-    #
-    directories = []
+        if credentials.authnPrincipal is None:
+            raise UnauthorizedLogin(
+                "No such user: {user}".format(
+                    user=credentials.credentials.username
+                )
+            )
 
-    directoryClass = namedClass(config.DirectoryService.type)
-    principalResourceClass = DirectoryPrincipalProvisioningResource
+        # See if record is enabledForLogin
+        if not credentials.authnPrincipal.record.isLoginEnabled():
+            raise UnauthorizedLogin(
+                "User not allowed to log in: {user}".format(
+                    user=credentials.credentials.username
+                )
+            )
 
-    log.info("Configuring directory service of type: {directoryType}",
-        directoryType=config.DirectoryService.type)
+        # Handle Kerberos as a separate behavior
+        try:
+            from twistedcaldav.authkerb import NegotiateCredentials
+        except ImportError:
+            NegotiateCredentials = None
 
-    config.DirectoryService.params.augmentService = augmentService
-    config.DirectoryService.params.groupMembershipCache = groupMembershipCache
-    baseDirectory = directoryClass(config.DirectoryService.params)
+        if NegotiateCredentials and isinstance(credentials.credentials,
+                                               NegotiateCredentials):
+            # If we get here with Kerberos, then authentication has already succeeded
+            returnValue(
+                (
+                    credentials.authnPrincipal.principalURL(),
+                    credentials.authzPrincipal.principalURL(),
+                    credentials.authnPrincipal,
+                    credentials.authzPrincipal,
+                )
+            )
+        else:
+            if (yield credentials.authnPrincipal.record.verifyCredentials(credentials.credentials)):
+                returnValue(
+                    (
+                        credentials.authnPrincipal.principalURL(),
+                        credentials.authzPrincipal.principalURL(),
+                        credentials.authnPrincipal,
+                        credentials.authzPrincipal,
+                    )
+                )
+            else:
+                raise UnauthorizedLogin(
+                    "Incorrect credentials for user: {user}".format(
+                        user=credentials.credentials.username
+                    )
+                )
 
-    # Wait for the directory to become available
-    while not baseDirectory.isAvailable():
-        sleep(5)
 
-    directories.append(baseDirectory)
 
-    #
-    # Setup the Locations and Resources Service
-    #
-    if config.ResourceService.Enabled:
-        resourceClass = namedClass(config.ResourceService.type)
-
-        log.info("Configuring resource service of type: {resourceClass}",
-            resourceClass=resourceClass)
-
-        config.ResourceService.params.augmentService = augmentService
-        config.ResourceService.params.groupMembershipCache = groupMembershipCache
-        resourceDirectory = resourceClass(config.ResourceService.params)
-        resourceDirectory.realmName = baseDirectory.realmName
-        directories.append(resourceDirectory)
-
-    #
-    # Add wiki directory service
-    #
-    if config.Authentication.Wiki.Enabled:
-        wikiDirectory = WikiDirectoryService()
-        wikiDirectory.realmName = baseDirectory.realmName
-        directories.append(wikiDirectory)
-
-    directory = AggregateDirectoryService(directories, groupMembershipCache)
-
-    #
-    # Use system-wide realm on OSX
-    #
-    try:
-        import ServerFoundation
-        realmName = ServerFoundation.XSAuthenticator.defaultRealm().encode("utf-8")
-        directory.setRealm(realmName)
-    except ImportError:
-        pass
-    log.info("Setting up principal collection: {cls}", cls=principalResourceClass)
-    principalResourceClass("/principals/", directory)
-    return directory
-
-
-
 def getRootResource(config, newStore, resources=None):
     """
     Set up directory service and resource hierarchy based on config.
@@ -407,22 +370,26 @@
     addressBookResourceClass = DirectoryAddressBookHomeProvisioningResource
     directoryBackedAddressBookResourceClass = DirectoryBackedAddressBookResource
     apnSubscriptionResourceClass = APNSubscriptionResource
+    principalResourceClass = DirectoryPrincipalProvisioningResource
 
     directory = newStore.directoryService()
+    principalCollection = principalResourceClass("/principals/", directory)
 
     #
     # Setup the ProxyDB Service
     #
-    proxydbClass = namedClass(config.ProxyDBService.type)
 
-    log.info("Configuring proxydb service of type: {cls}", cls=proxydbClass)
+    # MOVE2WHO
+    # proxydbClass = namedClass(config.ProxyDBService.type)
 
-    try:
-        calendaruserproxy.ProxyDBService = proxydbClass(**config.ProxyDBService.params)
-    except IOError:
-        log.error("Could not start proxydb service")
-        raise
+    # log.info("Configuring proxydb service of type: {cls}", cls=proxydbClass)
 
+    # try:
+    #     calendaruserproxy.ProxyDBService = proxydbClass(**config.ProxyDBService.params)
+    # except IOError:
+    #     log.error("Could not start proxydb service")
+    #     raise
+
     #
     # Configure the Site and Wrappers
     #
@@ -431,9 +398,11 @@
 
     portal = Portal(auth.DavRealm())
 
-    portal.registerChecker(directory)
+    portal.registerChecker(UsernamePasswordCredentialChecker(directory))
+    portal.registerChecker(HTTPDigestCredentialChecker(directory))
+    portal.registerChecker(PrincipalCredentialChecker())
 
-    realm = directory.realmName or ""
+    realm = directory.realmName.encode("utf-8") or ""
 
     log.info("Configuring authentication for realm: {realm}", realm=realm)
 
@@ -491,7 +460,7 @@
     #
     log.info("Setting up document root at: {root}", root=config.DocumentRoot)
 
-    principalCollection = directory.principalCollection
+    # principalCollection = directory.principalCollection
 
     if config.EnableCalDAV:
         log.info("Setting up calendar collection: {cls}", cls=calendarResourceClass)
@@ -509,13 +478,14 @@
             newStore,
         )
 
-        directoryPath = os.path.join(config.DocumentRoot, config.DirectoryAddressBook.name)
         if config.DirectoryAddressBook.Enabled and config.EnableSearchAddressBook:
             log.info("Setting up directory address book: {cls}",
                 cls=directoryBackedAddressBookResourceClass)
 
             directoryBackedAddressBookCollection = directoryBackedAddressBookResourceClass(
-                principalCollections=(principalCollection,)
+                principalCollections=(principalCollection,),
+                principalDirectory=directory,
+                uri=joinURL("/", config.DirectoryAddressBook.name, "/")
             )
             if _reactor._started:
                 directoryBackedAddressBookCollection.provisionDirectory()
@@ -523,6 +493,7 @@
                 addSystemEventTrigger("after", "startup", directoryBackedAddressBookCollection.provisionDirectory)
         else:
             # remove /directory from previous runs that may have created it
+            directoryPath = os.path.join(config.DocumentRoot, config.DirectoryAddressBook.name)
             try:
                 FilePath(directoryPath).remove()
                 log.info("Deleted: {path}", path=directoryPath)
@@ -712,6 +683,7 @@
     #
     # Configure ancillary data
     #
+    # MOVE2WHO
     log.info("Configuring authentication wrapper")
 
     overrides = {}

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/agent.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/agent.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/agent.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -18,121 +18,46 @@
 
 """
 A service spawned on-demand by launchd, meant to handle configuration requests
-from Server.app.  When a request comes in on the socket specified in the launchd
-agent.plist, launchd will run "caldavd -t Agent" which ends up creating this
-service.  Requests are made using HTTP POSTS to /gateway, and are authenticated
-by OpenDirectory.
+from Server.app.  When a request comes in on the socket specified in the
+launchd agent.plist, launchd will run "caldavd -t Agent" which ends up creating
+this service.  Requests are made using HTTP POSTS to /gateway, and are
+authenticated by OpenDirectory.
 """
 
 from __future__ import print_function
 
+__all__ = [
+    "makeAgentService",
+]
+
 import cStringIO
+from plistlib import readPlistFromString, writePlistToString
 import socket
 
 from calendarserver.tap.util import getRootResource
-from plistlib import readPlistFromString, writePlistToString
+from twext.python.launchd import getLaunchDSocketFDs
+from twext.python.log import Logger
+from twext.who.checker import HTTPDigestCredentialChecker
+from twext.who.opendirectory import (
+    DirectoryService as OpenDirectoryDirectoryService,
+    NoQOPDigestCredentialFactory
+)
 from twisted.application.internet import StreamServerEndpointService
-from twisted.cred.checkers import ICredentialsChecker
-from twisted.cred.credentials import IUsernameHashedPassword
-from twisted.cred.error import UnauthorizedLogin
 from twisted.cred.portal import IRealm, Portal
-from twisted.internet.defer import inlineCallbacks, returnValue, succeed, fail
+from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet.endpoints import AdoptedStreamServerEndpoint
 from twisted.internet.protocol import Factory
 from twisted.protocols import amp
-from twisted.web.guard import HTTPAuthSessionWrapper, DigestCredentialFactory
+from twisted.web.guard import HTTPAuthSessionWrapper
 from twisted.web.resource import IResource, Resource, ForbiddenResource
 from twisted.web.server import Site, NOT_DONE_YET
 from zope.interface import implements
 
-from twext.python.launchd import getLaunchDSocketFDs
-from twext.python.log import Logger
+
 log = Logger()
 
 
 
-class DirectoryServiceChecker:
-    """
-    A checker that knows how to ask OpenDirectory to authenticate via Digest
-    """
-    implements(ICredentialsChecker)
-
-    credentialInterfaces = (IUsernameHashedPassword,)
-
-    from calendarserver.platform.darwin.od import opendirectory
-    directoryModule = opendirectory
-
-    def __init__(self, node):
-        """
-        @param node: the name of the OpenDirectory node to use, e.g. /Local/Default
-        """
-        self.node = node
-        self.directory = self.directoryModule.odInit(node)
-
-
-    def requestAvatarId(self, credentials):
-        record = self.directoryModule.getUserRecord(self.directory, credentials.username)
-
-        if record is not None:
-            try:
-                if "algorithm" not in credentials.fields:
-                    credentials.fields["algorithm"] = "md5"
-
-                challenge = 'Digest realm="%(realm)s", nonce="%(nonce)s", algorithm=%(algorithm)s' % credentials.fields
-
-                response = (
-                    'Digest username="%(username)s", '
-                    'realm="%(realm)s", '
-                    'nonce="%(nonce)s", '
-                    'uri="%(uri)s", '
-                    'response="%(response)s",'
-                    'algorithm=%(algorithm)s'
-                ) % credentials.fields
-
-            except KeyError as e:
-                log.error(
-                    "OpenDirectory (node=%s) error while performing digest authentication for user %s: "
-                    "missing digest response field: %s in: %s"
-                    % (self.node, credentials.username, e, credentials.fields)
-                )
-                return fail(UnauthorizedLogin())
-
-            try:
-                if self.directoryModule.authenticateUserDigest(self.directory,
-                    self.node,
-                    credentials.username,
-                    challenge,
-                    response,
-                    credentials.method
-                ):
-                    return succeed(credentials.username)
-                else:
-                    log.error("Failed digest auth with response: %s" % (response,))
-                    return fail(UnauthorizedLogin())
-            except Exception as e:
-                log.error(
-                    "OpenDirectory error while performing digest authentication for user %s: %s"
-                    % (credentials.username, e)
-                )
-                return fail(UnauthorizedLogin())
-
-        else:
-            return fail(UnauthorizedLogin())
-
-
-
-class CustomDigestCredentialFactory(DigestCredentialFactory):
-    """
-    DigestCredentialFactory without qop, to interop with OD.
-    """
-
-    def getChallenge(self, address):
-        result = DigestCredentialFactory.getChallenge(self, address)
-        del result["qop"]
-        return result
-
-
-
 class AgentRealm(object):
     """
     Only allow a specified list of avatar IDs to access the site
@@ -161,7 +86,8 @@
 
 class AgentGatewayResource(Resource):
     """
-    The gateway resource which forwards incoming requests through gateway.Runner.
+    The gateway resource which forwards incoming requests through
+    gateway.Runner.
     """
     isLeaf = True
 
@@ -202,10 +128,10 @@
             tbString = tbStringIO.getvalue()
             tbStringIO.close()
             error = {
-                "Error" : message,
-                "Traceback" : tbString,
+                "Error": message,
+                "Traceback": tbString,
             }
-            log.error("command failed %s" % (failure,))
+            log.error("command failed {error}", error=failure)
             request.write(writePlistToString(error))
             request.finish()
 
@@ -213,8 +139,10 @@
         body = request.content.read()
         command = readPlistFromString(body)
         output = cStringIO.StringIO()
-        runner = Runner(self.davRootResource, self.directory, self.store,
-            [command], output=output)
+        runner = Runner(
+            self.davRootResource, self.directory, self.store,
+            [command], output=output
+        )
         d = runner.run()
         d.addCallback(onSuccess, output)
         d.addErrback(onError)
@@ -246,16 +174,26 @@
         log.warn("Agent inactive; shutting down")
         reactor.stop()
 
-    inactivityDetector = InactivityDetector(reactor,
-        config.AgentInactivityTimeoutSeconds, becameInactive)
+    inactivityDetector = InactivityDetector(
+        reactor, config.AgentInactivityTimeoutSeconds, becameInactive
+    )
     root = Resource()
-    root.putChild("gateway", AgentGatewayResource(store,
-        davRootResource, directory, inactivityDetector))
+    root.putChild(
+        "gateway",
+        AgentGatewayResource(
+            store, davRootResource, directory, inactivityDetector
+        )
+    )
 
-    realmName = "/Local/Default"
-    portal = Portal(AgentRealm(root, ["com.apple.calendarserver"]),
-        [DirectoryServiceChecker(realmName)])
-    credentialFactory = CustomDigestCredentialFactory("md5", realmName)
+    directory = OpenDirectoryDirectoryService("/Local/Default")
+
+    portal = Portal(
+        AgentRealm(root, ["com.apple.calendarserver"]),
+        [HTTPDigestCredentialChecker(directory)]
+    )
+    credentialFactory = NoQOPDigestCredentialFactory(
+        "md5", "CalendarServer Agent Realm"
+    )
     wrapper = HTTPAuthSessionWrapper(portal, [credentialFactory])
 
     site = Site(wrapper)
@@ -283,8 +221,10 @@
         self._becameInactive = becameInactive
 
         if self._timeoutSeconds > 0:
-            self._delayedCall = self._reactor.callLater(self._timeoutSeconds,
-                self._inactivityThresholdReached)
+            self._delayedCall = self._reactor.callLater(
+                self._timeoutSeconds,
+                self._inactivityThresholdReached
+            )
 
 
     def _inactivityThresholdReached(self):
@@ -304,8 +244,10 @@
             if self._delayedCall.active():
                 self._delayedCall.reset(self._timeoutSeconds)
             else:
-                self._delayedCall = self._reactor.callLater(self._timeoutSeconds,
-                    self._inactivityThresholdReached)
+                self._delayedCall = self._reactor.callLater(
+                    self._timeoutSeconds,
+                    self._inactivityThresholdReached
+                )
 
 
     def stop(self):
@@ -361,17 +303,17 @@
         command = readPlistFromString(command)
         output = cStringIO.StringIO()
         from calendarserver.tools.gateway import Runner
-        runner = Runner(self.davRootResource, self.directory, self.store,
-            [command], output=output)
+        runner = Runner(
+            self.davRootResource, self.directory, self.store,
+            [command], output=output
+        )
 
         try:
             yield runner.run()
             result = output.getvalue()
             output.close()
         except Exception as e:
-            error = {
-                "Error" : str(e),
-            }
+            error = {"Error": str(e)}
             result = writePlistToString(error)
 
         output.close()
@@ -396,8 +338,9 @@
 
 
     def buildProtocol(self, addr):
-        return GatewayAMPProtocol(self.store, self.davRootResource,
-            self.directory)
+        return GatewayAMPProtocol(
+            self.store, self.davRootResource, self.directory
+        )
 
 
 
@@ -406,7 +349,8 @@
 #
 
 command = """<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN"
+ "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 <plist version="1.0">
 <dict>
         <key>command</key>
@@ -414,6 +358,7 @@
 </dict>
 </plist>"""
 
+
 def getList():
     # For the sample client, below:
     from twisted.internet import reactor

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/calverify.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/calverify.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/calverify.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -47,41 +47,39 @@
 import traceback
 import uuid
 
+from calendarserver.tools import tables
+from calendarserver.tools.cmdline import utilityMain, WorkerService
+from pycalendar.datetime import DateTime
+from pycalendar.exceptions import ErrorBase
 from pycalendar.icalendar import definitions
 from pycalendar.icalendar.calendar import Calendar
-from pycalendar.datetime import DateTime
-from pycalendar.exceptions import ErrorBase
 from pycalendar.period import Period
 from pycalendar.timezone import Timezone
-
+from twext.enterprise.dal.syntax import Select, Parameter, Count
+from twext.python.log import Logger
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.python import usage
 from twisted.python.usage import Options
-
-from twext.python.log import Logger
-from twext.enterprise.dal.syntax import Select, Parameter, Count
-
 from twistedcaldav.datafilters.peruserdata import PerUserDataFilter
 from twistedcaldav.dateops import pyCalendarTodatetime
-from twistedcaldav.directory.directory import DirectoryService
-from twistedcaldav.ical import Component, ignoredComponents, \
+from twistedcaldav.ical import (
+    Component, ignoredComponents,
     InvalidICalendarDataError, Property, PERUSER_COMPONENT
-from txdav.caldav.datastore.scheduling.itip import iTipGenerator
+)
 from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE
-from twistedcaldav.util import normalizationLookup
-
-from txdav.caldav.icalendarstore import ComponentUpdateState
 from txdav.caldav.datastore.scheduling.icalsplitter import iCalSplitter
 from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
+from txdav.caldav.datastore.scheduling.itip import iTipGenerator
 from txdav.caldav.datastore.sql import CalendarStoreFeatures
+from txdav.caldav.datastore.util import normalizationLookup
+from txdav.caldav.icalendarstore import ComponentUpdateState
 from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
 from txdav.common.icommondatastore import InternalDataStoreError
+from txdav.who.idirectory import (
+    RecordType as CalRecordType, AutoScheduleMode
+)
 
-from calendarserver.tools.cmdline import utilityMain, WorkerService
 
-from calendarserver.tools import tables
-from calendarserver.tools.util import getDirectory
-
 log = Logger()
 
 
@@ -396,7 +394,7 @@
         self.output = output
         self.reactor = reactor
         self.config = config
-        self._directory = None
+        self._directory = store.directoryService()
 
         self.cuaCache = {}
 
@@ -427,11 +425,8 @@
 
     def directoryService(self):
         """
-        Get an appropriate directory service for this L{CalVerifyService}'s
-        configuration, creating one first if necessary.
+        Return the directory service.
         """
-        if self._directory is None:
-            self._directory = getDirectory(self.config) #directoryFromConfig(self.config)
         return self._directory
 
 
@@ -970,14 +965,14 @@
                 )).ljust(80))
                 self.output.flush()
 
-            record = self.directoryService().recordWithGUID(uid)
+            record = yield self.directoryService().recordWithUID(uid)
             if record is None:
                 contents = yield self.countHomeContents(uid)
                 missing.append((uid, contents,))
             elif not record.thisServer():
                 contents = yield self.countHomeContents(uid)
                 wrong_server.append((uid, contents,))
-            elif not record.enabledForCalendaring:
+            elif not record.hasCalendars:
                 contents = yield self.countHomeContents(uid)
                 disabled.append((uid, contents,))
 
@@ -1008,7 +1003,7 @@
         table = tables.Table()
         table.addHeader(("Owner UID", "Calendar Objects"))
         for uid, count in sorted(wrong_server, key=lambda x: x[0]):
-            record = self.directoryService().recordWithGUID(uid)
+            record = yield self.directoryService().recordWithUID(uid)
             table.addRow((
                 "%s/%s (%s)" % (record.recordType if record else "-", record.shortNames[0] if record else "-", uid,),
                 count,
@@ -1022,7 +1017,7 @@
         table = tables.Table()
         table.addHeader(("Owner UID", "Calendar Objects"))
         for uid, count in sorted(disabled, key=lambda x: x[0]):
-            record = self.directoryService().recordWithGUID(uid)
+            record = yield self.directoryService().recordWithUID(uid)
             table.addRow((
                 "%s/%s (%s)" % (record.recordType if record else "-", record.shortNames[0] if record else "-", uid,),
                 count,
@@ -1152,7 +1147,7 @@
         table.addHeader(("Owner", "Event UID", "RID", "Problem",))
         for item in sorted(results_bad, key=lambda x: (x[0], x[1])):
             owner, uid, resid, message = item
-            owner_record = self.directoryService().recordWithGUID(owner)
+            owner_record = yield self.directoryService().recordWithUID(owner)
             table.addRow((
                 "%s/%s (%s)" % (owner_record.recordType if owner_record else "-", owner_record.shortNames[0] if owner_record else "-", owner,),
                 uid,
@@ -1199,7 +1194,7 @@
                 component.validOrganizerForScheduling(doFix=False)
                 if component.hasDuplicateAlarms(doFix=False):
                     raise InvalidICalendarDataError("Duplicate VALARMS")
-            self.noPrincipalPathCUAddresses(component, doFix=False)
+            yield self.noPrincipalPathCUAddresses(component, doFix=False)
             if self.options["ical"]:
                 self.attendeesWithoutOrganizer(component, doFix=False)
 
@@ -1220,15 +1215,17 @@
         returnValue((result, message,))
 
 
+    @inlineCallbacks
     def noPrincipalPathCUAddresses(self, component, doFix):
 
-        def lookupFunction(cuaddr, principalFunction, conf):
+        @inlineCallbacks
+        def lookupFunction(cuaddr, recordFunction, conf):
 
             # Return cached results, if any.
             if cuaddr in self.cuaCache:
-                return self.cuaCache[cuaddr]
+                returnValue(self.cuaCache[cuaddr])
 
-            result = normalizationLookup(cuaddr, principalFunction, conf)
+            result = yield normalizationLookup(cuaddr, recordFunction, conf)
             _ignore_name, guid, _ignore_cuaddrs = result
             if guid is None:
                 if cuaddr.find("__uids__") != -1:
@@ -1237,7 +1234,7 @@
 
             # Cache the result
             self.cuaCache[cuaddr] = result
-            return result
+            returnValue(result)
 
         for subcomponent in component.subcomponents():
             if subcomponent.name() in ignoredComponents:
@@ -1249,13 +1246,13 @@
                 # http(s) principals need to be converted to urn:uuid
                 if cuaddr.startswith("http"):
                     if doFix:
-                        component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().principalForCalendarUserAddress)
+                        yield component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().recordWithCalendarUserAddress)
                     else:
                         raise InvalidICalendarDataError("iCalendar ORGANIZER starts with 'http(s)'")
                 elif cuaddr.startswith("mailto:"):
-                    if lookupFunction(cuaddr, self.directoryService().principalForCalendarUserAddress, self.config)[1] is not None:
+                    if (yield lookupFunction(cuaddr, self.directoryService().recordWithCalendarUserAddress, self.config))[1] is not None:
                         if doFix:
-                            component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().principalForCalendarUserAddress)
+                            yield component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().recordWithCalendarUserAddress)
                         else:
                             raise InvalidICalendarDataError("iCalendar ORGANIZER starts with 'mailto:' and record exists")
                 else:
@@ -1263,7 +1260,7 @@
                         if doFix:
                             # Add back in mailto: then re-normalize to urn:uuid if possible
                             organizer.setValue("mailto:%s" % (cuaddr,))
-                            component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().principalForCalendarUserAddress)
+                            yield component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().recordWithCalendarUserAddress)
 
                             # Remove any SCHEDULE-AGENT=NONE
                             if organizer.parameterValue("SCHEDULE-AGENT", "SERVER") == "NONE":
@@ -1286,13 +1283,13 @@
                 # http(s) principals need to be converted to urn:uuid
                 if cuaddr.startswith("http"):
                     if doFix:
-                        component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().principalForCalendarUserAddress)
+                        yield component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().recordWithCalendarUserAddress)
                     else:
                         raise InvalidICalendarDataError("iCalendar ATTENDEE starts with 'http(s)'")
                 elif cuaddr.startswith("mailto:"):
-                    if lookupFunction(cuaddr, self.directoryService().principalForCalendarUserAddress, self.config)[1] is not None:
+                    if (yield lookupFunction(cuaddr, self.directoryService().recordWithCalendarUserAddress, self.config))[1] is not None:
                         if doFix:
-                            component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().principalForCalendarUserAddress)
+                            yield component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().recordWithCalendarUserAddress)
                         else:
                             raise InvalidICalendarDataError("iCalendar ATTENDEE starts with 'mailto:' and record exists")
                 else:
@@ -1300,7 +1297,7 @@
                         if doFix:
                             # Add back in mailto: then re-normalize to urn:uuid if possible
                             attendee.setValue("mailto:%s" % (cuaddr,))
-                            component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().principalForCalendarUserAddress)
+                            yield component.normalizeCalendarUserAddresses(lookupFunction, self.directoryService().recordWithCalendarUserAddress)
                         else:
                             raise InvalidICalendarDataError("iCalendar ATTENDEE missing mailto:")
 
@@ -1352,7 +1349,7 @@
                 component.validCalendarForCalDAV(methodAllowed=isinbox)
                 component.validOrganizerForScheduling(doFix=True)
                 component.hasDuplicateAlarms(doFix=True)
-            self.noPrincipalPathCUAddresses(component, doFix=True)
+            yield self.noPrincipalPathCUAddresses(component, doFix=True)
             if self.options["ical"]:
                 self.attendeesWithoutOrganizer(component, doFix=True)
         except ValueError:
@@ -1460,7 +1457,7 @@
         self.attended = []
         self.attended_byuid = collections.defaultdict(list)
         self.matched_attendee_to_organizer = collections.defaultdict(set)
-        skipped, inboxes = self.buildResourceInfo(rows)
+        skipped, inboxes = yield self.buildResourceInfo(rows)
 
         self.logResult("Number of organizer events to process", len(self.organized), self.total)
         self.logResult("Number of attendee events to process", len(self.attended), self.total)
@@ -1488,6 +1485,7 @@
         self.printSummary()
 
 
+    @inlineCallbacks
     def buildResourceInfo(self, rows, onlyOrganizer=False, onlyAttendee=False):
         """
         For each resource, determine whether it is an organizer or attendee event, and also
@@ -1506,7 +1504,7 @@
         for owner, resid, uid, calname, md5, organizer, created, modified in rows:
 
             # Skip owners not enabled for calendaring
-            if not self.testForCalendaringUUID(owner):
+            if not (yield self.testForCalendaringUUID(owner)):
                 skipped += 1
                 continue
 
@@ -1530,9 +1528,10 @@
                     self.attended.append((owner, resid, uid, md5, organizer, created, modified,))
                     self.attended_byuid[uid].append((owner, resid, uid, md5, organizer, created, modified,))
 
-        return skipped, inboxes
+        returnValue((skipped, inboxes))
 
 
+    @inlineCallbacks
     def testForCalendaringUUID(self, uuid):
         """
         Determine if the specified directory UUID is valid for calendaring. Keep a cache of
@@ -1545,9 +1544,9 @@
         """
 
         if uuid not in self.validForCalendaringUUIDs:
-            record = self.directoryService().recordWithGUID(uuid)
-            self.validForCalendaringUUIDs[uuid] = record is not None and record.enabledForCalendaring and record.thisServer()
-        return self.validForCalendaringUUIDs[uuid]
+            record = yield self.directoryService().recordWithUID(uuid)
+            self.validForCalendaringUUIDs[uuid] = record is not None and record.hasCalendars and record.thisServer()
+        returnValue(self.validForCalendaringUUIDs[uuid])
 
 
     @inlineCallbacks
@@ -1622,7 +1621,7 @@
                 self.matched_attendee_to_organizer[uid].add(organizerAttendee)
 
                 # Skip attendees not enabled for calendaring
-                if not self.testForCalendaringUUID(organizerAttendee):
+                if not (yield self.testForCalendaringUUID(organizerAttendee)):
                     continue
 
                 # Double check the missing attendee situation in case we missed it during the original query
@@ -1699,8 +1698,8 @@
         results_missing.sort()
         for item in results_missing:
             uid, resid, organizer, attendee, created, modified = item
-            organizer_record = self.directoryService().recordWithGUID(organizer)
-            attendee_record = self.directoryService().recordWithGUID(attendee)
+            organizer_record = yield self.directoryService().recordWithUID(organizer)
+            attendee_record = yield self.directoryService().recordWithUID(attendee)
             table.addRow((
                 "%s/%s (%s)" % (organizer_record.recordType if organizer_record else "-", organizer_record.shortNames[0] if organizer_record else "-", organizer,),
                 "%s/%s (%s)" % (attendee_record.recordType if attendee_record else "-", attendee_record.shortNames[0] if attendee_record else "-", attendee,),
@@ -1721,8 +1720,8 @@
         results_mismatch.sort()
         for item in results_mismatch:
             uid, org_resid, organizer, org_created, org_modified, attendee, att_created, att_modified = item
-            organizer_record = self.directoryService().recordWithGUID(organizer)
-            attendee_record = self.directoryService().recordWithGUID(attendee)
+            organizer_record = yield self.directoryService().recordWithUID(organizer)
+            attendee_record = yield self.directoryService().recordWithUID(attendee)
             table.addRow((
                 "%s/%s (%s)" % (organizer_record.recordType if organizer_record else "-", organizer_record.shortNames[0] if organizer_record else "-", organizer,),
                 "%s/%s (%s)" % (attendee_record.recordType if attendee_record else "-", attendee_record.shortNames[0] if attendee_record else "-", attendee,),
@@ -1789,14 +1788,14 @@
             organizer = organizer[9:]
 
             # Skip organizers not enabled for calendaring
-            if not self.testForCalendaringUUID(organizer):
+            if not (yield self.testForCalendaringUUID(organizer)):
                 continue
 
             # Double check the missing attendee situation in case we missed it during the original query
             if uid not in self.organized_byuid:
                 # Try to reload the organizer info data
                 rows = yield self.getAllResourceInfoWithUID(uid)
-                self.buildResourceInfo(rows, onlyOrganizer=True)
+                yield self.buildResourceInfo(rows, onlyOrganizer=True)
 
                 #if uid in self.organized_byuid:
                 #    print("Reloaded missing organizer data: %s" % (uid,))
@@ -1849,9 +1848,9 @@
             uid, attendee, organizer, resid, created, modified = item
             unique_set.add(uid)
             if organizer:
-                organizerRecord = self.directoryService().recordWithGUID(organizer)
+                organizerRecord = yield self.directoryService().recordWithUID(organizer)
                 organizer = "%s/%s (%s)" % (organizerRecord.recordType if organizerRecord else "-", organizerRecord.shortNames[0] if organizerRecord else "-", organizer,)
-            attendeeRecord = self.directoryService().recordWithGUID(attendee)
+            attendeeRecord = yield self.directoryService().recordWithUID(attendee)
             table.addRow((
                 organizer,
                 "%s/%s (%s)" % (attendeeRecord.recordType if attendeeRecord else "-", attendeeRecord.shortNames[0] if attendeeRecord else "-", attendee,),
@@ -1874,9 +1873,9 @@
         for item in mismatched:
             uid, attendee, organizer, resid, att_created, att_modified = item
             if organizer:
-                organizerRecord = self.directoryService().recordWithGUID(organizer)
+                organizerRecord = yield self.directoryService().recordWithUID(organizer)
                 organizer = "%s/%s (%s)" % (organizerRecord.recordType if organizerRecord else "-", organizerRecord.shortNames[0] if organizerRecord else "-", organizer,)
-            attendeeRecord = self.directoryService().recordWithGUID(attendee)
+            attendeeRecord = yield self.directoryService().recordWithUID(attendee)
             table.addRow((
                 organizer,
                 "%s/%s (%s)" % (attendeeRecord.recordType if attendeeRecord else "-", attendeeRecord.shortNames[0] if attendeeRecord else "-", attendee,),
@@ -1980,8 +1979,9 @@
             self.txn = self.store.newTransaction()
 
             # Need to know whether the attendee is a location or resource with auto-accept set
-            record = self.directoryService().recordWithGUID(attendee)
-            if record.autoSchedule:
+            record = yield self.directoryService().recordWithUID(attendee)
+            autoScheduleMode = getattr(record, "autoScheduleMode", None)
+            if autoScheduleMode not in (None, AutoScheduleMode.none):
                 # Log details about the event so we can have a human manually process
                 self.fixedAutoAccepts.append(details)
 
@@ -2159,8 +2159,8 @@
             self.txn = None
             uuids = []
             for uuid in sorted(homes):
-                record = self.directoryService().recordWithGUID(uuid)
-                if record is not None and record.recordType in (DirectoryService.recordType_locations, DirectoryService.recordType_resources):
+                record = yield self.directoryService().recordWithUID(uuid)
+                if record is not None and record.recordType in (CalRecordType.location, CalRecordType.resource):
                     uuids.append(uuid)
         else:
             uuids = [self.options["uuid"], ]
@@ -2172,14 +2172,14 @@
             self.total = 0
             count += 1
 
-            record = self.directoryService().recordWithGUID(uuid)
+            record = yield self.directoryService().recordWithUID(uuid)
             if record is None:
                 continue
-            if not record.thisServer() or not record.enabledForCalendaring:
+            if not record.thisServer() or not record.hasCalendars:
                 continue
 
-            rname = record.fullName
-            auto = record.autoSchedule
+            rname = record.displayName
+            autoScheduleMode = getattr(record, "autoScheduleMode", AutoScheduleMode.none)
 
             if len(uuids) > 1 and not self.options["summary"]:
                 self.output.write("\n\n-----------------------------\n")
@@ -2205,7 +2205,7 @@
             if not self.options["summary"]:
                 self.logResult("UUID to process", uuid)
                 self.logResult("Record name", rname)
-                self.logResult("Auto-schedule", "True" if auto else "False")
+                self.logResult("Auto-schedule-mode", autoScheduleMode.description)
                 self.addSummaryBreak()
                 self.logResult("Number of events to process", self.total)
 
@@ -2216,7 +2216,7 @@
             else:
                 doubled = False
 
-            self.uuid_details.append(UUIDDetails(uuid, rname, auto, doubled))
+            self.uuid_details.append(UUIDDetails(uuid, rname, autoScheduleMode, doubled))
 
             if not self.options["summary"]:
                 self.printSummary()
@@ -2234,7 +2234,7 @@
                 table.addRow((
                     item.uuid,
                     item.rname,
-                    item.auto,
+                    item.autoScheduleMode,
                     item.doubled,
                 ))
                 doubled += 1
@@ -2468,8 +2468,8 @@
             if self.options["verbose"]:
                 self.output.write("%d uuids to check\n" % (len(homes,)))
             for uuid in sorted(homes):
-                record = self.directoryService().recordWithGUID(uuid)
-                if record is not None and record.recordType in (DirectoryService.recordType_locations, DirectoryService.recordType_resources):
+                record = yield self.directoryService().recordWithUID(uuid)
+                if record is not None and record.recordType in (CalRecordType.location, CalRecordType.resource):
                     uuids.append(uuid)
         else:
             uuids = [self.options["uuid"], ]
@@ -2483,13 +2483,13 @@
             self.total = 0
             count += 1
 
-            record = self.directoryService().recordWithGUID(uuid)
+            record = yield self.directoryService().recordWithUID(uuid)
             if record is None:
                 continue
-            if not record.thisServer() or not record.enabledForCalendaring:
+            if not record.thisServer() or not record.hasCalendars:
                 continue
 
-            rname = record.fullName
+            rname = record.displayName
 
             if len(uuids) > 1 and not self.options["summary"]:
                 self.output.write("\n\n-----------------------------\n")
@@ -2602,9 +2602,10 @@
                 if self.options["no-organizer"]:
                     fail = True
             else:
-                principal = self.directoryService().principalForCalendarUserAddress(organizer)
+                principal = yield self.directoryService().recordWithCalendarUserAddress(organizer)
+                # FIXME: Why the mix of records and principals here?
                 if principal is None and organizer.startswith("urn:uuid:"):
-                    principal = self.directoryService().principalCollection.principalForUID(organizer[9:])
+                    principal = yield self.directoryService().principalCollection.principalForUID(organizer[9:])
                 if principal is None:
                     if self.options["invalid-organizer"]:
                         fail = True

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/export.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/export.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/export.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -41,7 +41,7 @@
 
 from twisted.python.text import wordWrap
 from twisted.python.usage import Options, UsageError
-from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
 
 from twext.python.log import Logger
 from twistedcaldav.ical import Component
@@ -76,6 +76,7 @@
     )
 )
 
+
 class ExportOptions(Options):
     """
     Command-line options for 'calendarserver_export'
@@ -188,7 +189,7 @@
         Enumerate all calendars based on the directory record and/or calendars
         for this calendar home.
         """
-        uid = self.getHomeUID(exportService)
+        uid = yield self.getHomeUID(exportService)
         home = yield txn.calendarHomeWithUID(uid, True)
         result = []
         if self.collections:
@@ -218,7 +219,7 @@
 
 
     def getHomeUID(self, exportService):
-        return self.uid
+        return succeed(self.uid)
 
 
 
@@ -244,13 +245,17 @@
         self.shortName = shortName
 
 
+    @inlineCallbacks
     def getHomeUID(self, exportService):
         """
         Retrieve the home UID.
         """
         directory = exportService.directoryService()
-        record = directory.recordWithShortName(self.recordType, self.shortName)
-        return record.uid
+        record = yield directory.recordWithShortName(
+            directory.oldNameToRecordType(self.recordType),
+            self.shortName
+        )
+        returnValue(record.uid)
 
 
 

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/gateway.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/gateway.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/gateway.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -19,31 +19,55 @@
 
 from getopt import getopt, GetoptError
 import os
+from plistlib import readPlistFromString, writePlistToString
 import sys
 import xml
 
-from plistlib import readPlistFromString, writePlistToString
-
-from twisted.internet.defer import inlineCallbacks, succeed
-from twistedcaldav.directory.directory import DirectoryError
-from txdav.xml import element as davxml
-
-from calendarserver.tools.util import (
-    principalForPrincipalID, proxySubprincipal, addProxy, removeProxy,
-    ProxyError, ProxyWarning, autoDisableMemcached
-)
+from calendarserver.tools.cmdline import utilityMain
+from calendarserver.tools.config import WRITABLE_CONFIG_KEYS, setKeyPath, getKeyPath, flattenDictionary, WritableConfig
 from calendarserver.tools.principals import (
-    getProxies, setProxies, updateRecord, attrMap
+    getProxies, setProxies
 )
 from calendarserver.tools.purge import WorkerService, PurgeOldEventsService, DEFAULT_BATCH_SIZE, DEFAULT_RETAIN_DAYS
-from calendarserver.tools.cmdline import utilityMain
-
+from calendarserver.tools.util import (
+    recordForPrincipalID, autoDisableMemcached
+)
 from pycalendar.datetime import DateTime
-
+from twext.who.directory import DirectoryRecord
+from twisted.internet.defer import inlineCallbacks, succeed, returnValue
 from twistedcaldav.config import config, ConfigDict
 
-from calendarserver.tools.config import WRITABLE_CONFIG_KEYS, setKeyPath, getKeyPath, flattenDictionary, WritableConfig
+from txdav.who.idirectory import RecordType as CalRecordType
+from twext.who.idirectory import FieldName
+from twisted.python.constants import Names, NamedConstant
+from txdav.who.delegates import (
+    addDelegate, removeDelegate, RecordType as DelegateRecordType
+)
 
+
+attrMap = {
+    'GeneratedUID': {'attr': 'uid', },
+    'RealName': {'attr': 'fullNames', },
+    'RecordName': {'attr': 'shortNames', },
+    'AutoScheduleMode': {'attr': 'autoScheduleMode', },
+    'AutoAcceptGroup': {'attr': 'autoAcceptGroup', },
+
+    # 'Comment': {'extras': True, 'attr': 'comment', },
+    # 'Description': {'extras': True, 'attr': 'description', },
+    # 'Type': {'extras': True, 'attr': 'type', },
+
+    # For "Locations", i.e. scheduled spaces
+    'Capacity': {'attr': 'capacity', },
+    'Floor': {'attr': 'floor', },
+    'AssociatedAddress': {'attr': 'associatedAddress', },
+
+    # For "Addresses", i.e. nonscheduled areas containing Locations
+    'AbbreviatedName': {'attr': 'abbreviatedName', },
+    'StreetAddress': {'attr': 'streetAddress', },
+    'GeographicLocation': {'attr': 'geographicLocation', },
+}
+
+
 def usage(e=None):
 
     name = os.path.basename(sys.argv[0])
@@ -76,9 +100,7 @@
         """
         Create/run a Runner to execute the commands
         """
-        rootResource = self.rootResource()
-        directory = rootResource.getDirectory()
-        runner = Runner(rootResource, directory, self.store, self.commands)
+        runner = Runner(self.store, self.commands)
         if runner.validate():
             yield runner.run()
 
@@ -145,10 +167,9 @@
 
 class Runner(object):
 
-    def __init__(self, root, directory, store, commands, output=None):
-        self.root = root
-        self.dir = directory
+    def __init__(self, store, commands, output=None):
         self.store = store
+        self.dir = store.directoryService()
         self.commands = commands
         if output is None:
             output = sys.stdout
@@ -180,12 +201,13 @@
             pool.ClientEnabled = True
         autoDisableMemcached(config)
 
-        from twistedcaldav.directory import calendaruserproxy
-        if calendaruserproxy.ProxyDBService is not None:
-            # Reset the proxy db memcacher because memcached may have come or
-            # gone since the last time through here.
-            # TODO: figure out a better way to do this
-            calendaruserproxy.ProxyDBService._memcacher._memcacheProtocol = None
+        # FIXME:
+        # from twistedcaldav.directory import calendaruserproxy
+        # if calendaruserproxy.ProxyDBService is not None:
+        #     # Reset the proxy db memcacher because memcached may have come or
+        #     # gone since the last time through here.
+        #     # TODO: figure out a better way to do this
+        #     calendaruserproxy.ProxyDBService._memcacher._memcacheProtocol = None
 
         try:
             for command in self.commands:
@@ -203,224 +225,202 @@
 
     # Locations
 
+    # deferred
     def command_getLocationList(self, command):
-        self.respondWithRecordsOfTypes(self.dir, command, ["locations"])
+        return self.respondWithRecordsOfTypes(self.dir, command, ["locations"])
 
-
     @inlineCallbacks
-    def command_createLocation(self, command):
-        kwargs = {}
-        for key, info in attrMap.iteritems():
-            if key in command:
-                kwargs[info['attr']] = command[key]
+    def _saveRecord(self, typeName, recordType, command, oldFields=None):
+        """
+        Save a record using the values in the command plist, starting with
+        any fields in the optional oldFields.
 
-        try:
-            record = (yield updateRecord(True, self.dir, "locations", **kwargs))
-        except DirectoryError, e:
-            self.respondWithError(str(e))
-            return
+        @param typeName: one of "locations", "resources", "addresses"; used
+            to return the appropriate list of records afterwards.
+        @param recordType: the type of record to save
+        @param command: the command containing values
+        @type command: C{dict}
+        @param oldFields: the optional fields to start with, which will be
+            overridden by values from command
+        @type oldFields: C{dict}
+        """
 
-        readProxies = command.get("ReadProxies", None)
-        writeProxies = command.get("WriteProxies", None)
-        principal = principalForPrincipalID(record.guid, directory=self.dir)
-        (yield setProxies(self.store, principal, readProxies, writeProxies, directory=self.dir))
+        if oldFields is None:
+            fields = {
+                FieldName.recordType: recordType
+            }
+            create = True
+        else:
+            fields = oldFields.copy()
+            create = False
 
-        self.respondWithRecordsOfTypes(self.dir, command, ["locations"])
+        for key, info in attrMap.iteritems():
+            if key in command:
+                attrName = info['attr']
+                field = self.dir.fieldName.lookupByName(attrName)
+                valueType = self.dir.fieldName.valueType(field)
+                value = command[key]
 
+                # For backwards compatibility, convert to a list if needed
+                if (
+                    self.dir.fieldName.isMultiValue(field) and
+                    not isinstance(value, list)
+                ):
+                    value = [value]
 
-    @inlineCallbacks
-    def command_getLocationAttributes(self, command):
-        guid = command['GeneratedUID']
-        record = self.dir.recordWithGUID(guid)
-        if record is None:
-            self.respondWithError("Principal not found: %s" % (guid,))
-            return
-        recordDict = recordToDict(record)
-        principal = principalForPrincipalID(guid, directory=self.dir)
-        if principal is None:
-            self.respondWithError("Principal not found: %s" % (guid,))
-            return
-        recordDict['AutoSchedule'] = principal.getAutoSchedule()
-        recordDict['AutoAcceptGroup'] = principal.getAutoAcceptGroup()
-        recordDict['ReadProxies'], recordDict['WriteProxies'] = (yield getProxies(principal,
-            directory=self.dir))
-        self.respond(command, recordDict)
+                if valueType == int:
+                    value = int(value)
+                elif issubclass(valueType, Names):
+                    if value is not None:
+                        value = valueType.lookupByName(value)
+                else:
+                    if isinstance(value, list):
+                        newList = []
+                        for item in value:
+                            if isinstance(item, str):
+                                newList.append(item.decode("utf-8"))
+                            else:
+                                newList.append(item)
+                        value = newList
+                    elif isinstance(value, str):
+                        value = value.decode("utf-8")
 
-    command_getResourceAttributes = command_getLocationAttributes
+                fields[field] = value
 
+        if FieldName.shortNames not in fields:
+            # No short names were provided, so copy from uid
+            fields[FieldName.shortNames] = [fields[FieldName.uid]]
 
-    @inlineCallbacks
-    def command_setLocationAttributes(self, command):
+        record = DirectoryRecord(self.dir, fields)
+        yield self.dir.updateRecords([record], create=create)
 
-        # Set autoSchedule prior to the updateRecord so that the right
-        # value ends up in memcached
-        principal = principalForPrincipalID(command['GeneratedUID'],
-            directory=self.dir)
-        (yield principal.setAutoSchedule(command.get('AutoSchedule', False)))
-        (yield principal.setAutoAcceptGroup(command.get('AutoAcceptGroup', "")))
-
-        kwargs = {}
-        for key, info in attrMap.iteritems():
-            if key in command:
-                kwargs[info['attr']] = command[key]
-        try:
-            record = (yield updateRecord(False, self.dir, "locations", **kwargs))
-        except DirectoryError, e:
-            self.respondWithError(str(e))
-            return
-
         readProxies = command.get("ReadProxies", None)
+        if readProxies:
+            proxyRecords = []
+            for proxyUID in readProxies:
+                proxyRecord = yield self.dir.recordWithUID(proxyUID)
+                if proxyRecord is not None:
+                    proxyRecords.append(proxyRecord)
+            readProxies = proxyRecords
+
         writeProxies = command.get("WriteProxies", None)
-        principal = principalForPrincipalID(record.guid, directory=self.dir)
-        (yield setProxies(self.store, principal, readProxies, writeProxies, directory=self.dir))
+        if writeProxies:
+            proxyRecords = []
+            for proxyUID in writeProxies:
+                proxyRecord = yield self.dir.recordWithUID(proxyUID)
+                if proxyRecord is not None:
+                    proxyRecords.append(proxyRecord)
+            writeProxies = proxyRecords
 
-        yield self.command_getLocationAttributes(command)
+        yield setProxies(record, readProxies, writeProxies)
 
+        yield self.respondWithRecordsOfTypes(self.dir, command, [typeName])
 
-    def command_deleteLocation(self, command):
-        kwargs = {}
-        for key, info in attrMap.iteritems():
-            if key in command:
-                kwargs[info['attr']] = command[key]
-        try:
-            self.dir.destroyRecord("locations", **kwargs)
-        except DirectoryError, e:
-            self.respondWithError(str(e))
-            return
-        self.respondWithRecordsOfTypes(self.dir, command, ["locations"])
 
+    def command_createLocation(self, command):
+        return self._saveRecord("locations", CalRecordType.location, command)
 
-    # Resources
 
-    def command_getResourceList(self, command):
-        self.respondWithRecordsOfTypes(self.dir, command, ["resources"])
-
-
-    @inlineCallbacks
     def command_createResource(self, command):
-        kwargs = {}
-        for key, info in attrMap.iteritems():
-            if key in command:
-                kwargs[info['attr']] = command[key]
+        return self._saveRecord("resources", CalRecordType.resource, command)
 
-        try:
-            record = (yield updateRecord(True, self.dir, "resources", **kwargs))
-        except DirectoryError, e:
-            self.respondWithError(str(e))
-            return
 
-        readProxies = command.get("ReadProxies", None)
-        writeProxies = command.get("WriteProxies", None)
-        principal = principalForPrincipalID(record.guid, directory=self.dir)
-        (yield setProxies(self.store, principal, readProxies, writeProxies, directory=self.dir))
+    def command_createAddress(self, command):
+        return self._saveRecord("addresses", CalRecordType.address, command)
 
-        self.respondWithRecordsOfTypes(self.dir, command, ["resources"])
 
+    @inlineCallbacks
+    def command_setLocationAttributes(self, command):
+        uid = command['GeneratedUID']
+        record = yield self.dir.recordWithUID(uid)
+        yield self._saveRecord(
+            "locations",
+            CalRecordType.location,
+            command,
+            oldFields=record.fields
+        )
 
     @inlineCallbacks
     def command_setResourceAttributes(self, command):
+        uid = command['GeneratedUID']
+        record = yield self.dir.recordWithUID(uid)
+        yield self._saveRecord(
+            "resources",
+            CalRecordType.resource,
+            command,
+            oldFields=record.fields
+        )
 
-        # Set autoSchedule prior to the updateRecord so that the right
-        # value ends up in memcached
-        principal = principalForPrincipalID(command['GeneratedUID'],
-            directory=self.dir)
-        (yield principal.setAutoSchedule(command.get('AutoSchedule', False)))
-        (yield principal.setAutoAcceptGroup(command.get('AutoAcceptGroup', "")))
 
-        kwargs = {}
-        for key, info in attrMap.iteritems():
-            if key in command:
-                kwargs[info['attr']] = command[key]
-        try:
-            record = (yield updateRecord(False, self.dir, "resources", **kwargs))
-        except DirectoryError, e:
-            self.respondWithError(str(e))
+    @inlineCallbacks
+    def command_setAddressAttributes(self, command):
+        uid = command['GeneratedUID']
+        record = yield self.dir.recordWithUID(uid)
+        yield self._saveRecord(
+            "addresses",
+            CalRecordType.address,
+            command,
+            oldFields=record.fields
+        )
+
+
+    @inlineCallbacks
+    def command_getLocationAttributes(self, command):
+        uid = command['GeneratedUID']
+        record = yield self.dir.recordWithUID(uid)
+        if record is None:
+            self.respondWithError("Principal not found: %s" % (uid,))
             return
+        recordDict = recordToDict(record)
+        # recordDict['AutoSchedule'] = principal.getAutoSchedule()
+        try:
+            recordDict['AutoAcceptGroup'] = record.autoAcceptGroup
+        except AttributeError:
+            pass
 
-        readProxies = command.get("ReadProxies", None)
-        writeProxies = command.get("WriteProxies", None)
-        principal = principalForPrincipalID(record.guid, directory=self.dir)
-        (yield setProxies(self.store, principal, readProxies, writeProxies, directory=self.dir))
+        readProxies, writeProxies = yield getProxies(record)
+        recordDict['ReadProxies'] = [r.uid for r in readProxies]
+        recordDict['WriteProxies'] = [r.uid for r in writeProxies]
+        self.respond(command, recordDict)
 
-        yield self.command_getResourceAttributes(command)
+    command_getResourceAttributes = command_getLocationAttributes
+    command_getAddressAttributes = command_getLocationAttributes
 
 
-    def command_deleteResource(self, command):
-        kwargs = {}
-        for key, info in attrMap.iteritems():
-            if key in command:
-                kwargs[info['attr']] = command[key]
-        try:
-            self.dir.destroyRecord("resources", **kwargs)
-        except DirectoryError, e:
-            self.respondWithError(str(e))
-            return
+    # Resources
+
+    def command_getResourceList(self, command):
         self.respondWithRecordsOfTypes(self.dir, command, ["resources"])
 
 
+    # deferred
     def command_getLocationAndResourceList(self, command):
-        self.respondWithRecordsOfTypes(self.dir, command, ["locations", "resources"])
+        return self.respondWithRecordsOfTypes(self.dir, command, ["locations", "resources"])
 
 
     # Addresses
 
     def command_getAddressList(self, command):
-        self.respondWithRecordsOfTypes(self.dir, command, ["addresses"])
+        return self.respondWithRecordsOfTypes(self.dir, command, ["addresses"])
 
 
     @inlineCallbacks
-    def command_createAddress(self, command):
-        kwargs = {}
-        for key, info in attrMap.iteritems():
-            if key in command:
-                kwargs[info['attr']] = command[key]
+    def _delete(self, typeName, command):
+        uid = command['GeneratedUID']
+        yield self.dir.removeRecords([uid])
+        self.respondWithRecordsOfTypes(self.dir, command, [typeName])
 
-        try:
-            yield updateRecord(True, self.dir, "addresses", **kwargs)
-        except DirectoryError, e:
-            self.respondWithError(str(e))
-            return
 
-        self.respondWithRecordsOfTypes(self.dir, command, ["addresses"])
+    def command_deleteLocation(self, command):
+        return self._delete("locations", command)
 
 
-    def command_getAddressAttributes(self, command):
-        guid = command['GeneratedUID']
-        record = self.dir.recordWithGUID(guid)
-        if record is None:
-            self.respondWithError("Principal not found: %s" % (guid,))
-            return
-        recordDict = recordToDict(record)
-        self.respond(command, recordDict)
-        return succeed(None)
+    def command_deleteResource(self, command):
+        return self._delete("resources", command)
 
 
-    @inlineCallbacks
-    def command_setAddressAttributes(self, command):
-        kwargs = {}
-        for key, info in attrMap.iteritems():
-            if key in command:
-                kwargs[info['attr']] = command[key]
-        try:
-            yield updateRecord(False, self.dir, "addresses", **kwargs)
-        except DirectoryError, e:
-            self.respondWithError(str(e))
-            return
-
-        yield self.command_getAddressAttributes(command)
-
-
     def command_deleteAddress(self, command):
-        kwargs = {}
-        for key, info in attrMap.iteritems():
-            if key in command:
-                kwargs[info['attr']] = command[key]
-        try:
-            self.dir.destroyRecord("addresses", **kwargs)
-        except DirectoryError, e:
-            self.respondWithError(str(e))
-            return
-        self.respondWithRecordsOfTypes(self.dir, command, ["addresses"])
+        return self._delete("addresses", command)
 
 
     # Config
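
The _saveRecord() helper introduced above replaces the old per-command
updateRecord() calls: it walks attrMap, looks up each field's metadata on
the directory service, wraps scalars in lists for multi-value fields,
converts integers and named constants, and decodes byte strings to unicode
before building a DirectoryRecord.  A stripped-down, self-contained sketch
of that coercion step (FIELD_META is a hypothetical stand-in for the
directory's field metadata, and only the int/unicode cases are shown):

    # Coerce a raw plist value into the shape a directory-record field expects.
    FIELD_META = {
        "capacity":   {"multiValue": False, "valueType": int},
        "fullNames":  {"multiValue": True,  "valueType": unicode},
        "shortNames": {"multiValue": True,  "valueType": unicode},
    }


    def coerceValue(fieldName, value):
        meta = FIELD_META[fieldName]

        # Single values destined for multi-value fields get wrapped in a list.
        if meta["multiValue"] and not isinstance(value, list):
            value = [value]

        if meta["valueType"] is int:
            return int(value)

        # Decode plain byte strings to unicode, element-wise for lists.
        if isinstance(value, list):
            return [v.decode("utf-8") if isinstance(v, str) else v for v in value]
        if isinstance(value, str):
            return value.decode("utf-8")
        return value

    # coerceValue("capacity", "12")               -> 12
    # coerceValue("fullNames", "Conference Room") -> [u"Conference Room"]
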
@@ -471,106 +471,74 @@
 
     # Proxies
 
-    @inlineCallbacks
     def command_listWriteProxies(self, command):
-        principal = principalForPrincipalID(command['Principal'], directory=self.dir)
-        if principal is None:
-            self.respondWithError("Principal not found: %s" % (command['Principal'],))
-            return
-        (yield self.respondWithProxies(self.dir, command, principal, "write"))
+        return self._listProxies(command, "write")
 
 
+    def command_listReadProxies(self, command):
+        return self._listProxies(command, "read")
+
     @inlineCallbacks
-    def command_addWriteProxy(self, command):
-        principal = principalForPrincipalID(command['Principal'],
-            directory=self.dir)
-        if principal is None:
+    def _listProxies(self, command, proxyType):
+        record = yield recordForPrincipalID(self.dir, command['Principal'])
+        if record is None:
             self.respondWithError("Principal not found: %s" % (command['Principal'],))
-            return
+            returnValue(None)
+        yield self.respondWithProxies(command, record, proxyType)
 
-        proxy = principalForPrincipalID(command['Proxy'], directory=self.dir)
-        if proxy is None:
-            self.respondWithError("Proxy not found: %s" % (command['Proxy'],))
-            return
-        try:
-            (yield addProxy(self.root, self.dir, self.store, principal, "write", proxy))
-        except ProxyError, e:
-            self.respondWithError(str(e))
-            return
-        except ProxyWarning, e:
-            pass
-        (yield self.respondWithProxies(self.dir, command, principal, "write"))
 
+    def command_addReadProxy(self, command):
+        return self._addProxy(command, "read")
 
+
+    def command_addWriteProxy(self, command):
+        return self._addProxy(command, "write")
+
+
     @inlineCallbacks
-    def command_removeWriteProxy(self, command):
-        principal = principalForPrincipalID(command['Principal'], directory=self.dir)
-        if principal is None:
+    def _addProxy(self, command, proxyType):
+        record = yield recordForPrincipalID(self.dir, command['Principal'])
+        if record is None:
             self.respondWithError("Principal not found: %s" % (command['Principal'],))
-            return
-        proxy = principalForPrincipalID(command['Proxy'], directory=self.dir)
-        if proxy is None:
+            returnValue(None)
+
+        proxyRecord = yield recordForPrincipalID(self.dir, command['Proxy'])
+        if proxyRecord is None:
             self.respondWithError("Proxy not found: %s" % (command['Proxy'],))
-            return
-        try:
-            (yield removeProxy(self.root, self.dir, self.store, principal, proxy, proxyTypes=("write",)))
-        except ProxyError, e:
-            self.respondWithError(str(e))
-            return
-        except ProxyWarning, e:
-            pass
-        (yield self.respondWithProxies(self.dir, command, principal, "write"))
+            returnValue(None)
 
+        txn = self.store.newTransaction()
+        yield addDelegate(txn, record, proxyRecord, (proxyType == "write"))
+        yield txn.commit()
+        yield self.respondWithProxies(command, record, proxyType)
 
-    @inlineCallbacks
-    def command_listReadProxies(self, command):
-        principal = principalForPrincipalID(command['Principal'], directory=self.dir)
-        if principal is None:
-            self.respondWithError("Principal not found: %s" % (command['Principal'],))
-            return
-        (yield self.respondWithProxies(self.dir, command, principal, "read"))
 
+    def command_removeReadProxy(self, command):
+        return self._removeProxy(command, "read")
 
-    @inlineCallbacks
-    def command_addReadProxy(self, command):
-        principal = principalForPrincipalID(command['Principal'], directory=self.dir)
-        if principal is None:
-            self.respondWithError("Principal not found: %s" % (command['Principal'],))
-            return
-        proxy = principalForPrincipalID(command['Proxy'], directory=self.dir)
-        if proxy is None:
-            self.respondWithError("Proxy not found: %s" % (command['Proxy'],))
-            return
-        try:
-            (yield addProxy(self.root, self.dir, self.store, principal, "read", proxy))
-        except ProxyError, e:
-            self.respondWithError(str(e))
-            return
-        except ProxyWarning, e:
-            pass
-        (yield self.respondWithProxies(self.dir, command, principal, "read"))
 
+    def command_removeWriteProxy(self, command):
+        return self._removeProxy(command, "write")
 
+
     @inlineCallbacks
-    def command_removeReadProxy(self, command):
-        principal = principalForPrincipalID(command['Principal'], directory=self.dir)
-        if principal is None:
+    def _removeProxy(self, command, proxyType):
+        record = yield recordForPrincipalID(self.dir, command['Principal'])
+        if record is None:
             self.respondWithError("Principal not found: %s" % (command['Principal'],))
-            return
-        proxy = principalForPrincipalID(command['Proxy'], directory=self.dir)
-        if proxy is None:
+            returnValue(None)
+
+        proxyRecord = yield recordForPrincipalID(self.dir, command['Proxy'])
+        if proxyRecord is None:
             self.respondWithError("Proxy not found: %s" % (command['Proxy'],))
-            return
-        try:
-            (yield removeProxy(self.root, self.dir, self.store, principal, proxy, proxyTypes=("read",)))
-        except ProxyError, e:
-            self.respondWithError(str(e))
-            return
-        except ProxyWarning, e:
-            pass
-        (yield self.respondWithProxies(self.dir, command, principal, "read"))
+            returnValue(None)
 
+        txn = self.store.newTransaction()
+        yield removeDelegate(txn, record, proxyRecord, (proxyType == "write"))
+        yield txn.commit()
+        yield self.respondWithProxies(command, record, proxyType)
 
+
     @inlineCallbacks
     def command_purgeOldEvents(self, command):
         """
@@ -585,40 +553,42 @@
         cutoff.setDateOnly(False)
         cutoff.offsetDay(-retainDays)
         eventCount = (yield PurgeOldEventsService.purgeOldEvents(self.store, cutoff, DEFAULT_BATCH_SIZE))
-        self.respond(command, {'EventsRemoved' : eventCount, "RetainDays" : retainDays})
+        self.respond(command, {'EventsRemoved': eventCount, "RetainDays": retainDays})
 
 
     @inlineCallbacks
-    def respondWithProxies(self, directory, command, principal, proxyType):
+    def respondWithProxies(self, command, record, proxyType):
         proxies = []
-        subPrincipal = proxySubprincipal(principal, proxyType)
-        if subPrincipal is not None:
-            membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
-            if membersProperty.children:
-                for member in membersProperty.children:
-                    proxyPrincipal = principalForPrincipalID(str(member), directory=directory)
-                    proxies.append(proxyPrincipal.record.guid)
+        recordType = {
+            "read": DelegateRecordType.readDelegateGroup,
+            "write": DelegateRecordType.writeDelegateGroup,
+        }[proxyType]
+        proxyGroup = yield self.dir.recordWithShortName(recordType, record.uid)
+        for member in (yield proxyGroup.members()):
+            proxies.append(member.uid)
 
         self.respond(command, {
-            'Principal' : principal.record.guid, 'Proxies' : proxies
+            'Principal': record.uid, 'Proxies': proxies
         })
 
 
+    @inlineCallbacks
     def respondWithRecordsOfTypes(self, directory, command, recordTypes):
         result = []
         for recordType in recordTypes:
-            for record in directory.listRecords(recordType):
+            recordType = directory.oldNameToRecordType(recordType)
+            for record in (yield directory.recordsWithRecordType(recordType)):
                 recordDict = recordToDict(record)
                 result.append(recordDict)
         self.respond(command, result)
 
 
     def respond(self, command, result):
-        self.output.write(writePlistToString({'command' : command['command'], 'result' : result}))
+        self.output.write(writePlistToString({'command': command['command'], 'result': result}))
 
 
     def respondWithError(self, msg, status=1):
-        self.output.write(writePlistToString({'error' : msg, }))
+        self.output.write(writePlistToString({'error': msg, }))
 
 
 
@@ -626,12 +596,17 @@
     recordDict = {}
     for key, info in attrMap.iteritems():
         try:
-            if info.get('extras', False):
-                value = record.extras[info['attr']]
-            else:
-                value = getattr(record, info['attr'])
+            value = record.fields[record.service.fieldName.lookupByName(info['attr'])]
+            if value is None:
+                continue
+            # For backwards compatibility, present fullName/RealName as single
+            # value even though twext.who now has it as multiValue
+            if key == "RealName":
+                value = value[0]
             if isinstance(value, str):
                 value = value.decode("utf-8")
+            elif isinstance(value, NamedConstant):
+                value = value.name
             recordDict[key] = value
         except KeyError:
             pass
@@ -640,7 +615,7 @@
 
 
 def respondWithError(msg, status=1):
-    sys.stdout.write(writePlistToString({'error' : msg, }))
+    sys.stdout.write(writePlistToString({'error': msg, }))
 
 
 

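Gateway.py also moves proxy management off the old principal/DAV-property
machinery and onto txdav.who.delegates: every add or remove now runs
addDelegate()/removeDelegate() inside a store transaction, with the
read/write distinction carried as a boolean.  A condensed sketch of that
pattern, assuming an opened calendar store and two directory records
(changeProxy is an illustrative name, not part of the patch):

    from twisted.internet.defer import inlineCallbacks

    from txdav.who.delegates import addDelegate, removeDelegate


    @inlineCallbacks
    def changeProxy(store, delegator, delegate, proxyType, add=True):
        # proxyType is "read" or "write"; the delegates API stores it as a flag.
        readWrite = (proxyType == "write")
        txn = store.newTransaction()
        if add:
            yield addDelegate(txn, delegator, delegate, readWrite)
        else:
            yield removeDelegate(txn, delegator, delegate, readWrite)
        yield txn.commit()
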
Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/migrate.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/migrate.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/migrate.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -33,8 +33,9 @@
 from twistedcaldav.config import ConfigurationError
 from twistedcaldav.upgrade import upgradeData
 
-from calendarserver.tools.util import loadConfig, getDirectory
+from calendarserver.tools.util import loadConfig
 
+
 def usage(e=None):
     if e:
         print(e)
@@ -81,7 +82,6 @@
 
     try:
         config = loadConfig(configFileName)
-        config.directory = getDirectory()
     except ConfigurationError, e:
         sys.stdout.write("%s\n" % (e,))
         sys.exit(1)
@@ -90,7 +90,7 @@
 
     if profiling:
         import cProfile
-        cProfile.runctx("upgradeData(c)", globals(), {"c" : config}, "/tmp/upgrade.prof")
+        cProfile.runctx("upgradeData(c)", globals(), {"c": config}, "/tmp/upgrade.prof")
     else:
         upgradeData(config)
 

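When the profiling branch shown above is taken, cProfile.runctx() writes its
timing data to /tmp/upgrade.prof; one way to inspect such a dump afterwards,
using only the standard library:

    import pstats

    # Load the dump written by cProfile.runctx() and show the 20 hottest
    # entries by cumulative time.
    stats = pstats.Stats("/tmp/upgrade.prof")
    stats.strip_dirs().sort_stats("cumulative").print_stats(20)
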
Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/principals.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/principals.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/principals.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -17,39 +17,40 @@
 ##
 from __future__ import print_function
 
-import sys
-import os
-import operator
 from getopt import getopt, GetoptError
+import operator
+import os
+import sys
 from uuid import UUID
 
+from calendarserver.tools.cmdline import utilityMain, WorkerService
+from calendarserver.tools.util import (
+    recordForPrincipalID, prettyRecord
+)
+from twext.who.directory import DirectoryRecord
+from twext.who.idirectory import RecordType, InvalidDirectoryRecordError
 from twisted.internet import reactor
 from twisted.internet.defer import inlineCallbacks, returnValue, succeed
-from txdav.xml import element as davxml
-
-from txdav.xml.base import decodeXMLName, encodeXMLName
-
 from twistedcaldav.config import config
-from twistedcaldav.directory.directory import UnknownRecordTypeError, DirectoryError
-from txdav.who.groups import schedulePolledGroupCachingUpdate
-
-from calendarserver.tools.util import (
-    booleanArgument, proxySubprincipal, action_addProxyPrincipal,
-    principalForPrincipalID, prettyPrincipal, ProxyError,
-    action_removeProxyPrincipal
+from txdav.who.delegates import (
+    addDelegate, removeDelegate, RecordType as DelegateRecordType
 )
-from twistedcaldav.directory.augment import allowedAutoScheduleModes
+from txdav.who.idirectory import AutoScheduleMode
 
-from calendarserver.tools.cmdline import utilityMain, WorkerService
 
+allowedAutoScheduleModes = {
+    "default": None,
+    "none": AutoScheduleMode.none,
+    "accept-always": AutoScheduleMode.accept,
+    "decline-always": AutoScheduleMode.decline,
+    "accept-if-free": AutoScheduleMode.acceptIfFree,
+    "decline-if-busy": AutoScheduleMode.declineIfBusy,
+    "automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy,
+}
 
+
 def usage(e=None):
     if e:
-        if isinstance(e, UnknownRecordTypeError):
-            print("Valid record types:")
-            for recordType in config.directory.recordTypes():
-                print("    %s" % (recordType,))
-
         print(e)
         print("")
 
@@ -74,20 +75,18 @@
     print("  --search <search-string>: search for matching principals")
     print("  --list-principal-types: list all of the known principal types")
     print("  --list-principals type: list all principals of the given type")
-    print("  --read-property=property: read DAV property (eg.: {DAV:}group-member-set)")
     print("  --list-read-proxies: list proxies with read-only access")
     print("  --list-write-proxies: list proxies with read-write access")
     print("  --list-proxies: list all proxies")
+    print("  --list-proxy-for: principals this principal is a proxy for")
     print("  --add-read-proxy=principal: add a read-only proxy")
     print("  --add-write-proxy=principal: add a read-write proxy")
     print("  --remove-proxy=principal: remove a proxy")
-    print("  --set-auto-schedule={true|false}: set auto-accept state")
-    print("  --get-auto-schedule: read auto-schedule state")
     print("  --set-auto-schedule-mode={default|none|accept-always|decline-always|accept-if-free|decline-if-busy|automatic}: set auto-schedule mode")
     print("  --get-auto-schedule-mode: read auto-schedule mode")
     print("  --set-auto-accept-group=principal: set auto-accept-group")
     print("  --get-auto-accept-group: read auto-accept-group")
-    print("  --add {locations|resources|addresses} 'full name' [record name] [GUID]: add a principal")
+    print("  --add {locations|resources|addresses} full-name record-name UID: add a principal")
     print("  --remove: remove a principal")
     print("  --set-geo=url: set the geo: url for an address (e.g. geo:37.331741,-122.030333)")
     print("  --get-geo: get the geo: url for an address")
@@ -102,7 +101,6 @@
         sys.exit(0)
 
 
-
 class PrincipalService(WorkerService):
     """
     Executes principals-related functions in a context which has access to the store
@@ -118,33 +116,10 @@
         resource, directory, store, and whatever has been assigned to "params".
         """
         if self.function is not None:
-            rootResource = self.rootResource()
-            directory = rootResource.getDirectory()
-            yield self.function(rootResource, directory, self.store, *self.params)
+            yield self.function(self.store, *self.params)
 
-attrMap = {
-    'GeneratedUID' : { 'attr' : 'guid', },
-    'RealName' : { 'attr' : 'fullName', },
-    'RecordName' : { 'attr' : 'shortNames', },
-    'AutoSchedule' : { 'attr' : 'autoSchedule', },
-    'AutoAcceptGroup' : { 'attr' : 'autoAcceptGroup', },
 
-    'Comment' : { 'extras' : True, 'attr' : 'comment', },
-    'Description' : { 'extras' : True, 'attr' : 'description', },
-    'Type' : { 'extras' : True, 'attr' : 'type', },
 
-    # For "Locations", i.e. scheduled spaces
-    'Capacity' : { 'extras' : True, 'attr' : 'capacity', },
-    'Floor' : { 'extras' : True, 'attr' : 'floor', },
-    'AssociatedAddress' : { 'extras' : True, 'attr' : 'associatedAddress', },
-
-    # For "Addresses", i.e. nonscheduled areas containing Locations
-    'AbbreviatedName' : { 'extras' : True, 'attr' : 'abbreviatedName', },
-    'StreetAddress' : { 'extras' : True, 'attr' : 'streetAddress', },
-    'Geo' : { 'extras' : True, 'attr' : 'geo', },
-}
-
-
 def main():
     try:
         (optargs, args) = getopt(
@@ -156,15 +131,13 @@
                 "search=",
                 "list-principal-types",
                 "list-principals=",
-                "read-property=",
                 "list-read-proxies",
                 "list-write-proxies",
                 "list-proxies",
+                "list-proxy-for",
                 "add-read-proxy=",
                 "add-write-proxy=",
                 "remove-proxy=",
-                "set-auto-schedule=",
-                "get-auto-schedule",
                 "set-auto-schedule-mode=",
                 "get-auto-schedule-mode",
                 "set-auto-accept-group=",
@@ -193,6 +166,10 @@
     verbose = False
 
     for opt, arg in optargs:
+
+        # Args come in as encoded bytes
+        arg = arg.decode("utf-8")
+
         if opt in ("-h", "--help"):
             usage()
 
@@ -217,13 +194,6 @@
         elif opt in ("", "--search"):
             searchPrincipals = arg
 
-        elif opt in ("", "--read-property"):
-            try:
-                qname = decodeXMLName(arg)
-            except ValueError, e:
-                abort(e)
-            principalActions.append((action_readProperty, qname))
-
         elif opt in ("", "--list-read-proxies"):
             principalActions.append((action_listProxies, "read"))
 
@@ -233,6 +203,9 @@
         elif opt in ("-L", "--list-proxies"):
             principalActions.append((action_listProxies, "read", "write"))
 
+        elif opt in ("--list-proxy-for"):
+            principalActions.append((action_listProxyFor, "read", "write"))
+
         elif opt in ("--add-read-proxy", "--add-write-proxy"):
             if "read" in opt:
                 proxyType = "read"
@@ -240,38 +213,17 @@
                 proxyType = "write"
             else:
                 raise AssertionError("Unknown proxy type")
-
-            try:
-                principalForPrincipalID(arg, checkOnly=True)
-            except ValueError, e:
-                abort(e)
-
             principalActions.append((action_addProxy, proxyType, arg))
 
         elif opt in ("", "--remove-proxy"):
-            try:
-                principalForPrincipalID(arg, checkOnly=True)
-            except ValueError, e:
-                abort(e)
-
             principalActions.append((action_removeProxy, arg))
 
-        elif opt in ("", "--set-auto-schedule"):
-            try:
-                autoSchedule = booleanArgument(arg)
-            except ValueError, e:
-                abort(e)
-
-            principalActions.append((action_setAutoSchedule, autoSchedule))
-
-        elif opt in ("", "--get-auto-schedule"):
-            principalActions.append((action_getAutoSchedule,))
-
         elif opt in ("", "--set-auto-schedule-mode"):
             try:
                 if arg not in allowedAutoScheduleModes:
-                    raise ValueError("Unknown auto-schedule mode: %s" % (arg,))
-                autoScheduleMode = arg
+                    raise ValueError("Unknown auto-schedule mode: {mode}".format(
+                        mode=arg))
+                autoScheduleMode = allowedAutoScheduleModes[arg]
             except ValueError, e:
                 abort(e)
 
@@ -281,33 +233,28 @@
             principalActions.append((action_getAutoScheduleMode,))
 
         elif opt in ("", "--set-auto-accept-group"):
-            try:
-                principalForPrincipalID(arg, checkOnly=True)
-            except ValueError, e:
-                abort(e)
-
             principalActions.append((action_setAutoAcceptGroup, arg))
 
         elif opt in ("", "--get-auto-accept-group"):
             principalActions.append((action_getAutoAcceptGroup,))
 
         elif opt in ("", "--set-geo"):
-            principalActions.append((action_setValue, "Geo", arg))
+            principalActions.append((action_setValue, u"geographicLocation", arg))
 
         elif opt in ("", "--get-geo"):
-            principalActions.append((action_getValue, "Geo"))
+            principalActions.append((action_getValue, u"geographicLocation"))
 
         elif opt in ("", "--set-street-address"):
-            principalActions.append((action_setValue, "StreetAddress", arg))
+            principalActions.append((action_setValue, u"streetAddress", arg))
 
         elif opt in ("", "--get-street-address"):
-            principalActions.append((action_getValue, "StreetAddress"))
+            principalActions.append((action_getValue, u"streetAddress"))
 
         elif opt in ("", "--set-address"):
-            principalActions.append((action_setValue, "AssociatedAddress", arg))
+            principalActions.append((action_setValue, u"associatedAddress", arg))
 
         elif opt in ("", "--get-address"):
-            principalActions.append((action_getValue, "AssociatedAddress"))
+            principalActions.append((action_getValue, u"associatedAddress"))
 
         else:
             raise NotImplementedError(opt)
@@ -325,29 +272,41 @@
     elif addType:
 
         try:
-            addType = matchStrings(addType, ["locations", "resources", "addresses"])
+            addType = matchStrings(
+                addType,
+                [
+                    "locations", "resources", "addresses", "users", "groups"
+                ]
+            )
         except ValueError, e:
             print(e)
             return
 
         try:
-            fullName, shortName, guid = parseCreationArgs(args)
+            fullName, shortName, uid = parseCreationArgs(args)
         except ValueError, e:
             print(e)
             return
 
+        if fullName is not None:
+            fullNames = [fullName]
+        else:
+            fullNames = ()
+
         if shortName is not None:
             shortNames = [shortName]
         else:
             shortNames = ()
 
         function = runAddPrincipal
-        params = (addType, guid, shortNames, fullName)
+        params = (addType, uid, shortNames, fullNames)
 
     elif listPrincipals:
         try:
-            listPrincipals = matchStrings(listPrincipals, ["users", "groups",
-                "locations", "resources", "addresses"])
+            listPrincipals = matchStrings(
+                listPrincipals,
+                ["users", "groups", "locations", "resources", "addresses"]
+            )
         except ValueError, e:
             print(e)
             return
@@ -363,21 +322,12 @@
         params = (searchPrincipals,)
 
     else:
-        #
-        # Do a quick sanity check that arguments look like principal
-        # identifiers.
-        #
         if not args:
             usage("No principals specified.")
 
-        for arg in args:
-            try:
-                principalForPrincipalID(arg, checkOnly=True)
-            except ValueError, e:
-                abort(e)
-
+        unicodeArgs = [a.decode("utf-8") for a in args]
         function = runPrincipalActions
-        params = (args, principalActions)
+        params = (unicodeArgs, principalActions)
 
     PrincipalService.function = function
     PrincipalService.params = params
@@ -385,74 +335,86 @@
 
 
 
-def runListPrincipalTypes(service, rootResource, directory, store):
+def runListPrincipalTypes(service, store):
+    directory = store.directoryService()
     for recordType in directory.recordTypes():
-        print(recordType)
+        print(directory.recordTypeToOldName(recordType))
     return succeed(None)
 
 
 
-def runListPrincipals(service, rootResource, directory, store, listPrincipals):
+@inlineCallbacks
+def runListPrincipals(service, store, listPrincipals):
+    directory = store.directoryService()
+    recordType = directory.oldNameToRecordType(listPrincipals)
     try:
-        records = list(directory.listRecords(listPrincipals))
+        records = list((yield directory.recordsWithRecordType(recordType)))
         if records:
             printRecordList(records)
         else:
             print("No records of type %s" % (listPrincipals,))
-    except UnknownRecordTypeError, e:
+    except InvalidDirectoryRecordError, e:
         usage(e)
-    return succeed(None)
+    returnValue(None)
 
 
 
 @inlineCallbacks
-def runPrincipalActions(service, rootResource, directory, store, principalIDs,
-    actions):
+def runPrincipalActions(service, store, principalIDs, actions):
+    directory = store.directoryService()
     for principalID in principalIDs:
-        # Resolve the given principal IDs to principals
+        # Resolve the given principal IDs to records
         try:
-            principal = principalForPrincipalID(principalID, directory=directory)
+            record = yield recordForPrincipalID(directory, principalID)
         except ValueError:
-            principal = None
+            record = None
 
-        if principal is None:
+        if record is None:
             sys.stderr.write("Invalid principal ID: %s\n" % (principalID,))
             continue
 
         # Performs requested actions
         for action in actions:
-            (yield action[0](rootResource, directory, store, principal,
-                *action[1:]))
+            (yield action[0](store, record, *action[1:]))
             print("")
 
 
 
 @inlineCallbacks
-def runSearch(service, rootResource, directory, store, searchTerm):
-
+def runSearch(service, store, searchTerm):
+    directory = store.directoryService()
     fields = []
-    for fieldName in ("fullName", "firstName", "lastName", "emailAddresses"):
+    for fieldName in ("fullNames", "emailAddresses"):
         fields.append((fieldName, searchTerm, True, "contains"))
 
     records = list((yield directory.recordsMatchingTokens(searchTerm.strip().split())))
     if records:
-        records.sort(key=operator.attrgetter('fullName'))
-        print("%d matches found:" % (len(records),))
+        records.sort(key=operator.attrgetter('fullNames'))
+        print("{n} matches found:".format(n=len(records)))
         for record in records:
-            print("\n%s (%s)" % (record.fullName,
-                {"users" : "User",
-                 "groups" : "Group",
-                 "locations" : "Place",
-                 "resources" : "Resource",
-                 "addresses" : "Address",
-                }.get(record.recordType),
-            ))
-            print("   GUID: %s" % (record.guid,))
-            print("   Record name(s): %s" % (", ".join(record.shortNames),))
-            if record.authIDs:
-                print("   Auth ID(s): %s" % (", ".join(record.authIDs),))
-            if record.emailAddresses:
-                print("   Email(s): %s" % (", ".join(record.emailAddresses),))
+            print(
+                "\n{d} ({rt})".format(
+                    d=record.displayName,
+                    rt=record.recordType.name
+                )
+            )
+            print("   UID: {u}".format(u=record.uid,))
+            print(
+                "   Record name{plural}: {names}".format(
+                    plural=("s" if len(record.shortNames) > 1 else ""),
+                    names=(", ".join(record.shortNames))
+                )
+            )
+            try:
+                if record.emailAddresses:
+                    print(
+                        "   Email{plural}: {emails}".format(
+                            plural=("s" if len(record.emailAddresses) > 1 else ""),
+                            emails=(", ".join(record.emailAddresses))
+                        )
+                    )
+            except AttributeError:
+                pass
     else:
         print("No matches found")
 
@@ -461,291 +423,346 @@
 
 
 @inlineCallbacks
-def runAddPrincipal(service, rootResource, directory, store, addType, guid,
-    shortNames, fullName):
-    try:
-        yield updateRecord(True, directory, addType, guid=guid,
-            shortNames=shortNames, fullName=fullName)
-        print("Added '%s'" % (fullName,))
-    except DirectoryError, e:
-        print(e)
+def runAddPrincipal(service, store, addType, uid, shortNames, fullNames):
+    directory = store.directoryService()
+    recordType = directory.oldNameToRecordType(addType)
 
+    # See if that UID is in use
+    record = yield directory.recordWithUID(uid)
+    if record is not None:
+        print("UID already in use: {uid}".format(uid=uid))
+        returnValue(None)
 
+    # See if the shortnames are in use
+    for shortName in shortNames:
+        record = yield directory.recordWithShortName(recordType, shortName)
+        if record is not None:
+            print("Record name already in use: {name}".format(name=shortName))
+            returnValue(None)
 
-def action_removePrincipal(rootResource, directory, store, principal):
-    record = principal.record
-    fullName = record.fullName
-    shortName = record.shortNames[0]
-    guid = record.guid
+    fields = {
+        directory.fieldName.recordType: recordType,
+        directory.fieldName.uid: uid,
+        directory.fieldName.shortNames: shortNames,
+        directory.fieldName.fullNames: fullNames,
+    }
+    record = DirectoryRecord(directory, fields)
+    yield record.service.updateRecords([record], create=True)
+    print("Added '{name}'".format(name=fullNames[0]))
 
-    directory.destroyRecord(record.recordType, guid=guid)
-    print("Removed '%s' %s %s" % (fullName, shortName, guid))
 
 
-
 @inlineCallbacks
-def action_readProperty(rootResource, directory, store, resource, qname):
-    property = (yield resource.readProperty(qname, None))
-    print("%r on %s:" % (encodeXMLName(*qname), resource))
-    print("")
-    print(property.toxml())
+def action_removePrincipal(store, record):
+    directory = store.directoryService()
+    fullName = record.displayName
+    shortNames = ",".join(record.shortNames)
 
+    yield directory.removeRecords([record.uid])
+    print(
+        "Removed '{full}' {shorts} {uid}".format(
+            full=fullName, shorts=shortNames, uid=record.uid
+        )
+    )
 
 
+
+
 @inlineCallbacks
-def action_listProxies(rootResource, directory, store, principal, *proxyTypes):
+def action_listProxies(store, record, *proxyTypes):
+    directory = store.directoryService()
     for proxyType in proxyTypes:
-        subPrincipal = proxySubprincipal(principal, proxyType)
-        if subPrincipal is None:
-            print("No %s proxies for %s" % (proxyType,
-                prettyPrincipal(principal)))
-            continue
 
-        membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
+        groupRecordType = {
+            "read": directory.recordType.readDelegateGroup,
+            "write": directory.recordType.writeDelegateGroup,
+        }.get(proxyType)
 
-        if membersProperty.children:
+        pseudoGroup = yield directory.recordWithShortName(
+            groupRecordType,
+            record.uid
+        )
+        proxies = yield pseudoGroup.members()
+        if proxies:
             print("%s proxies for %s:" % (
                 {"read": "Read-only", "write": "Read/write"}[proxyType],
-                prettyPrincipal(principal)
+                prettyRecord(record)
             ))
-            records = []
-            for member in membersProperty.children:
-                proxyPrincipal = principalForPrincipalID(str(member),
-                    directory=directory)
-                records.append(proxyPrincipal.record)
-
-            printRecordList(records)
-            print
+            printRecordList(proxies)
+            print("")
         else:
-            print("No %s proxies for %s" % (proxyType,
-                prettyPrincipal(principal)))
+            print("No %s proxies for %s" % (proxyType, prettyRecord(record)))
 
 
+@inlineCallbacks
+def action_listProxyFor(store, record, *proxyTypes):
+    directory = store.directoryService()
+    for proxyType in proxyTypes:
 
+        groupRecordType = {
+            "read": directory.recordType.readDelegatorGroup,
+            "write": directory.recordType.writeDelegatorGroup,
+        }.get(proxyType)
+
+        pseudoGroup = yield directory.recordWithShortName(
+            groupRecordType,
+            record.uid
+        )
+        proxies = yield pseudoGroup.members()
+        if proxies:
+            print("%s is a %s proxy for:" % (
+                prettyRecord(record),
+                {"read": "Read-only", "write": "Read/write"}[proxyType]
+            ))
+            printRecordList(proxies)
+            print("")
+        else:
+            print(
+                "{r} is not a {t} proxy for anyone".format(
+                    r=prettyRecord(record),
+                    t={"read": "Read-only", "write": "Read/write"}[proxyType]
+                )
+            )
+
+
 @inlineCallbacks
-def action_addProxy(rootResource, directory, store, principal, proxyType, *proxyIDs):
+def _addRemoveProxy(msg, fn, store, record, proxyType, *proxyIDs):
+    directory = store.directoryService()
+    readWrite = (proxyType == "write")
     for proxyID in proxyIDs:
-        proxyPrincipal = principalForPrincipalID(proxyID, directory=directory)
-        if proxyPrincipal is None:
+        proxyRecord = yield recordForPrincipalID(directory, proxyID)
+        if proxyRecord is None:
             print("Invalid principal ID: %s" % (proxyID,))
         else:
-            (yield action_addProxyPrincipal(rootResource, directory, store,
-                principal, proxyType, proxyPrincipal))
+            txn = store.newTransaction()
+            yield fn(txn, record, proxyRecord, readWrite)
+            yield txn.commit()
+            print(
+                "{msg} {proxy} as a {proxyType} proxy for {record}".format(
+                    msg=msg, proxy=prettyRecord(proxyRecord),
+                    proxyType=proxyType, record=prettyRecord(record)
+                )
+            )
 
 
+@inlineCallbacks
+def action_addProxy(store, record, proxyType, *proxyIDs):
+    yield _addRemoveProxy("Added", addDelegate, store, record, proxyType, *proxyIDs)
 
+
 @inlineCallbacks
-def setProxies(store, principal, readProxyPrincipals, writeProxyPrincipals, directory=None):
+def action_removeProxy(store, record, *proxyIDs):
+    # Write
+    yield _addRemoveProxy("Removed", removeDelegate, store, record, "write", *proxyIDs)
+    # Read
+    yield _addRemoveProxy("Removed", removeDelegate, store, record, "read", *proxyIDs)
+
+
+
+@inlineCallbacks
+def setProxies(record, readProxyRecords, writeProxyRecords):
     """
-    Set read/write proxies en masse for a principal
-    @param principal: DirectoryPrincipalResource
-    @param readProxyPrincipals: a list of principal IDs (see principalForPrincipalID)
-    @param writeProxyPrincipals: a list of principal IDs (see principalForPrincipalID)
+    Set read/write proxies en masse for a record
+    @param record: L{IDirectoryRecord}
+    @param readProxyRecords: a list of records
+    @param writeProxyRecords: a list of records
     """
 
     proxyTypes = [
-        ("read", readProxyPrincipals),
-        ("write", writeProxyPrincipals),
+        (DelegateRecordType.readDelegateGroup, readProxyRecords),
+        (DelegateRecordType.writeDelegateGroup, writeProxyRecords),
     ]
-    for proxyType, proxyIDs in proxyTypes:
-        if proxyIDs is None:
+    for recordType, proxyRecords in proxyTypes:
+        if proxyRecords is None:
             continue
-        subPrincipal = proxySubprincipal(principal, proxyType)
-        if subPrincipal is None:
-            raise ProxyError("Unable to edit %s proxies for %s\n" % (proxyType,
-                prettyPrincipal(principal)))
-        memberURLs = []
-        for proxyID in proxyIDs:
-            proxyPrincipal = principalForPrincipalID(proxyID, directory=directory)
-            proxyURL = proxyPrincipal.url()
-            memberURLs.append(davxml.HRef(proxyURL))
-        membersProperty = davxml.GroupMemberSet(*memberURLs)
-        yield subPrincipal.writeProperty(membersProperty, None)
-        if store is not None:
-            # Schedule work the PeerConnectionPool will pick up as overdue
-            yield schedulePolledGroupCachingUpdate(store)
+        proxyGroup = yield record.service.recordWithShortName(
+            recordType, record.uid
+        )
+        yield proxyGroup.setMembers(proxyRecords)
 
+    # if store is not None:
+    #     # Schedule work the PeerConnectionPool will pick up as overdue
+    #     yield schedulePolledGroupCachingUpdate(store)
 
 
+
 @inlineCallbacks
-def getProxies(principal, directory=None):
+def getProxies(record):
     """
-    Returns a tuple containing the GUIDs for read proxies and write proxies
-    of the given principal
+    Returns a tuple containing the records for read proxies and write proxies
+    of the given record
     """
 
-    proxies = {
-        "read" : [],
-        "write" : [],
+    allProxies = {
+        DelegateRecordType.readDelegateGroup: [],
+        DelegateRecordType.writeDelegateGroup: [],
     }
-    for proxyType in proxies.iterkeys():
-        subPrincipal = proxySubprincipal(principal, proxyType)
-        if subPrincipal is not None:
-            membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
-            if membersProperty.children:
-                for member in membersProperty.children:
-                    proxyPrincipal = principalForPrincipalID(str(member), directory=directory)
-                    proxies[proxyType].append(proxyPrincipal.record.guid)
+    for recordType in allProxies.iterkeys():
+        proxyGroup = yield record.service.recordWithShortName(
+            recordType, record.uid
+        )
+        allProxies[recordType] = yield proxyGroup.members()
 
-    returnValue((proxies['read'], proxies['write']))
+    returnValue(
+        (
+            allProxies[DelegateRecordType.readDelegateGroup],
+            allProxies[DelegateRecordType.writeDelegateGroup]
+        )
+    )
 
 
 
-@inlineCallbacks
-def action_removeProxy(rootResource, directory, store, principal, *proxyIDs, **kwargs):
-    for proxyID in proxyIDs:
-        proxyPrincipal = principalForPrincipalID(proxyID, directory=directory)
-        if proxyPrincipal is None:
-            print("Invalid principal ID: %s" % (proxyID,))
-        else:
-            (yield action_removeProxyPrincipal(rootResource, directory, store,
-                principal, proxyPrincipal, **kwargs))
 
 
+def action_getAutoScheduleMode(store, record):
+    print(
+        "Auto-schedule mode for {record} is {mode}".format(
+            record=prettyRecord(record),
+            mode=(
+                record.autoScheduleMode.description if record.autoScheduleMode
+                else "Default"
+            )
+        )
+    )
 
+
 @inlineCallbacks
-def action_setAutoSchedule(rootResource, directory, store, principal, autoSchedule):
-    if principal.record.recordType == "groups":
-        print("Enabling auto-schedule for %s is not allowed." % (principal,))
+def action_setAutoScheduleMode(store, record, autoScheduleMode):
+    if record.recordType == RecordType.group:
+        print(
+            "Setting auto-schedule-mode for {record} is not allowed.".format(
+                record=prettyRecord(record)
+            )
+        )
 
-    elif principal.record.recordType == "users" and not config.Scheduling.Options.AutoSchedule.AllowUsers:
-        print("Enabling auto-schedule for %s is not allowed." % (principal,))
+    elif (
+        record.recordType == RecordType.user and
+        not config.Scheduling.Options.AutoSchedule.AllowUsers
+    ):
+        print(
+            "Setting auto-schedule-mode for {record} is not allowed.".format(
+                record=prettyRecord(record)
+            )
+        )
 
     else:
-        print("Setting auto-schedule to %s for %s" % (
-            {True: "true", False: "false"}[autoSchedule],
-            prettyPrincipal(principal),
-        ))
+        print(
+            "Setting auto-schedule-mode to {mode} for {record}".format(
+                mode=autoScheduleMode.description,
+                record=prettyRecord(record),
+            )
+        )
 
-        (yield updateRecord(False, directory,
-            principal.record.recordType,
-            guid=principal.record.guid,
-            shortNames=principal.record.shortNames,
-            fullName=principal.record.fullName,
-            autoSchedule=autoSchedule,
-            **principal.record.extras
-        ))
+        # Get original fields
+        newFields = record.fields.copy()
 
+        # Set new values
+        newFields[record.service.fieldName.autoScheduleMode] = autoScheduleMode
 
+        updatedRecord = DirectoryRecord(record.service, newFields)
+        yield record.service.updateRecords([updatedRecord], create=False)
 
-def action_getAutoSchedule(rootResource, directory, store, principal):
-    autoSchedule = principal.getAutoSchedule()
-    print("Auto-schedule for %s is %s" % (
-        prettyPrincipal(principal),
-        {True: "true", False: "false"}[autoSchedule],
-    ))
 
-
-
 @inlineCallbacks
-def action_setAutoScheduleMode(rootResource, directory, store, principal, autoScheduleMode):
-    if principal.record.recordType == "groups":
-        print("Setting auto-schedule mode for %s is not allowed." % (principal,))
+def action_setAutoAcceptGroup(store, record, autoAcceptGroup):
+    if record.recordType == RecordType.group:
+        print(
+            "Setting auto-accept-group for {record} is not allowed.".format(
+                record=prettyRecord(record)
+            )
+        )
 
-    elif principal.record.recordType == "users" and not config.Scheduling.Options.AutoSchedule.AllowUsers:
-        print("Setting auto-schedule mode for %s is not allowed." % (principal,))
+    elif (
+        record.recordType == RecordType.user and
+        not config.Scheduling.Options.AutoSchedule.AllowUsers
+    ):
+        print(
+            "Setting auto-accept-group for {record} is not allowed.".format(
+                record=prettyRecord(record)
+            )
+        )
 
     else:
-        print("Setting auto-schedule mode to %s for %s" % (
-            autoScheduleMode,
-            prettyPrincipal(principal),
-        ))
+        groupRecord = yield recordForPrincipalID(record.service, autoAcceptGroup)
+        if groupRecord is None or groupRecord.recordType != RecordType.group:
+            print("Invalid principal ID: {id}".format(id=autoAcceptGroup))
+        else:
+            print("Setting auto-accept-group to {group} for {record}".format(
+                group=prettyRecord(groupRecord),
+                record=prettyRecord(record),
+            ))
 
-        (yield updateRecord(False, directory,
-            principal.record.recordType,
-            guid=principal.record.guid,
-            shortNames=principal.record.shortNames,
-            fullName=principal.record.fullName,
-            autoScheduleMode=autoScheduleMode,
-            **principal.record.extras
-        ))
+            # Get original fields
+            newFields = record.fields.copy()
 
+            # Set new values
+            newFields[record.service.fieldName.autoAcceptGroup] = groupRecord.uid
 
+            updatedRecord = DirectoryRecord(record.service, newFields)
+            yield record.service.updateRecords([updatedRecord], create=False)
 
-def action_getAutoScheduleMode(rootResource, directory, store, principal):
-    autoScheduleMode = principal.getAutoScheduleMode()
-    if not autoScheduleMode:
-        autoScheduleMode = "automatic"
-    print("Auto-schedule mode for %s is %s" % (
-        prettyPrincipal(principal),
-        autoScheduleMode,
-    ))
 
 
-
 @inlineCallbacks
-def action_setAutoAcceptGroup(rootResource, directory, store, principal, autoAcceptGroup):
-    if principal.record.recordType == "groups":
-        print("Setting auto-accept-group for %s is not allowed." % (principal,))
-
-    elif principal.record.recordType == "users" and not config.Scheduling.Options.AutoSchedule.AllowUsers:
-        print("Setting auto-accept-group for %s is not allowed." % (principal,))
-
-    else:
-        groupPrincipal = principalForPrincipalID(autoAcceptGroup, directory=directory)
-        if groupPrincipal is None or groupPrincipal.record.recordType != "groups":
-            print("Invalid principal ID: %s" % (autoAcceptGroup,))
+def action_getAutoAcceptGroup(store, record):
+    if record.autoAcceptGroup:
+        groupRecord = yield record.service.recordWithUID(
+            record.autoAcceptGroup
+        )
+        if groupRecord is not None:
+            print(
+                "Auto-accept-group for {record} is {group}".format(
+                    record=prettyRecord(record),
+                    group=prettyRecord(groupRecord),
+                )
+            )
         else:
-            print("Setting auto-accept-group to %s for %s" % (
-                prettyPrincipal(groupPrincipal),
-                prettyPrincipal(principal),
-            ))
-
-            (yield updateRecord(False, directory,
-                principal.record.recordType,
-                guid=principal.record.guid,
-                shortNames=principal.record.shortNames,
-                fullName=principal.record.fullName,
-                autoAcceptGroup=groupPrincipal.record.guid,
-                **principal.record.extras
-            ))
-
-
-
-def action_getAutoAcceptGroup(rootResource, directory, store, principal):
-    autoAcceptGroup = principal.getAutoAcceptGroup()
-    if autoAcceptGroup:
-        record = directory.recordWithGUID(autoAcceptGroup)
-        if record is not None:
-            groupPrincipal = directory.principalCollection.principalForUID(record.uid)
-            if groupPrincipal is not None:
-                print("Auto-accept-group for %s is %s" % (
-                    prettyPrincipal(principal),
-                    prettyPrincipal(groupPrincipal),
-                ))
-                return
-        print("Invalid auto-accept-group assigned: %s" % (autoAcceptGroup,))
+            print(
+                "Invalid auto-accept-group assigned: {uid}".format(
+                    uid=record.autoAcceptGroup
+                )
+            )
     else:
-        print("No auto-accept-group assigned to %s" % (prettyPrincipal(principal),))
+        print(
+            "No auto-accept-group assigned to {record}".format(
+                record=prettyRecord(record)
+            )
+        )
 
 
-
 @inlineCallbacks
-def action_setValue(rootResource, directory, store, principal, name, value):
-    print("Setting %s to %s for %s" % (
-        name, value, prettyPrincipal(principal),
-    ))
+def action_setValue(store, record, name, value):
+    print(
+        "Setting {name} to {value} for {record}".format(
+            name=name, value=value, record=prettyRecord(record),
+        )
+    )
+    # Get original fields
+    newFields = record.fields.copy()
 
-    principal.record.extras[attrMap[name]["attr"]] = value
-    (yield updateRecord(False, directory,
-        principal.record.recordType,
-        guid=principal.record.guid,
-        shortNames=principal.record.shortNames,
-        fullName=principal.record.fullName,
-        **principal.record.extras
-    ))
+    # Set new value
+    newFields[record.service.fieldName.lookupByName(name)] = value
 
+    updatedRecord = DirectoryRecord(record.service, newFields)
+    yield record.service.updateRecords([updatedRecord], create=False)
 
 
-def action_getValue(rootResource, directory, store, principal, name):
-    print("%s for %s is %s" % (
-        name,
-        prettyPrincipal(principal),
-        principal.record.extras[attrMap[name]["attr"]]
-    ))
+def action_getValue(store, record, name):
+    try:
+        value = record.fields[record.service.fieldName.lookupByName(name)]
+        print(
+            "{name} for {record} is {value}".format(
+                name=name, record=prettyRecord(record), value=value
+            )
+        )
+    except KeyError:
+        print(
+            "{name} is not set for {record}".format(
+                name=name, record=prettyRecord(record),
+            )
+        )
 
 
-
 def abort(msg, status=1):
     sys.stdout.write("%s\n" % (msg,))
     try:
@@ -758,29 +775,23 @@
 
 def parseCreationArgs(args):
     """
-    Look at the command line arguments for --add, and figure out which
-    one is the shortName and which one is the guid by attempting to make a
-    UUID object out of them.
+    Look at the command line arguments for --add, and simply assume the first
+    is full name, the second is short name, and the third is uid.  We can make
+    this fancier later.
     """
 
-    fullName = args[0]
-    shortName = None
-    guid = None
-    for arg in args[1:]:
-        if isUUID(arg):
-            if guid is not None:
-                # Both the 2nd and 3rd args are UUIDs.  The first one
-                # should be used for shortName.
-                shortName = guid
-            guid = arg
-        else:
-            shortName = arg
+    if len(args) != 3:
+        print(
+            "When adding a principal, you must provide full-name, record-name, "
+            "and UID"
+        )
+        sys.exit(64)
 
-    if len(args) == 3 and guid is None:
-        # both shortName and guid were specified but neither was a UUID
-        raise ValueError("Invalid value for guid")
+    fullName = args[0].decode("utf-8")
+    shortName = args[1].decode("utf-8")
+    uid = args[2].decode("utf-8")
 
-    return fullName, shortName, guid
+    return fullName, shortName, uid
 
 
 
@@ -803,95 +814,20 @@
 
 
 def printRecordList(records):
-    results = [(record.fullName, record.shortNames[0], record.guid)
-        for record in records]
+    results = [
+        (record.displayName, record.recordType.name, record.uid, record.shortNames)
+        for record in records
+    ]
     results.sort()
-    format = "%-22s %-17s %s"
-    print(format % ("Full name", "Record name", "UUID"))
-    print(format % ("---------", "-----------", "----"))
-    for fullName, shortName, guid in results:
-        print(format % (fullName, shortName, guid))
+    format = "%-22s %-10s %-20s %s"
+    print(format % ("Full name", "Type", "UID", "Short names"))
+    print(format % ("---------", "----", "---", "-----------"))
+    for fullName, recordType, uid, shortNames in results:
+        print(format % (fullName, recordType, uid, u", ".join(shortNames)))
 
 
 
- at inlineCallbacks
-def updateRecord(create, directory, recordType, **kwargs):
-    """
-    Create/update a record, including the extra work required to set the
-    autoSchedule bit in the augment record.
 
-    If C{create} is true, the record is created, otherwise update the record
-    matching the guid in kwargs.
-    """
 
-    assignAutoSchedule = False
-    if "autoSchedule" in kwargs:
-        assignAutoSchedule = True
-        autoSchedule = kwargs["autoSchedule"]
-        del kwargs["autoSchedule"]
-    elif create:
-        assignAutoSchedule = True
-        autoSchedule = recordType in ("locations", "resources")
-
-    assignAutoScheduleMode = False
-    if "autoScheduleMode" in kwargs:
-        assignAutoScheduleMode = True
-        autoScheduleMode = kwargs["autoScheduleMode"]
-        del kwargs["autoScheduleMode"]
-    elif create:
-        assignAutoScheduleMode = True
-        autoScheduleMode = None
-
-    assignAutoAcceptGroup = False
-    if "autoAcceptGroup" in kwargs:
-        assignAutoAcceptGroup = True
-        autoAcceptGroup = kwargs["autoAcceptGroup"]
-        del kwargs["autoAcceptGroup"]
-    elif create:
-        assignAutoAcceptGroup = True
-        autoAcceptGroup = None
-
-    for key, value in kwargs.items():
-        if isinstance(value, unicode):
-            kwargs[key] = value.encode("utf-8")
-        elif isinstance(value, list):
-            newValue = [v.encode("utf-8") for v in value]
-            kwargs[key] = newValue
-
-    if create:
-        record = directory.createRecord(recordType, **kwargs)
-        kwargs['guid'] = record.guid
-    else:
-        try:
-            record = directory.updateRecord(recordType, **kwargs)
-        except NotImplementedError:
-            # Updating of directory information is not supported by underlying
-            # directory implementation, but allow augment information to be
-            # updated
-            record = directory.recordWithGUID(kwargs["guid"])
-            pass
-
-    augmentService = directory.serviceForRecordType(recordType).augmentService
-    augmentRecord = (yield augmentService.getAugmentRecord(kwargs['guid'], recordType))
-
-    if assignAutoSchedule:
-        augmentRecord.autoSchedule = autoSchedule
-    if assignAutoScheduleMode:
-        augmentRecord.autoScheduleMode = autoScheduleMode
-    if assignAutoAcceptGroup:
-        augmentRecord.autoAcceptGroup = autoAcceptGroup
-    (yield augmentService.addAugmentRecords([augmentRecord]))
-    try:
-        directory.updateRecord(recordType, **kwargs)
-    except NotImplementedError:
-        # Updating of directory information is not supported by underlying
-        # directory implementation, but allow augment information to be
-        # updated
-        pass
-
-    returnValue(record)
-
-
-
 if __name__ == "__main__":
     main()
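
The rewritten principals tool never mutates a directory record in place: it copies the record's field map, changes one value, builds a fresh record from the copy, and hands that back to the service with updateRecords(create=False). Below is a minimal, self-contained sketch of that pattern; FakeFieldName, FakeRecord and FakeService are stand-ins invented here for illustration and are not the real twext.who classes.

# Toy stand-ins that mimic the copy-fields / updateRecords(create=False)
# update pattern used in principals.py above.
from __future__ import print_function


class FakeFieldName(object):
    uid = "uid"
    autoScheduleMode = "autoScheduleMode"


class FakeRecord(object):
    def __init__(self, service, fields):
        self.service = service
        self.fields = fields


class FakeService(object):
    fieldName = FakeFieldName

    def __init__(self):
        self.byUID = {}

    def updateRecords(self, records, create=False):
        for record in records:
            uid = record.fields[self.fieldName.uid]
            if not create and uid not in self.byUID:
                raise LookupError(uid)
            self.byUID[uid] = record


def setAutoScheduleMode(record, mode):
    # Copy the original fields, set the new value, write back a new record.
    newFields = record.fields.copy()
    newFields[record.service.fieldName.autoScheduleMode] = mode
    updated = FakeRecord(record.service, newFields)
    record.service.updateRecords([updated], create=False)
    return updated


service = FakeService()
service.byUID[u"mercury"] = FakeRecord(service, {FakeFieldName.uid: u"mercury"})
setAutoScheduleMode(service.byUID[u"mercury"], u"automatic")
print(service.byUID[u"mercury"].fields[FakeFieldName.autoScheduleMode])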

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/purge.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/purge.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/purge.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -19,7 +19,6 @@
 
 from calendarserver.tools import tables
 from calendarserver.tools.cmdline import utilityMain, WorkerService
-from calendarserver.tools.util import removeProxy
 
 from getopt import getopt, GetoptError
 
@@ -30,10 +29,9 @@
 from twisted.internet.defer import inlineCallbacks, returnValue
 
 from twistedcaldav import caldavxml
-from twistedcaldav.directory.directory import DirectoryRecord
+# from twistedcaldav.directory.directory import DirectoryRecord
 
 from txdav.caldav.datastore.query.filter import Filter
-from txdav.xml import element as davxml
 
 
 import collections
@@ -170,7 +168,7 @@
         service.batchSize = batchSize
         service.dryrun = dryrun
         service.verbose = verbose
-        result = (yield service.doWork())
+        result = yield service.doWork()
         returnValue(result)
 
 
@@ -181,7 +179,7 @@
             if self.verbose:
                 print("(Dry run) Searching for old events...")
             txn = self.store.newTransaction(label="Find old events")
-            oldEvents = (yield txn.eventsOlderThan(self.cutoff))
+            oldEvents = yield txn.eventsOlderThan(self.cutoff)
             eventCount = len(oldEvents)
             if self.verbose:
                 if eventCount == 0:
@@ -199,8 +197,8 @@
         totalRemoved = 0
         while numEventsRemoved:
             txn = self.store.newTransaction(label="Remove old events")
-            numEventsRemoved = (yield txn.removeOldEvents(self.cutoff, batchSize=self.batchSize))
-            (yield txn.commit())
+            numEventsRemoved = yield txn.removeOldEvents(self.cutoff, batchSize=self.batchSize)
+            yield txn.commit()
             if numEventsRemoved:
                 totalRemoved += numEventsRemoved
                 if self.verbose:
@@ -360,7 +358,7 @@
         service.batchSize = limit
         service.dryrun = dryrun
         service.verbose = verbose
-        result = (yield service.doWork())
+        result = yield service.doWork()
         returnValue(result)
 
 
@@ -368,20 +366,20 @@
     def doWork(self):
 
         if self.dryrun:
-            orphans = (yield self._orphansDryRun())
+            orphans = yield self._orphansDryRun()
             if self.cutoff is not None:
-                dropbox = (yield self._dropboxDryRun())
-                managed = (yield self._managedDryRun())
+                dropbox = yield self._dropboxDryRun()
+                managed = yield self._managedDryRun()
             else:
                 dropbox = ()
                 managed = ()
 
             returnValue(self._dryRunSummary(orphans, dropbox, managed))
         else:
-            total = (yield self._orphansPurge())
+            total = yield self._orphansPurge()
             if self.cutoff is not None:
-                total += (yield self._dropboxPurge())
-                total += (yield self._managedPurge())
+                total += yield self._dropboxPurge()
+                total += yield self._managedPurge()
             returnValue(total)
 
 
@@ -391,7 +389,7 @@
         if self.verbose:
             print("(Dry run) Searching for orphaned attachments...")
         txn = self.store.newTransaction(label="Find orphaned attachments")
-        orphans = (yield txn.orphanedAttachments(self.uuid))
+        orphans = yield txn.orphanedAttachments(self.uuid)
         returnValue(orphans)
 
 
@@ -401,7 +399,7 @@
         if self.verbose:
             print("(Dry run) Searching for old dropbox attachments...")
         txn = self.store.newTransaction(label="Find old dropbox attachments")
-        cutoffs = (yield txn.oldDropboxAttachments(self.cutoff, self.uuid))
+        cutoffs = yield txn.oldDropboxAttachments(self.cutoff, self.uuid)
         yield txn.commit()
 
         returnValue(cutoffs)
@@ -413,7 +411,7 @@
         if self.verbose:
             print("(Dry run) Searching for old managed attachments...")
         txn = self.store.newTransaction(label="Find old managed attachments")
-        cutoffs = (yield txn.oldManagedAttachments(self.cutoff, self.uuid))
+        cutoffs = yield txn.oldManagedAttachments(self.cutoff, self.uuid)
         yield txn.commit()
 
         returnValue(cutoffs)
@@ -495,7 +493,7 @@
         totalRemoved = 0
         while numOrphansRemoved:
             txn = self.store.newTransaction(label="Remove orphaned attachments")
-            numOrphansRemoved = (yield txn.removeOrphanedAttachments(self.uuid, batchSize=self.batchSize))
+            numOrphansRemoved = yield txn.removeOrphanedAttachments(self.uuid, batchSize=self.batchSize)
             yield txn.commit()
             if numOrphansRemoved:
                 totalRemoved += numOrphansRemoved
@@ -526,7 +524,7 @@
         totalRemoved = 0
         while numOldRemoved:
             txn = self.store.newTransaction(label="Remove old dropbox attachments")
-            numOldRemoved = (yield txn.removeOldDropboxAttachments(self.cutoff, self.uuid, batchSize=self.batchSize))
+            numOldRemoved = yield txn.removeOldDropboxAttachments(self.cutoff, self.uuid, batchSize=self.batchSize)
             yield txn.commit()
             if numOldRemoved:
                 totalRemoved += numOldRemoved
@@ -557,7 +555,7 @@
         totalRemoved = 0
         while numOldRemoved:
             txn = self.store.newTransaction(label="Remove old managed attachments")
-            numOldRemoved = (yield txn.removeOldManagedAttachments(self.cutoff, self.uuid, batchSize=self.batchSize))
+            numOldRemoved = yield txn.removeOldManagedAttachments(self.cutoff, self.uuid, batchSize=self.batchSize)
             yield txn.commit()
             if numOldRemoved:
                 totalRemoved += numOldRemoved
@@ -697,7 +695,7 @@
         service.doimplicit = doimplicit
         service.proxies = proxies
         service.when = when
-        result = (yield service.doWork())
+        result = yield service.doWork()
         returnValue(result)
 
 
@@ -711,10 +709,8 @@
 
         total = 0
 
-        allAssignments = {}
-
         for uid in self.uids:
-            count, allAssignments[uid] = (yield self._purgeUID(uid))
+            count = yield self._purgeUID(uid)
             total += count
 
         if self.verbose:
@@ -724,7 +720,7 @@
             else:
                 print("Modified or deleted %s" % (amount,))
 
-        returnValue((total, allAssignments,))
+        returnValue(total)
 
 
     @inlineCallbacks
@@ -734,30 +730,26 @@
             self.when = DateTime.getNowUTC()
 
         # Does the record exist?
-        record = self.directory.recordWithUID(uid)
-        if record is None:
+        record = yield self.directory.recordWithUID(uid)
+        # if record is None:
             # The user has already been removed from the directory service.  We
             # need to fashion a temporary, fake record
 
             # FIXME: probably want a more elegant way to accomplish this,
             # since it requires the aggregate directory to examine these first:
-            record = DirectoryRecord(self.directory, "users", uid, shortNames=(uid,), enabledForCalendaring=True)
-            self.directory._tmpRecords["shortNames"][uid] = record
-            self.directory._tmpRecords["uids"][uid] = record
+            # record = DirectoryRecord(self.directory, "users", uid, shortNames=(uid,), enabledForCalendaring=True)
+            # self.directory._tmpRecords["shortNames"][uid] = record
+            # self.directory._tmpRecords["uids"][uid] = record
 
         # Override augments settings for this record
-        record.enabled = True
-        record.enabledForCalendaring = True
-        record.enabledForAddressBooks = True
+        record.hasCalendars = True
+        record.hasContacts = True
 
-        cua = "urn:uuid:%s" % (uid,)
+        cua = record.canonicalCalendarUserAddress()
 
-        principalCollection = self.directory.principalCollection
-        principal = principalCollection.principalForRecord(record)
-
         # See if calendar home is provisioned
         txn = self.store.newTransaction()
-        storeCalHome = (yield txn.calendarHomeWithUID(uid))
+        storeCalHome = yield txn.calendarHomeWithUID(uid)
         calHomeProvisioned = storeCalHome is not None
 
         # If in "completely" mode, unshare collections, remove notifications
@@ -767,24 +759,23 @@
         yield txn.commit()
 
         count = 0
-        assignments = []
 
         if calHomeProvisioned:
-            count = (yield self._cancelEvents(txn, uid, cua))
+            count = yield self._cancelEvents(txn, uid, cua)
 
         # Remove empty calendar collections (and calendar home if no more
         # calendars)
         yield self._removeCalendarHome(uid)
 
         # Remove VCards
-        count += (yield self._removeAddressbookHome(uid))
+        count += yield self._removeAddressbookHome(uid)
 
         if self.proxies and not self.dryrun:
             if self.verbose:
                 print("Deleting any proxy assignments")
-            assignments = (yield self._purgeProxyAssignments(principal))
+            yield self._purgeProxyAssignments(self.store, record)
 
-        returnValue((count, assignments))
+        returnValue(count)
 
 
     @inlineCallbacks
@@ -799,13 +790,13 @@
                 else:
                     print("Unsharing: %s" % (child.name(),))
             if not self.dryrun:
-                (yield child.unshare())
+                yield child.unshare()
 
         if not self.dryrun:
-            (yield storeCalHome.removeUnacceptedShares())
-            notificationHome = (yield txn.notificationsWithUID(storeCalHome.uid()))
+            yield storeCalHome.removeUnacceptedShares()
+            notificationHome = yield txn.notificationsWithUID(storeCalHome.uid())
             if notificationHome is not None:
-                (yield notificationHome.remove())
+                yield notificationHome.remove()
 
 
     @inlineCallbacks
@@ -826,15 +817,15 @@
 
         count = 0
         txn = self.store.newTransaction()
-        storeCalHome = (yield txn.calendarHomeWithUID(uid))
-        calendarNames = (yield storeCalHome.listCalendars())
+        storeCalHome = yield txn.calendarHomeWithUID(uid)
+        calendarNames = yield storeCalHome.listCalendars()
         yield txn.commit()
 
         for calendarName in calendarNames:
 
             txn = self.store.newTransaction(authz_uid=uid)
-            storeCalHome = (yield txn.calendarHomeWithUID(uid))
-            calendar = (yield storeCalHome.calendarWithName(calendarName))
+            storeCalHome = yield txn.calendarHomeWithUID(uid)
+            calendar = yield storeCalHome.calendarWithName(calendarName)
             childNames = []
 
             if self.completely:
@@ -850,17 +841,17 @@
             for childName in childNames:
 
                 txn = self.store.newTransaction(authz_uid=uid)
-                storeCalHome = (yield txn.calendarHomeWithUID(uid))
-                calendar = (yield storeCalHome.calendarWithName(calendarName))
+                storeCalHome = yield txn.calendarHomeWithUID(uid)
+                calendar = yield storeCalHome.calendarWithName(calendarName)
 
                 try:
-                    childResource = (yield calendar.calendarObjectWithName(childName))
+                    childResource = yield calendar.calendarObjectWithName(childName)
 
                     # Always delete inbox items
                     if self.completely or calendar.isInbox():
                         action = self.CANCELEVENT_SHOULD_DELETE
                     else:
-                        event = (yield childResource.componentForUser())
+                        event = yield childResource.componentForUser()
                         action = self._cancelEvent(event, self.when, cua)
 
                     uri = "/calendars/__uids__/%s/%s/%s" % (storeCalHome.uid(), calendar.name(), childName)
@@ -921,7 +912,7 @@
 
             # Remove empty calendar collections (and calendar home if no more
             # calendars)
-            storeCalHome = (yield txn.calendarHomeWithUID(uid))
+            storeCalHome = yield txn.calendarHomeWithUID(uid)
             if storeCalHome is not None:
                 calendars = list((yield storeCalHome.calendars()))
                 remainingCalendars = len(calendars)
@@ -947,7 +938,7 @@
                         else:
                             print("Deleting calendar home")
                     if not self.dryrun:
-                        (yield storeCalHome.remove())
+                        yield storeCalHome.remove()
 
             # Commit
             yield txn.commit()
@@ -966,7 +957,7 @@
 
         try:
             # Remove VCards
-            storeAbHome = (yield txn.addressbookHomeWithUID(uid))
+            storeAbHome = yield txn.addressbookHomeWithUID(uid)
             if storeAbHome is not None:
                 for abColl in list((yield storeAbHome.addressbooks())):
                     for card in list((yield abColl.addressbookObjects())):
@@ -978,7 +969,7 @@
                             else:
                                 print("Deleting: %s" % (uri,))
                         if not self.dryrun:
-                            (yield card.remove())
+                            yield card.remove()
                         count += 1
                     abName = abColl.name()
                     if self.verbose:
@@ -988,10 +979,14 @@
                             print("Deleting addressbook: %s" % (abName,))
                     if not self.dryrun:
                         # Also remove the addressbook collection itself
+<<<<<<< .working
                         if abColl.owned():
                             yield storeAbHome.removeChildWithName(abName)
                         else:
                             yield abColl.unshare()
+=======
+                        yield storeAbHome.removeChildWithName(abColl.name())
+>>>>>>> .merge-right.r13157
 
                 if self.verbose:
                     if self.dryrun:
@@ -999,7 +994,7 @@
                     else:
                         print("Deleting addressbook home")
                 if not self.dryrun:
-                    (yield storeAbHome.remove())
+                    yield storeAbHome.remove()
 
             # Commit
             yield txn.commit()
@@ -1113,22 +1108,10 @@
 
 
     @inlineCallbacks
-    def _purgeProxyAssignments(self, principal):
+    def _purgeProxyAssignments(self, store, record):
 
-        assignments = []
-
-        for proxyType in ("read", "write"):
-
-            proxyFor = (yield principal.proxyFor(proxyType == "write"))
-            for other in proxyFor:
-                assignments.append((principal.record.uid, proxyType, other.record.uid))
-                (yield removeProxy(self.root, self.directory, self.store, other, principal))
-
-            subPrincipal = principal.getChild("calendar-proxy-" + proxyType)
-            proxies = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
-            for other in proxies.children:
-                assignments.append((str(other).split("/")[3], proxyType, principal.record.uid))
-
-            (yield subPrincipal.writeProperty(davxml.GroupMemberSet(), None))
-
-        returnValue(assignments)
+        txn = store.newTransaction()
+        for readWrite in (True, False):
+            yield txn.removeDelegates(record.uid, readWrite)
+            yield txn.removeDelegateGroups(record.uid, readWrite)
+        yield txn.commit()
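
The proxy cleanup in purge.py now goes through the store: open a transaction, drop the record's read and write delegates and delegate groups, then commit. In the real code this is an @inlineCallbacks generator yielding Deferreds; the synchronous sketch below only shows the shape of the calls, and FakeStore/FakeTxn are invented stand-ins rather than the real txdav transaction API.

# Synchronous sketch of the transaction-based delegate cleanup used by
# _purgeProxyAssignments above.  FakeStore and FakeTxn are toy stand-ins.


class FakeTxn(object):
    def __init__(self, delegates):
        # delegates maps (delegator uid, readWrite) -> set of delegate uids
        self.delegates = delegates

    def removeDelegates(self, uid, readWrite):
        self.delegates.pop((uid, readWrite), None)

    def removeDelegateGroups(self, uid, readWrite):
        # Group delegations are tracked separately in the real store;
        # here they share the same toy mapping.
        self.delegates.pop((uid, readWrite, "groups"), None)

    def commit(self):
        pass


class FakeStore(object):
    def __init__(self, delegates):
        self.delegates = delegates

    def newTransaction(self):
        return FakeTxn(self.delegates)


def purgeProxyAssignments(store, uid):
    txn = store.newTransaction()
    for readWrite in (True, False):
        txn.removeDelegates(uid, readWrite)
        txn.removeDelegateGroups(uid, readWrite)
    txn.commit()


store = FakeStore({(u"mercury", True): {u"venus"}, (u"mercury", False): {u"earth"}})
purgeProxyAssignments(store, u"mercury")
assert store.delegates == {}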

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/push.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/push.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/push.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -20,6 +20,7 @@
 from argparse import ArgumentParser
 from twext.python.log import Logger
 from twisted.internet.defer import inlineCallbacks
+from twext.who.idirectory import RecordType
 import time
 
 log = Logger()
@@ -59,7 +60,7 @@
 def displayAPNSubscriptions(store, directory, root, users):
     for user in users:
         print
-        record = directory.recordWithShortName("users", user)
+        record = yield directory.recordWithShortName(RecordType.user, user)
         if record is not None:
             print("User %s (%s)..." % (user, record.uid))
             txn = store.newTransaction(label="Display APN Subscriptions")
@@ -81,7 +82,7 @@
                     else:
                         uid = path
                         collection = None
-                    record = directory.recordWithUID(uid)
+                    record = yield directory.recordWithUID(uid)
                     user = record.shortNames[0]
                     if collection:
                         print("...is subscribed to a share from %s's %s home" % (user, resource),)

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/resources.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/resources.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/resources.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -15,38 +15,36 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ##
+
 from __future__ import print_function
 
+__all__ = [
+    "migrateResources",
+]
+
+from getopt import getopt, GetoptError
+from grp import getgrnam
 import os
+from pwd import getpwnam
 import sys
-from grp import getgrnam
-from pwd import getpwnam
-from getopt import getopt, GetoptError
 
+from calendarserver.tools.util import (
+    loadConfig, setupMemcached, checkDirectory
+)
+from twext.python.log import Logger, StandardIOObserver
 from twisted.internet import reactor
 from twisted.internet.defer import inlineCallbacks
 from twisted.python.util import switchUID
-
-from twext.python.log import Logger, StandardIOObserver
-
 from twistedcaldav.config import config, ConfigurationError
 from twistedcaldav.directory.appleopendirectory import OpenDirectoryService
 from twistedcaldav.directory.directory import DirectoryService, DirectoryError
 from twistedcaldav.directory.xmlfile import XMLDirectoryService
+from txdav.who.util import directoryFromConfig
 
-from calendarserver.platform.darwin.od import dsattributes
-from calendarserver.tools.util import loadConfig, getDirectory, setupMemcached, checkDirectory
-
 log = Logger()
 
 
 
-__all__ = [
-    "migrateResources",
-]
-
-
-
 def usage():
 
     name = os.path.basename(sys.argv[0])
@@ -141,28 +139,37 @@
         os.umask(config.umask)
 
         # Configure memcached client settings prior to setting up resource
-        # hierarchy (in getDirectory)
+        # hierarchy
         setupMemcached(config)
 
         try:
-            config.directory = getDirectory()
+            config.directory = directoryFromConfig(config)
         except DirectoryError, e:
             abort(e)
 
     except ConfigurationError, e:
         abort(e)
 
+    # FIXME: this all has to change:
     # Find the opendirectory service
     userService = config.directory.serviceForRecordType("users")
     resourceService = config.directory.serviceForRecordType("resources")
-    if (not isinstance(userService, OpenDirectoryService) or
-        not isinstance(resourceService, XMLDirectoryService)):
-        abort("This script only migrates resources and locations from OpenDirectory to XML; this calendar server does not have such a configuration.")
+    if (
+        not isinstance(userService, OpenDirectoryService) or
+        not isinstance(resourceService, XMLDirectoryService)
+    ):
+        abort(
+            "This script only migrates resources and locations from "
+            "OpenDirectory to XML; this calendar server does not have such a "
+            "configuration."
+        )
 
     #
     # Start the reactor
     #
-    reactor.callLater(0, migrate, userService, resourceService, verbose=verbose)
+    reactor.callLater(
+        0, migrate, userService, resourceService, verbose=verbose
+    )
     reactor.run()
 
 
@@ -186,8 +193,8 @@
     """
 
     attrs = [
-        dsattributes.kDS1AttrGeneratedUID,
-        dsattributes.kDS1AttrDistinguishedName,
+        "dsAttrTypeStandard:GeneratedUID",
+        "dsAttrTypeStandard:RealName",
     ]
 
     if verbose:
@@ -207,24 +214,26 @@
 
 
 @inlineCallbacks
-def migrateResources(sourceService, destService, autoSchedules=None,
-    queryMethod=queryForType, verbose=False):
+def migrateResources(
+    sourceService, destService, autoSchedules=None,
+    queryMethod=queryForType, verbose=False
+):
 
     directoryRecords = []
     augmentRecords = []
 
     for recordTypeOD, recordType in (
-        (dsattributes.kDSStdRecordTypeResources, DirectoryService.recordType_resources),
-        (dsattributes.kDSStdRecordTypePlaces, DirectoryService.recordType_locations),
+        ("dsRecTypeStandard:Resources", DirectoryService.recordType_resources),
+        ("dsRecTypeStandard:Places", DirectoryService.recordType_locations),
     ):
         data = queryMethod(sourceService, recordTypeOD, verbose=verbose)
         for recordName, val in data:
-            guid = val.get(dsattributes.kDS1AttrGeneratedUID, None)
-            fullName = val.get(dsattributes.kDS1AttrDistinguishedName, None)
+            guid = val.get("dsAttrTypeStandard:GeneratedUID", None)
+            fullName = val.get("dsAttrTypeStandard:RealName", None)
             if guid and fullName:
                 if not recordName:
                     recordName = guid
-                record = destService.recordWithGUID(guid)
+                record = yield destService.recordWithGUID(guid)
                 if record is None:
                     if verbose:
                         print("Migrating %s (%s)" % (fullName, recordType))
@@ -233,23 +242,29 @@
                         autoSchedule = autoSchedules.get(guid, 1)
                     else:
                         autoSchedule = True
-                    augmentRecord = (yield destService.augmentService.getAugmentRecord(guid, recordType))
-                    augmentRecord.autoSchedule = autoSchedule
-                    augmentRecords.append(augmentRecord)
-
-                    directoryRecords.append(
-                        (recordType,
-                            {
-                                "guid" : guid,
-                                "shortNames" : [recordName],
-                                "fullName" : fullName,
-                            }
+                    augmentRecord = (
+                        yield destService.augmentService.getAugmentRecord(
+                            guid, recordType
                         )
                     )
+                    if autoSchedule:
+                        augmentRecord.autoScheduleMode = "automatic"
+                    else:
+                        augmentRecord.autoScheduleMode = "none"
+                    augmentRecords.append(augmentRecord)
 
+                    directoryRecords.append((
+                        recordType,
+                        {
+                            "guid": guid,
+                            "shortNames": [recordName],
+                            "fullName": fullName,
+                        }
+                    ))
+
     destService.createRecords(directoryRecords)
 
-    (yield destService.augmentService.addAugmentRecords(augmentRecords))
+    yield destService.augmentService.addAugmentRecords(augmentRecords)
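
migrateResources now translates the legacy boolean autoSchedule augment into the newer autoScheduleMode string, mapping True to "automatic" and False to "none". A tiny illustrative sketch of that translation is below; it operates on plain tuples and does not touch real augment records.

# Sketch of the legacy autoSchedule -> autoScheduleMode translation above.
def autoScheduleModeFor(autoSchedule):
    return "automatic" if autoSchedule else "none"


legacy = [(u"guid-1", True), (u"guid-2", False)]
migrated = [(guid, autoScheduleModeFor(flag)) for guid, flag in legacy]
assert migrated == [(u"guid-1", "automatic"), (u"guid-2", "none")]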
 
 
 

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/directory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/directory.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/directory.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -83,16 +83,22 @@
     add("First Name", record.firstName)
     add("Last Name" , record.lastName )
 
-    for email in record.emailAddresses:
-        add("Email Address", email)
+    try:
+        for email in record.emailAddresses:
+            add("Email Address", email)
+    except AttributeError:
+        pass
 
-    for cua in record.calendarUserAddresses:
-        add("Calendar User Address", cua)
+    try:
+        for cua in record.calendarUserAddresses:
+            add("Calendar User Address", cua)
+    except AttributeError:
+        pass
 
     add("Server ID"           , record.serverID)
     add("Enabled"             , record.enabled)
-    add("Enabled for Calendar", record.enabledForCalendaring)
-    add("Enabled for Contacts", record.enabledForAddressBooks)
+    add("Enabled for Calendar", record.hasCalendars)
+    add("Enabled for Contacts", record.hasContacts)
 
     return succeed(table.toString())
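
Not every twext.who record type carries emailAddresses or calendarUserAddresses, so the shell's record formatter above now treats those attributes as optional and skips the section when they are missing. The short sketch below shows the same defensive pattern; BareRecord is a made-up stand-in.

# Sketch of treating record attributes as optional, as directory.py does above.
from __future__ import print_function


def describe(record):
    lines = [u"UID: %s" % (record.uid,)]
    try:
        for email in record.emailAddresses:
            lines.append(u"Email Address: %s" % (email,))
    except AttributeError:
        # Some record types have no email addresses at all; skip the section.
        pass
    return u"\n".join(lines)


class BareRecord(object):
    uid = u"location01"  # deliberately has no emailAddresses attribute


print(describe(BareRecord()))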
 

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/terminal.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/terminal.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/terminal.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -51,7 +51,6 @@
 from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE
 
 from calendarserver.tools.cmdline import utilityMain, WorkerService
-from calendarserver.tools.util import getDirectory
 from calendarserver.tools.shell.cmd import Commands, UsageError as CommandUsageError
 
 log = Logger()
@@ -116,9 +115,9 @@
     @type config: L{twistedcaldav.config.Config}
     """
 
-    def __init__(self, store, directory, options, reactor, config):
+    def __init__(self, store, options, reactor, config):
         super(ShellService, self).__init__(store)
-        self.directory = directory
+        self.directory = store.directoryService()
         self.options = options
         self.reactor = reactor
         self.config = config
@@ -434,8 +433,7 @@
 
     def makeService(store):
         from twistedcaldav.config import config
-        directory = getDirectory()
-        return ShellService(store, directory, options, reactor, config)
+        return ShellService(store, options, reactor, config)
 
     print("Initializing shell...")
 

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/test/test_vfs.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/test/test_vfs.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/test/test_vfs.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -18,32 +18,53 @@
 from twisted.trial.unittest import TestCase
 from twisted.internet.defer import succeed, inlineCallbacks
 
+# from twext.who.test.test_xml import xmlService
+
+# from txdav.common.datastore.test.util import buildStore
+
 from calendarserver.tools.shell.vfs import ListEntry
 from calendarserver.tools.shell.vfs import File, Folder
-from calendarserver.tools.shell.vfs import UIDsFolder
-from calendarserver.tools.shell.terminal import ShellService
-from twistedcaldav.directory.test.test_xmlfile import XMLFileBase
-from txdav.common.datastore.test.util import buildStore
+# from calendarserver.tools.shell.vfs import UIDsFolder
+# from calendarserver.tools.shell.terminal import ShellService
 
 
+
 class TestListEntry(TestCase):
     def test_toString(self):
-        self.assertEquals(ListEntry(None, File  , "thingo").toString(), "thingo")
-        self.assertEquals(ListEntry(None, File  , "thingo", Foo="foo").toString(), "thingo")
-        self.assertEquals(ListEntry(None, Folder, "thingo").toString(), "thingo/")
-        self.assertEquals(ListEntry(None, Folder, "thingo", Foo="foo").toString(), "thingo/")
+        self.assertEquals(
+            ListEntry(None, File, "thingo").toString(),
+            "thingo"
+        )
+        self.assertEquals(
+            ListEntry(None, File, "thingo", Foo="foo").toString(),
+            "thingo"
+        )
+        self.assertEquals(
+            ListEntry(None, Folder, "thingo").toString(),
+            "thingo/"
+        )
+        self.assertEquals(
+            ListEntry(None, Folder, "thingo", Foo="foo").toString(),
+            "thingo/"
+        )
 
 
     def test_fieldNamesImplicit(self):
         # This test assumes File doesn't set list.fieldNames.
         assert not hasattr(File.list, "fieldNames")
 
-        self.assertEquals(set(ListEntry(File(None, ()), File, "thingo").fieldNames), set(("Name",)))
+        self.assertEquals(
+            set(ListEntry(File(None, ()), File, "thingo").fieldNames),
+            set(("Name",))
+        )
 
 
     def test_fieldNamesExplicit(self):
         def fieldNames(fileClass):
-            return ListEntry(fileClass(None, ()), fileClass, "thingo", Flavor="Coconut", Style="Hard")
+            return ListEntry(
+                fileClass(None, ()), fileClass, "thingo",
+                Flavor="Coconut", Style="Hard"
+            )
 
         # Full list
         class MyFile1(File):
@@ -83,14 +104,24 @@
 
         # Name first, rest sorted by field name
         self.assertEquals(
-            tuple(ListEntry(File(None, ()), File, "thingo", Flavor="Coconut", Style="Hard").toFields()),
+            tuple(
+                ListEntry(
+                    File(None, ()), File, "thingo",
+                    Flavor="Coconut", Style="Hard"
+                ).toFields()
+            ),
             ("thingo", "Coconut", "Hard")
         )
 
 
     def test_toFieldsExplicit(self):
         def fields(fileClass):
-            return tuple(ListEntry(fileClass(None, ()), fileClass, "thingo", Flavor="Coconut", Style="Hard").toFields())
+            return tuple(
+                ListEntry(
+                    fileClass(None, ()), fileClass, "thingo",
+                    Flavor="Coconut", Style="Hard"
+                ).toFields()
+            )
 
         # Full list
         class MyFile1(File):
@@ -125,34 +156,23 @@
 
 
 
-class DirectoryStubber(XMLFileBase):
-    """
-    Object which creates a stub L{IDirectoryService}.
-    """
-    def __init__(self, testCase):
-        self.testCase = testCase
-
-
-    def mktemp(self):
-        return self.testCase.mktemp()
-
-
-
 class UIDsFolderTests(TestCase):
     """
     L{UIDsFolder} contains all principals and is keyed by UID.
     """
 
-    @inlineCallbacks
-    def setUp(self):
-        """
-        Create a L{UIDsFolder}.
-        """
-        directory = DirectoryStubber(self).service()
-        self.svc = ShellService(store=(yield buildStore(self, None, directoryService=directory)),
-                                directory=directory,
-                                options=None, reactor=None, config=None)
-        self.folder = UIDsFolder(self.svc, ())
+    # @inlineCallbacks
+    # def setUp(self):
+    #     """
+    #     Create a L{UIDsFolder}.
+    #     """
+    #     directory = xmlService(self.mktemp())
+    #     self.svc = ShellService(
+    #         store=(yield buildStore(self, None, directoryService=directory)),
+    #         directory=directory,
+    #         options=None, reactor=None, config=None
+    #     )
+    #     self.folder = UIDsFolder(self.svc, ())
 
 
     @inlineCallbacks
@@ -171,8 +191,20 @@
         listing = list((yield self.folder.list()))
         self.assertEquals(
             [x.fields for x in listing],
-            [{"Record Type": "users", "Short Name": "wsanchez",
-              "Full Name": "Wilfredo Sanchez", "Name": wsanchez},
-              {"Record Type": "users", "Short Name": "dreid",
-              "Full Name": "David Reid", "Name": dreid}]
+            [
+                {
+                    "Record Type": "users",
+                    "Short Name": "wsanchez",
+                    "Full Name": "Wilfredo Sanchez",
+                    "Name": wsanchez
+                },
+                {
+                    "Record Type": "users",
+                    "Short Name": "dreid",
+                    "Full Name": "David Reid",
+                    "Name": dreid
+                },
+            ]
         )
+
+    test_list.todo = "setup() needs to be reimplemented"

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/vfs.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/vfs.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/shell/vfs.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -334,7 +334,9 @@
     def list(self):
         names = set()
 
-        for record in self.service.directory.listRecords(self.recordType):
+        for record in self.service.directory.recordsWithRecordType(
+            self.recordType
+        ):
             for shortName in record.shortNames:
                 if shortName in names:
                     continue
@@ -411,7 +413,7 @@
                 if (
                     self.record is not None and
                     self.service.config.EnableCalDAV and
-                    self.record.enabledForCalendaring
+                    self.record.hasCalendars
                 ):
                     create = True
                 else:

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/augments.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/augments.xml	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/augments.xml	2014-04-04 17:20:27 UTC (rev 13158)
@@ -20,12 +20,6 @@
 
 <augments>
   <record>
-    <uid>Default</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-  </record>
-  <record>
     <uid>user01</uid>
     <enable>true</enable>
     <enable-calendar>true</enable-calendar>

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/caldavd.plist
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/caldavd.plist	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/caldavd.plist	2014-04-04 17:20:27 UTC (rev 13158)
@@ -85,29 +85,29 @@
     <key>ServerRoot</key>
     <string>%(ServerRoot)s</string>
 
+    <!-- Data root -->
+    <key>DataRoot</key>
+    <string>%(DataRoot)s</string>
+
     <!-- Database root -->
     <key>DatabaseRoot</key>
     <string>%(DatabaseRoot)s</string>
 
-    <!-- Data root -->
-    <key>DataRoot</key>
-    <string>Data</string>
-
     <!-- Document root -->
     <key>DocumentRoot</key>
-    <string>Documents</string>
+    <string>%(DocumentRoot)s</string>
 
     <!-- Configuration root -->
     <key>ConfigRoot</key>
-    <string>config</string>
+    <string>%(ConfigRoot)s</string>
 
     <!-- Log root -->
     <key>LogRoot</key>
-    <string>Logs</string>
+    <string>%(LogRoot)s</string>
 
     <!-- Run root -->
     <key>RunRoot</key>
-    <string>Logs/state</string>
+    <string>%(RunRoot)s</string>
 
     <!-- Child aliases -->
     <key>Aliases</key>
@@ -147,7 +147,7 @@
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
-      
+
       <key>params</key>
       <dict>
         <key>xmlFile</key>
@@ -167,7 +167,7 @@
       <true/>
       <key>type</key>
       <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
-      
+
       <key>params</key>
       <dict>
         <key>xmlFile</key>
@@ -180,14 +180,14 @@
         </array>
       </dict>
     </dict>
-    
+
     <!-- Open Directory Service (Mac OS X) -->
     <!--
     <key>DirectoryService</key>
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.appleopendirectory.OpenDirectoryService</string>
-      
+
       <key>params</key>
       <dict>
         <key>node</key>
@@ -211,7 +211,7 @@
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.augment.AugmentXMLDB</string>
-      
+
       <key>params</key>
       <dict>
         <key>xmlFiles</key>
@@ -220,14 +220,14 @@
         </array>
       </dict>
     </dict>
-    
+
     <!-- Sqlite Augment Service -->
     <!--
     <key>AugmentService</key>
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.augment.AugmentSqliteDB</string>
-      
+
       <key>params</key>
       <dict>
         <key>dbpath</key>
@@ -242,7 +242,7 @@
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.augment.AugmentPostgreSQLDB</string>
-      
+
       <key>params</key>
       <dict>
         <key>host</key>
@@ -258,7 +258,7 @@
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.calendaruserproxy.ProxySqliteDB</string>
-      
+
       <key>params</key>
       <dict>
         <key>dbpath</key>
@@ -272,7 +272,7 @@
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.calendaruserproxy.ProxyPostgreSQLDB</string>
-      
+
       <key>params</key>
       <dict>
         <key>host</key>
@@ -515,6 +515,65 @@
           </dict>
         </dict>
 
+        <key>SimpleLineNotifier</key>
+        <dict>
+          <!-- Simple line notification service (for testing) -->
+          <key>Service</key>
+          <string>twistedcaldav.notify.SimpleLineNotifierService</string>
+          <key>Enabled</key>
+          <false/>
+          <key>Port</key>
+          <integer>62308</integer>
+        </dict>
+
+        <key>XMPPNotifier</key>
+        <dict>
+          <!-- XMPP notification service -->
+          <key>Service</key>
+          <string>twistedcaldav.notify.XMPPNotifierService</string>
+          <key>Enabled</key>
+          <false/>
+
+          <!-- XMPP host and port to contact -->
+          <key>Host</key>
+          <string>xmpp.host.name</string>
+          <key>Port</key>
+          <integer>5222</integer>
+
+          <!-- Jabber ID and password for the server -->
+          <key>JID</key>
+          <string>jid@xmpp.host.name/resource</string>
+          <key>Password</key>
+          <string>password_goes_here</string>
+
+          <!-- PubSub service address -->
+          <key>ServiceAddress</key>
+          <string>pubsub.xmpp.host.name</string>
+
+          <key>NodeConfiguration</key>
+          <dict>
+            <key>pubsub#deliver_payloads</key>
+            <string>1</string>
+            <key>pubsub#persist_items</key>
+            <string>1</string>
+          </dict>
+
+          <!-- Sends a presence notification to XMPP server at this interval (prevents disconnect) -->
+          <key>KeepAliveSeconds</key>
+          <integer>120</integer>
+
+          <!-- Sends a pubsub publish to a particular heartbeat node at this interval -->
+          <key>HeartbeatMinutes</key>
+          <integer>30</integer>
+
+          <!-- List of glob-like expressions defining which XMPP JIDs can converse with the server (for debugging) -->
+          <key>AllowedJIDs</key>
+          <array>
+            <!--
+            <string>*.example.com</string>
+             -->
+          </array>
+        </dict>
       </dict>
     </dict>
 
@@ -651,6 +710,7 @@
 	<key>UsePackageTimezones</key>
 	<true/>
 
+
     <!--
         Miscellaneous items
       -->
@@ -666,7 +726,7 @@
     <!-- Support for Content-Encoding compression options as specified in RFC2616 Section 3.5 -->
     <key>ResponseCompression</key>
     <false/>
-    
+
     <!-- The retry-after value (in seconds) to return with a 503 error. -->
     <key>HTTPRetryAfter</key>
     <integer>180</integer>
@@ -705,7 +765,6 @@
     <key>ResponseCacheTimeout</key>
     <integer>30</integer> <!-- in minutes -->
 
-
     <!-- For unit tests, enable SharedConnectionPool so we don't use up shared memory -->
     <key>SharedConnectionPool</key>
     <true/>
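
The test plist above now takes all of its root paths (DataRoot, DocumentRoot, ConfigRoot, LogRoot, RunRoot) as %(...)s placeholders instead of hard-coded relative paths. How the test harness fills them in is not shown in this diff; a minimal sketch assuming plain Python %-interpolation is below.

# Sketch only: substituting the templated root paths in the test plist.
# The substitution mechanism is assumed, not shown in this changeset.
from __future__ import print_function

template = (
    "    <key>DataRoot</key>\n"
    "    <string>%(DataRoot)s</string>\n"
    "    <key>LogRoot</key>\n"
    "    <string>%(LogRoot)s</string>\n"
)

print(template % {
    "DataRoot": "/tmp/gateway/Data",
    "LogRoot": "/tmp/gateway/Logs",
})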

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/resources-locations.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/resources-locations.xml	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/resources-locations.xml	2014-04-04 17:20:27 UTC (rev 13158)
@@ -18,7 +18,110 @@
 
 <!DOCTYPE accounts SYSTEM "accounts.dtd">
 
-<accounts realm="Test Realm">
+<directory realm="Test Realm">
+  <record type="location">
+    <short-name>location01</short-name>
+    <uid>location01</uid>
+    <full-name>Room 01</full-name>
+  </record>
+  <record type="location">
+    <short-name>location02</short-name>
+    <uid>location02</uid>
+    <full-name>Room 02</full-name>
+  </record>
+  <record type="location">
+    <short-name>location03</short-name>
+    <uid>location03</uid>
+    <full-name>Room 03</full-name>
+  </record>
+  <record type="location">
+    <short-name>location04</short-name>
+    <uid>location04</uid>
+    <full-name>Room 04</full-name>
+  </record>
+  <record type="location">
+    <short-name>location05</short-name>
+    <uid>location05</uid>
+    <full-name>Room 05</full-name>
+  </record>
+  <record type="location">
+    <short-name>location06</short-name>
+    <uid>location06</uid>
+    <full-name>Room 06</full-name>
+  </record>
+  <record type="location">
+    <short-name>location07</short-name>
+    <uid>location07</uid>
+    <full-name>Room 07</full-name>
+  </record>
+  <record type="location">
+    <short-name>location08</short-name>
+    <uid>location08</uid>
+    <full-name>Room 08</full-name>
+  </record>
+  <record type="location">
+    <short-name>location09</short-name>
+    <uid>location09</uid>
+    <full-name>Room 09</full-name>
+  </record>
+  <record type="location">
+    <short-name>location10</short-name>
+    <uid>location10</uid>
+    <full-name>Room 10</full-name>
+  </record>
+
+  <record type="resource">
+    <short-name>resource01</short-name>
+    <uid>resource01</uid>
+    <full-name>Resource 01</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource02</short-name>
+    <uid>resource02</uid>
+    <full-name>Resource 02</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource03</short-name>
+    <uid>resource03</uid>
+    <full-name>Resource 03</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource04</short-name>
+    <uid>resource04</uid>
+    <full-name>Resource 04</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource05</short-name>
+    <uid>resource05</uid>
+    <full-name>Resource 05</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource06</short-name>
+    <uid>resource06</uid>
+    <full-name>Resource 06</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource07</short-name>
+    <uid>resource07</uid>
+    <full-name>Resource 07</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource08</short-name>
+    <uid>resource08</uid>
+    <full-name>Resource 08</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource09</short-name>
+    <uid>resource09</uid>
+    <full-name>Resource 09</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource10</short-name>
+    <uid>resource10</uid>
+    <full-name>Resource 10</full-name>
+  </record>
+
+  <!--
   <location repeat="10">
     <uid>location%02d</uid>
     <guid>location%02d</guid>
@@ -31,4 +134,5 @@
     <password>resource%02d</password>
     <name>Resource %02d</name>
   </resource>
-</accounts>
+-->
+</directory>
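
The explicit <record> elements above replace the old repeat="10"/%02d template accounts. A minimal sketch of reading the new <directory> layout with Python's standard ElementTree (the file name and the dict built here are illustrative only, not how CalendarServer itself loads the fixture):

    # Sketch: walking the new <directory>/<record> layout with ElementTree.
    import xml.etree.ElementTree as ET

    tree = ET.parse("resources-locations.xml")
    records = {}
    for record in tree.getroot().findall("record"):
        uid = record.findtext("uid")
        records[uid] = {
            "type": record.get("type"),
            "shortName": record.findtext("short-name"),
            "fullName": record.findtext("full-name"),
        }

    # e.g. records["location01"]["fullName"] == "Room 01"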

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/users-groups.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/users-groups.xml	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/gateway/users-groups.xml	2014-04-04 17:20:27 UTC (rev 13158)
@@ -18,7 +18,95 @@
 
 <!DOCTYPE accounts SYSTEM "accounts.dtd">
 
-<accounts realm="Test Realm">
+<directory realm="Test Realm">
+ <record type="user">
+    <short-name>user01</short-name>
+    <uid>user01</uid>
+    <password>user01</password>
+    <full-name>User 01</full-name>
+    <email>user01@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user02</short-name>
+    <uid>user02</uid>
+    <password>user02</password>
+    <full-name>User 02</full-name>
+    <email>user02@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user03</short-name>
+    <uid>user03</uid>
+    <password>user03</password>
+    <full-name>User 03</full-name>
+    <email>user03@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user04</short-name>
+    <uid>user04</uid>
+    <password>user04</password>
+    <full-name>User 04</full-name>
+    <email>user04@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user05</short-name>
+    <uid>user05</uid>
+    <password>user05</password>
+    <full-name>User 05</full-name>
+    <email>user05@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user06</short-name>
+    <uid>user06</uid>
+    <password>user06</password>
+    <full-name>User 06</full-name>
+    <email>user06@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user07</short-name>
+    <uid>user07</uid>
+    <password>user07</password>
+    <full-name>User 07</full-name>
+    <email>user07@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user08</short-name>
+    <uid>user08</uid>
+    <password>user08</password>
+    <full-name>User 08</full-name>
+    <email>user08@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user09</short-name>
+    <uid>user09</uid>
+    <password>user09</password>
+    <full-name>User 09</full-name>
+    <email>user09@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user10</short-name>
+    <uid>user10</uid>
+    <password>user10</password>
+    <full-name>User 10</full-name>
+    <email>user10@example.com</email>
+  </record>
+
+  <record type="group">
+    <uid>e5a6142c-4189-4e9e-90b0-9cd0268b314b</uid>
+    <short-name>testgroup1</short-name>
+    <full-name>Group 01</full-name>
+      <member-uid type="users">user01</member-uid>
+      <member-uid type="users">user02</member-uid>
+  </record>
+  <!--
   <user repeat="10">
     <uid>user%02d</uid>
     <guid>user%02d</guid>
@@ -37,13 +125,5 @@
       <member type="users">user02</member>
     </members>
   </group>
-  <group>
-    <uid>testgroup2</uid>
-    <guid>f5a6142c-4189-4e9e-90b0-9cd0268b314b</guid>
-    <password>test</password>
-    <name>Group 02</name>
-    <members>
-      <member type="users">user01</member>
-    </members>
-  </group>
-</accounts>
+  -->
+</directory>

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/caldavd.plist
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/caldavd.plist	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/caldavd.plist	2014-04-04 17:20:27 UTC (rev 13158)
@@ -139,7 +139,7 @@
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
-      
+
       <key>params</key>
       <dict>
         <key>xmlFile</key>
@@ -159,7 +159,7 @@
       <true/>
       <key>type</key>
       <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
-      
+
       <key>params</key>
       <dict>
         <key>xmlFile</key>
@@ -168,17 +168,18 @@
         <array>
             <string>resources</string>
             <string>locations</string>
+            <string>addresses</string>
         </array>
       </dict>
     </dict>
-    
+
     <!-- Open Directory Service (Mac OS X) -->
     <!--
     <key>DirectoryService</key>
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.appleopendirectory.OpenDirectoryService</string>
-      
+
       <key>params</key>
       <dict>
         <key>node</key>
@@ -202,7 +203,7 @@
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.augment.AugmentXMLDB</string>
-      
+
       <key>params</key>
       <dict>
         <key>xmlFiles</key>
@@ -211,14 +212,14 @@
         </array>
       </dict>
     </dict>
-    
+
     <!-- Sqlite Augment Service -->
     <!--
     <key>AugmentService</key>
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.augment.AugmentSqliteDB</string>
-      
+
       <key>params</key>
       <dict>
         <key>dbpath</key>
@@ -233,7 +234,7 @@
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.augment.AugmentPostgreSQLDB</string>
-      
+
       <key>params</key>
       <dict>
         <key>host</key>
@@ -249,7 +250,7 @@
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.calendaruserproxy.ProxySqliteDB</string>
-      
+
       <key>params</key>
       <dict>
         <key>dbpath</key>
@@ -263,7 +264,7 @@
     <dict>
       <key>type</key>
       <string>twistedcaldav.directory.calendaruserproxy.ProxyPostgreSQLDB</string>
-      
+
       <key>params</key>
       <dict>
         <key>host</key>
@@ -692,7 +693,7 @@
     <!-- Support for Content-Encoding compression options as specified in RFC2616 Section 3.5 -->
     <key>ResponseCompression</key>
     <false/>
-    
+
     <!-- The retry-after value (in seconds) to return with a 503 error. -->
     <key>HTTPRetryAfter</key>
     <integer>180</integer>

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/resources-locations.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/resources-locations.xml	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/resources-locations.xml	2014-04-04 17:20:27 UTC (rev 13158)
@@ -18,7 +18,110 @@
 
 <!DOCTYPE accounts SYSTEM "accounts.dtd">
 
-<accounts realm="Test Realm">
+<directory realm="Test Realm">
+  <record type="location">
+    <short-name>location01</short-name>
+    <uid>location01</uid>
+    <full-name>Room 01</full-name>
+  </record>
+  <record type="location">
+    <short-name>location02</short-name>
+    <uid>location02</uid>
+    <full-name>Room 02</full-name>
+  </record>
+  <record type="location">
+    <short-name>location03</short-name>
+    <uid>location03</uid>
+    <full-name>Room 03</full-name>
+  </record>
+  <record type="location">
+    <short-name>location04</short-name>
+    <uid>location04</uid>
+    <full-name>Room 04</full-name>
+  </record>
+  <record type="location">
+    <short-name>location05</short-name>
+    <uid>location05</uid>
+    <full-name>Room 05</full-name>
+  </record>
+  <record type="location">
+    <short-name>location06</short-name>
+    <uid>location06</uid>
+    <full-name>Room 06</full-name>
+  </record>
+  <record type="location">
+    <short-name>location07</short-name>
+    <uid>location07</uid>
+    <full-name>Room 07</full-name>
+  </record>
+  <record type="location">
+    <short-name>location08</short-name>
+    <uid>location08</uid>
+    <full-name>Room 08</full-name>
+  </record>
+  <record type="location">
+    <short-name>location09</short-name>
+    <uid>location09</uid>
+    <full-name>Room 09</full-name>
+  </record>
+  <record type="location">
+    <short-name>location10</short-name>
+    <uid>location10</uid>
+    <full-name>Room 10</full-name>
+  </record>
+
+  <record type="resource">
+    <short-name>resource01</short-name>
+    <uid>resource01</uid>
+    <full-name>Resource 01</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource02</short-name>
+    <uid>resource02</uid>
+    <full-name>Resource 02</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource03</short-name>
+    <uid>resource03</uid>
+    <full-name>Resource 03</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource04</short-name>
+    <uid>resource04</uid>
+    <full-name>Resource 04</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource05</short-name>
+    <uid>resource05</uid>
+    <full-name>Resource 05</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource06</short-name>
+    <uid>resource06</uid>
+    <full-name>Resource 06</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource07</short-name>
+    <uid>resource07</uid>
+    <full-name>Resource 07</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource08</short-name>
+    <uid>resource08</uid>
+    <full-name>Resource 08</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource09</short-name>
+    <uid>resource09</uid>
+    <full-name>Resource 09</full-name>
+  </record>
+  <record type="resource">
+    <short-name>resource10</short-name>
+    <uid>resource10</uid>
+    <full-name>Resource 10</full-name>
+  </record>
+
+  <!--
   <location repeat="10">
     <uid>location%02d</uid>
     <guid>location%02d</guid>
@@ -31,4 +134,5 @@
     <password>resource%02d</password>
     <name>Resource %02d</name>
   </resource>
-</accounts>
+-->
+</directory>

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/users-groups.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/users-groups.xml	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/principals/users-groups.xml	2014-04-04 17:20:27 UTC (rev 13158)
@@ -18,7 +18,95 @@
 
 <!DOCTYPE accounts SYSTEM "accounts.dtd">
 
-<accounts realm="Test Realm">
+<directory realm="Test Realm">
+ <record type="user">
+    <short-name>user01</short-name>
+    <uid>user01</uid>
+    <password>user01</password>
+    <full-name>User 01</full-name>
+    <email>user01@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user02</short-name>
+    <uid>user02</uid>
+    <password>user02</password>
+    <full-name>User 02</full-name>
+    <email>user02@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user03</short-name>
+    <uid>user03</uid>
+    <password>user03</password>
+    <full-name>User 03</full-name>
+    <email>user03@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user04</short-name>
+    <uid>user04</uid>
+    <password>user04</password>
+    <full-name>User 04</full-name>
+    <email>user04@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user05</short-name>
+    <uid>user05</uid>
+    <password>user05</password>
+    <full-name>User 05</full-name>
+    <email>user05@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user06</short-name>
+    <uid>user06</uid>
+    <password>user06</password>
+    <full-name>User 06</full-name>
+    <email>user06@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user07</short-name>
+    <uid>user07</uid>
+    <password>user07</password>
+    <full-name>User 07</full-name>
+    <email>user07@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user08</short-name>
+    <uid>user08</uid>
+    <password>user08</password>
+    <full-name>User 08</full-name>
+    <email>user08@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user09</short-name>
+    <uid>user09</uid>
+    <password>user09</password>
+    <full-name>User 09</full-name>
+    <email>user09@example.com</email>
+  </record>
+
+  <record type="user">
+    <short-name>user10</short-name>
+    <uid>user10</uid>
+    <password>user10</password>
+    <full-name>User 10</full-name>
+    <email>user10@example.com</email>
+  </record>
+
+  <record type="group">
+    <uid>e5a6142c-4189-4e9e-90b0-9cd0268b314b</uid>
+    <short-name>testgroup1</short-name>
+    <full-name>Group 01</full-name>
+      <member-uid type="users">user01</member-uid>
+      <member-uid type="users">user02</member-uid>
+  </record>
+  <!--
   <user repeat="10">
     <uid>user%02d</uid>
     <guid>user%02d</guid>
@@ -37,4 +125,5 @@
       <member type="users">user02</member>
     </members>
   </group>
-</accounts>
+  -->
+</directory>

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_agent.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_agent.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_agent.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -16,93 +16,31 @@
 
 try:
     from calendarserver.tools.agent import AgentRealm
-    from calendarserver.tools.agent import CustomDigestCredentialFactory
-    from calendarserver.tools.agent import DirectoryServiceChecker
     from calendarserver.tools.agent import InactivityDetector
     from twistedcaldav.test.util import TestCase
-    from twisted.internet.defer import inlineCallbacks
     from twisted.internet.task import Clock
-    from twisted.cred.error import UnauthorizedLogin
     from twisted.web.resource import IResource
     from twisted.web.resource import ForbiddenResource
-    RUN_TESTS = True
+
 except ImportError:
-    RUN_TESTS = False
+    pass
 
-
-
-if RUN_TESTS:
+else:
     class AgentTestCase(TestCase):
 
-        def test_CustomDigestCredentialFactory(self):
-            f = CustomDigestCredentialFactory("md5", "/Local/Default")
-            challenge = f.getChallenge(FakeRequest())
-            self.assertTrue("qop" not in challenge)
-            self.assertEquals(challenge["algorithm"], "md5")
-            self.assertEquals(challenge["realm"], "/Local/Default")
-
-        @inlineCallbacks
-        def test_DirectoryServiceChecker(self):
-            c = DirectoryServiceChecker("/Local/Default")
-            fakeOpenDirectory = FakeOpenDirectory()
-            c.directoryModule = fakeOpenDirectory
-
-            fields = {
-                "username" : "foo",
-                "realm" : "/Local/Default",
-                "nonce" : 1,
-                "uri" : "/gateway",
-                "response" : "abc",
-                "algorithm" : "md5",
-            }
-            creds = FakeCredentials("foo", fields)
-
-            # Record does not exist:
-            fakeOpenDirectory.returnThisRecord(None)
-            try:
-                yield c.requestAvatarId(creds)
-            except UnauthorizedLogin:
-                pass
-            else:
-                self.fail("Didn't raise UnauthorizedLogin")
-
-            # Record exists, but invalid credentials
-            fakeOpenDirectory.returnThisRecord("fooRecord")
-            fakeOpenDirectory.returnThisAuthResponse(False)
-            try:
-                yield c.requestAvatarId(creds)
-            except UnauthorizedLogin:
-                pass
-            else:
-                self.fail("Didn't raise UnauthorizedLogin")
-
-            # Record exists, valid credentials
-            fakeOpenDirectory.returnThisRecord("fooRecord")
-            fakeOpenDirectory.returnThisAuthResponse(True)
-            avatar = (yield c.requestAvatarId(creds))
-            self.assertEquals(avatar, "foo")
-
-            # Record exists, but missing fields in credentials
-            del creds.fields["nonce"]
-            fakeOpenDirectory.returnThisRecord("fooRecord")
-            fakeOpenDirectory.returnThisAuthResponse(False)
-            try:
-                yield c.requestAvatarId(creds)
-            except UnauthorizedLogin:
-                pass
-            else:
-                self.fail("Didn't raise UnauthorizedLogin")
-
-
         def test_AgentRealm(self):
             realm = AgentRealm("root", ["abc"])
 
             # Valid avatar
-            _ignore_interface, resource, ignored = realm.requestAvatar("abc", None, IResource)
+            _ignore_interface, resource, ignored = realm.requestAvatar(
+                "abc", None, IResource
+            )
             self.assertEquals(resource, "root")
 
             # Not allowed avatar
-            _ignore_interface, resource, ignored = realm.requestAvatar("def", None, IResource)
+            _ignore_interface, resource, ignored = realm.requestAvatar(
+                "def", None, IResource
+            )
             self.assertTrue(isinstance(resource, ForbiddenResource))
 
             # Interface unhandled
@@ -120,6 +58,7 @@
             clock = Clock()
 
             self.inactivityReached = False
+
             def becameInactive():
                 self.inactivityReached = True
 
@@ -162,8 +101,9 @@
         def returnThisAuthResponse(self, response):
             self.authResponse = response
 
-        def authenticateUserDigest(self, ignored, node, username, challenge, response,
-            method):
+        def authenticateUserDigest(
+            self, ignored, node, username, challenge, response, method
+        ):
             return self.authResponse
 
         ODNSerror = "Error"
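
The hunk above drops the RUN_TESTS flag in favor of a try/except/else import guard, so the test classes are only defined when the optional imports succeed. A standalone sketch of that pattern, using a real twisted import and plain unittest as stand-ins for the guarded names in this file:

    # Sketch of the import-guard pattern: define TestCase subclasses in the
    # "else" branch so they simply never exist if the optional import fails.
    import unittest

    try:
        from twisted.internet.task import Clock
    except ImportError:
        pass
    else:
        class ClockTestCase(unittest.TestCase):
            def test_advance(self):
                clock = Clock()
                clock.advance(5)
                self.assertEqual(clock.seconds(), 5)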

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_calverify.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_calverify.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_calverify.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -35,7 +35,6 @@
 from txdav.common.datastore.test.util import populateCalendarsFrom
 
 from StringIO import StringIO
-import os
 
 
 OK_ICS = """BEGIN:VCALENDAR
@@ -471,20 +470,7 @@
 
     number_to_process = len(requirements["home1"]["calendar_1"])
 
-    def configure(self):
-        super(CalVerifyDataTests, self).configure()
-        self.patch(config.DirectoryService.params, "xmlFile",
-            os.path.join(
-                os.path.dirname(__file__), "calverify", "accounts.xml"
-            )
-        )
-        self.patch(config.ResourceService.params, "xmlFile",
-            os.path.join(
-                os.path.dirname(__file__), "calverify", "resources.xml"
-            )
-        )
 
-
     @inlineCallbacks
     def populate(self):
 
@@ -944,25 +930,7 @@
     uuid3 = "AC478592-7783-44D1-B2AE-52359B4E8415"
     uuidl1 = "75EA36BE-F71B-40F9-81F9-CF59BF40CA8F"
 
-    def configure(self):
-        super(CalVerifyMismatchTestsBase, self).configure()
-        self.patch(config.DirectoryService.params, "xmlFile",
-            os.path.join(
-                os.path.dirname(__file__), "calverify", "accounts.xml"
-            )
-        )
-        self.patch(config.ResourceService.params, "xmlFile",
-            os.path.join(
-                os.path.dirname(__file__), "calverify", "resources.xml"
-            )
-        )
-        self.patch(config.AugmentService.params, "xmlFiles",
-            [os.path.join(
-                os.path.dirname(__file__), "calverify", "augments.xml"
-            ), ]
-        )
 
-
     @inlineCallbacks
     def populate(self):
 

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_gateway.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_gateway.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_gateway.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -25,16 +25,46 @@
 from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
 
 from twistedcaldav.config import config
-from twistedcaldav.test.util import TestCase, CapturingProcessProtocol
-from calendarserver.tools.util import getDirectory
+from twistedcaldav.test.util import StoreTestCase, CapturingProcessProtocol
 import plistlib
+from twistedcaldav.memcacheclient import ClientFactory
+from twistedcaldav import memcacher
+from txdav.who.idirectory import AutoScheduleMode
 
 
-class RunCommandTestCase(TestCase):
+class RunCommandTestCase(StoreTestCase):
 
-    def setUp(self):
-        super(RunCommandTestCase, self).setUp()
+    def configure(self):
+        """
+        Override the standard StoreTestCase configuration
+        """
+        self.serverRoot = self.mktemp()
+        os.mkdir(self.serverRoot)
+        absoluteServerRoot = os.path.abspath(self.serverRoot)
 
+        configRoot = os.path.join(absoluteServerRoot, "Config")
+        if not os.path.exists(configRoot):
+            os.makedirs(configRoot)
+
+        dataRoot = os.path.join(absoluteServerRoot, "Data")
+        if not os.path.exists(dataRoot):
+            os.makedirs(dataRoot)
+
+        documentRoot = os.path.join(absoluteServerRoot, "Documents")
+        if not os.path.exists(documentRoot):
+            os.makedirs(documentRoot)
+
+        logRoot = os.path.join(absoluteServerRoot, "Logs")
+        if not os.path.exists(logRoot):
+            os.makedirs(logRoot)
+
+        runRoot = os.path.join(absoluteServerRoot, "Run")
+        if not os.path.exists(runRoot):
+            os.makedirs(runRoot)
+
+        config.reset()
+        self.configInit()
+
         testRoot = os.path.join(os.path.dirname(__file__), "gateway")
         templateName = os.path.join(testRoot, "caldavd.plist")
         templateFile = open(templateName)
@@ -43,41 +73,79 @@
 
         databaseRoot = os.path.abspath("_spawned_scripts_db" + str(os.getpid()))
         newConfig = template % {
-            "ServerRoot" : os.path.abspath(config.ServerRoot),
-            "DatabaseRoot" : databaseRoot,
-            "WritablePlist" : os.path.join(os.path.abspath(config.ConfigRoot), "caldavd-writable.plist"),
+            "ServerRoot": absoluteServerRoot,
+            "DataRoot": dataRoot,
+            "DatabaseRoot": databaseRoot,
+            "DocumentRoot": documentRoot,
+            "ConfigRoot": configRoot,
+            "LogRoot": logRoot,
+            "RunRoot": runRoot,
+            "WritablePlist": os.path.join(
+                os.path.abspath(configRoot), "caldavd-writable.plist"
+            ),
         }
-        configFilePath = FilePath(os.path.join(config.ConfigRoot, "caldavd.plist"))
+        configFilePath = FilePath(
+            os.path.join(configRoot, "caldavd.plist")
+        )
+
         configFilePath.setContent(newConfig)
 
         self.configFileName = configFilePath.path
         config.load(self.configFileName)
 
-        origUsersFile = FilePath(os.path.join(os.path.dirname(__file__),
-            "gateway", "users-groups.xml"))
-        copyUsersFile = FilePath(os.path.join(config.DataRoot, "accounts.xml"))
+        config.Memcached.Pools.Default.ClientEnabled = False
+        config.Memcached.Pools.Default.ServerEnabled = False
+        ClientFactory.allowTestCache = True
+        memcacher.Memcacher.allowTestCache = True
+        memcacher.Memcacher.memoryCacheInstance = None
+        config.DirectoryAddressBook.Enabled = False
+        config.UsePackageTimezones = True
+
+        origUsersFile = FilePath(
+            os.path.join(
+                os.path.dirname(__file__),
+                "gateway",
+                "users-groups.xml"
+            )
+        )
+        copyUsersFile = FilePath(
+            os.path.join(config.DataRoot, "accounts.xml")
+        )
         origUsersFile.copyTo(copyUsersFile)
 
-        origResourcesFile = FilePath(os.path.join(os.path.dirname(__file__),
-            "gateway", "resources-locations.xml"))
-        copyResourcesFile = FilePath(os.path.join(config.DataRoot, "resources.xml"))
+        origResourcesFile = FilePath(
+            os.path.join(
+                os.path.dirname(__file__),
+                "gateway",
+                "resources-locations.xml"
+            )
+        )
+        copyResourcesFile = FilePath(
+            os.path.join(config.DataRoot, "resources.xml")
+        )
         origResourcesFile.copyTo(copyResourcesFile)
 
-        origAugmentFile = FilePath(os.path.join(os.path.dirname(__file__),
-            "gateway", "augments.xml"))
+        origAugmentFile = FilePath(
+            os.path.join(
+                os.path.dirname(__file__),
+                "gateway",
+                "augments.xml"
+            )
+        )
         copyAugmentFile = FilePath(os.path.join(config.DataRoot, "augments.xml"))
         origAugmentFile.copyTo(copyAugmentFile)
 
-        # Make sure trial puts the reactor in the right state, by letting it
-        # run one reactor iteration.  (Ignore me, please.)
-        d = Deferred()
-        reactor.callLater(0, d.callback, True)
-        return d
+        # # Make sure trial puts the reactor in the right state, by letting it
+        # # run one reactor iteration.  (Ignore me, please.)
+        # d = Deferred()
+        # reactor.callLater(0, d.callback, True)
+        # return d
 
 
     @inlineCallbacks
-    def runCommand(self, command, error=False,
-        script="calendarserver_command_gateway"):
+    def runCommand(
+        self, command, error=False, script="calendarserver_command_gateway"
+    ):
         """
         Run the given command by feeding it as standard input to
         calendarserver_command_gateway in a subprocess.
@@ -86,7 +154,9 @@
         if isinstance(command, unicode):
             command = command.encode("utf-8")
 
-        sourceRoot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+        sourceRoot = os.path.dirname(
+            os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+        )
         cmd = os.path.join(sourceRoot, "bin", script)
 
         args = [cmd, "-f", self.configFileName]
@@ -110,6 +180,12 @@
 
 class GatewayTestCase(RunCommandTestCase):
 
+    def _flush(self):
+        # Flush both XML directories
+        self.directory._directory.services[0].flush()
+        self.directory._directory.services[1].flush()
+
+
     @inlineCallbacks
     def test_getLocationAndResourceList(self):
         results = yield self.runCommand(command_getLocationAndResourceList)
@@ -125,14 +201,18 @@
     @inlineCallbacks
     def test_getLocationAttributes(self):
         yield self.runCommand(command_createLocation)
+
+        # Tell the resources services to flush their caches and re-read XML
+        self._flush()
+
         results = yield self.runCommand(command_getLocationAttributes)
-        self.assertEquals(results["result"]["Capacity"], "40")
-        self.assertEquals(results["result"]["Description"], "Test Description")
+        # self.assertEquals(results["result"]["Capacity"], "40")
+        # self.assertEquals(results["result"]["Description"], "Test Description")
         self.assertEquals(results["result"]["RecordName"], ["createdlocation01"])
         self.assertEquals(results["result"]["RealName"],
             "Created Location 01 %s %s" % (unichr(208), u"\ud83d\udca3"))
-        self.assertEquals(results["result"]["Comment"], "Test Comment")
-        self.assertEquals(results["result"]["AutoSchedule"], True)
+        # self.assertEquals(results["result"]["Comment"], "Test Comment")
+        self.assertEquals(results["result"]["AutoScheduleMode"], u"acceptIfFree")
         self.assertEquals(results["result"]["AutoAcceptGroup"], "E5A6142C-4189-4E9E-90B0-9CD0268B314B")
         self.assertEquals(set(results["result"]["ReadProxies"]), set(['user03', 'user04']))
         self.assertEquals(set(results["result"]["WriteProxies"]), set(['user05', 'user06']))
@@ -147,32 +227,37 @@
     @inlineCallbacks
     def test_getResourceAttributes(self):
         yield self.runCommand(command_createResource)
+
+        # Tell the resources services to flush their caches and re-read XML
+        self._flush()
+
         results = yield self.runCommand(command_getResourceAttributes)
-        self.assertEquals(results["result"]["Comment"], "Test Comment")
-        self.assertEquals(results["result"]["Type"], "Computer")
+        # self.assertEquals(results["result"]["Comment"], "Test Comment")
+        # self.assertEquals(results["result"]["Type"], "Computer")
         self.assertEquals(set(results["result"]["ReadProxies"]), set(['user03', 'user04']))
         self.assertEquals(set(results["result"]["WriteProxies"]), set(['user05', 'user06']))
 
 
     @inlineCallbacks
     def test_createAddress(self):
-        directory = getDirectory()
 
-        record = directory.recordWithUID("C701069D-9CA1-4925-A1A9-5CD94767B74B")
+        record = yield self.directory.recordWithUID("C701069D-9CA1-4925-A1A9-5CD94767B74B")
         self.assertEquals(record, None)
         yield self.runCommand(command_createAddress)
 
-        directory.flushCaches()
+        # Tell the resources services to flush their caches and re-read XML
+        self._flush()
 
-        record = directory.recordWithUID("C701069D-9CA1-4925-A1A9-5CD94767B74B")
-        self.assertEquals(record.fullName.decode("utf-8"),
-            "Created Address 01 %s %s" % (unichr(208), u"\ud83d\udca3"))
+        record = yield self.directory.recordWithUID("C701069D-9CA1-4925-A1A9-5CD94767B74B")
+        self.assertEquals(
+            record.displayName,
+            "Created Address 01 %s %s" % (unichr(208), u"\ud83d\udca3")
+        )
 
-        self.assertNotEquals(record, None)
 
-        self.assertEquals(record.extras["abbreviatedName"], "Addr1")
-        self.assertEquals(record.extras["streetAddress"], "1 Infinite Loop\nCupertino, 95014\nCA")
-        self.assertEquals(record.extras["geo"], "geo:37.331,-122.030")
+        self.assertEquals(record.abbreviatedName, "Addr1")
+        self.assertEquals(record.streetAddress, "1 Infinite Loop\nCupertino, 95014\nCA")
+        self.assertEquals(record.geographicLocation, "geo:37.331,-122.030")
 
         results = yield self.runCommand(command_getAddressList)
         self.assertEquals(len(results["result"]), 1)
@@ -185,7 +270,7 @@
         results = yield self.runCommand(command_getAddressAttributes)
         self.assertEquals(results["result"]["RealName"], u'Updated Address')
         self.assertEquals(results["result"]["StreetAddress"], u'Updated Street Address')
-        self.assertEquals(results["result"]["Geo"], u'Updated Geo')
+        self.assertEquals(results["result"]["GeographicLocation"], u'Updated Geo')
 
         results = yield self.runCommand(command_deleteAddress)
 
@@ -195,29 +280,23 @@
 
     @inlineCallbacks
     def test_createLocation(self):
-        directory = getDirectory()
 
-        record = directory.recordWithUID("836B1B66-2E9A-4F46-8B1C-3DD6772C20B2")
+        record = yield self.directory.recordWithUID("836B1B66-2E9A-4F46-8B1C-3DD6772C20B2")
         self.assertEquals(record, None)
         yield self.runCommand(command_createLocation)
 
-        directory.flushCaches()
+        # Tell the resources services to flush their caches and re-read XML
+        self._flush()
 
-        # This appears to be necessary in order for record.autoSchedule to
-        # reflect the change prior to the directory record expiration
-        augmentService = directory.serviceForRecordType(directory.recordType_locations).augmentService
-        augmentService.refresh()
+        record = yield self.directory.recordWithUID("836B1B66-2E9A-4F46-8B1C-3DD6772C20B2")
+        self.assertEquals(record.fullNames[0],
+            u"Created Location 01 %s %s" % (unichr(208), u"\ud83d\udca3"))
 
-        record = directory.recordWithUID("836B1B66-2E9A-4F46-8B1C-3DD6772C20B2")
-        self.assertEquals(record.fullName.decode("utf-8"),
-            "Created Location 01 %s %s" % (unichr(208), u"\ud83d\udca3"))
-
         self.assertNotEquals(record, None)
-        self.assertEquals(record.autoSchedule, True)
+        # self.assertEquals(record.autoScheduleMode, "")
 
-        self.assertEquals(record.extras["comment"], "Test Comment")
-        self.assertEquals(record.extras["floor"], "First")
-        self.assertEquals(record.extras["capacity"], "40")
+        self.assertEquals(record.floor, u"First")
+        # self.assertEquals(record.extras["capacity"], "40")
 
         results = yield self.runCommand(command_getLocationAttributes)
         self.assertEquals(set(results["result"]["ReadProxies"]), set(['user03', 'user04']))
@@ -226,88 +305,104 @@
 
     @inlineCallbacks
     def test_setLocationAttributes(self):
-        directory = getDirectory()
 
         yield self.runCommand(command_createLocation)
         yield self.runCommand(command_setLocationAttributes)
-        directory.flushCaches()
 
-        # This appears to be necessary in order for record.autoSchedule to
-        # reflect the change
-        augmentService = directory.serviceForRecordType(directory.recordType_locations).augmentService
-        augmentService.refresh()
+        # Tell the resources services to flush their caches and re-read XML
+        self._flush()
 
-        record = directory.recordWithUID("836B1B66-2E9A-4F46-8B1C-3DD6772C20B2")
+        record = yield self.directory.recordWithUID("836B1B66-2E9A-4F46-8B1C-3DD6772C20B2")
 
-        self.assertEquals(record.extras["comment"], "Updated Test Comment")
-        self.assertEquals(record.extras["floor"], "Second")
-        self.assertEquals(record.extras["capacity"], "41")
-        self.assertEquals(record.extras["streetAddress"], "2 Infinite Loop\nCupertino, 95014\nCA")
-        self.assertEquals(record.autoSchedule, True)
+        # self.assertEquals(record.extras["comment"], "Updated Test Comment")
+        self.assertEquals(record.floor, "Second")
+        # self.assertEquals(record.extras["capacity"], "41")
+        self.assertEquals(record.autoScheduleMode, AutoScheduleMode.acceptIfFree)
         self.assertEquals(record.autoAcceptGroup, "F5A6142C-4189-4E9E-90B0-9CD0268B314B")
 
         results = yield self.runCommand(command_getLocationAttributes)
-        self.assertEquals(results["result"]["AutoSchedule"], True)
+        self.assertEquals(results["result"]["AutoScheduleMode"], "acceptIfFree")
         self.assertEquals(results["result"]["AutoAcceptGroup"], "F5A6142C-4189-4E9E-90B0-9CD0268B314B")
         self.assertEquals(set(results["result"]["ReadProxies"]), set(['user03']))
         self.assertEquals(set(results["result"]["WriteProxies"]), set(['user05', 'user06', 'user07']))
 
 
     @inlineCallbacks
+    def test_setAddressOnLocation(self):
+        yield self.runCommand(command_createLocation)
+        yield self.runCommand(command_createAddress)
+        yield self.runCommand(command_setAddressOnLocation)
+        results = yield self.runCommand(command_getLocationAttributes)
+        self.assertEquals(results["result"]["AssociatedAddress"], "C701069D-9CA1-4925-A1A9-5CD94767B74B")
+        self._flush()
+        record = yield self.directory.recordWithUID("836B1B66-2E9A-4F46-8B1C-3DD6772C20B2")
+        self.assertEquals(record.associatedAddress, "C701069D-9CA1-4925-A1A9-5CD94767B74B")
+        yield self.runCommand(command_removeAddressFromLocation)
+        results = yield self.runCommand(command_getLocationAttributes)
+        self.assertEquals(results["result"]["AssociatedAddress"], "")
+        self._flush()
+        record = yield self.directory.recordWithUID("836B1B66-2E9A-4F46-8B1C-3DD6772C20B2")
+        self.assertEquals(record.associatedAddress, u"")
+
+
+    @inlineCallbacks
     def test_destroyLocation(self):
-        directory = getDirectory()
 
-        record = directory.recordWithUID("location01")
+        record = yield self.directory.recordWithUID("location01")
         self.assertNotEquals(record, None)
 
         yield self.runCommand(command_deleteLocation)
 
-        directory.flushCaches()
-        record = directory.recordWithUID("location01")
+        # Tell the resources services to flush their caches and re-read XML
+        self._flush()
+
+        record = yield self.directory.recordWithUID("location01")
         self.assertEquals(record, None)
 
 
     @inlineCallbacks
     def test_createResource(self):
-        directory = getDirectory()
 
-        record = directory.recordWithUID("AF575A61-CFA6-49E1-A0F6-B5662C9D9801")
+        record = yield self.directory.recordWithUID("AF575A61-CFA6-49E1-A0F6-B5662C9D9801")
         self.assertEquals(record, None)
 
         yield self.runCommand(command_createResource)
 
-        directory.flushCaches()
-        record = directory.recordWithUID("AF575A61-CFA6-49E1-A0F6-B5662C9D9801")
+        # Tell the resources services to flush their caches and re-read XML
+        self._flush()
+
+        record = yield self.directory.recordWithUID("AF575A61-CFA6-49E1-A0F6-B5662C9D9801")
         self.assertNotEquals(record, None)
 
 
     @inlineCallbacks
     def test_setResourceAttributes(self):
-        directory = getDirectory()
 
         yield self.runCommand(command_createResource)
-        directory.flushCaches()
-        record = directory.recordWithUID("AF575A61-CFA6-49E1-A0F6-B5662C9D9801")
-        self.assertEquals(record.fullName, "Laptop 1")
+        record = yield self.directory.recordWithUID("AF575A61-CFA6-49E1-A0F6-B5662C9D9801")
+        self.assertEquals(record.displayName, "Laptop 1")
 
         yield self.runCommand(command_setResourceAttributes)
 
-        directory.flushCaches()
-        record = directory.recordWithUID("AF575A61-CFA6-49E1-A0F6-B5662C9D9801")
-        self.assertEquals(record.fullName, "Updated Laptop 1")
+        # Tell the resources services to flush their caches and re-read XML
+        self._flush()
 
+        record = yield self.directory.recordWithUID("AF575A61-CFA6-49E1-A0F6-B5662C9D9801")
+        self.assertEquals(record.displayName, "Updated Laptop 1")
 
+
     @inlineCallbacks
     def test_destroyResource(self):
-        directory = getDirectory()
 
-        record = directory.recordWithUID("resource01")
+        record = yield self.directory.recordWithUID("resource01")
         self.assertNotEquals(record, None)
 
         yield self.runCommand(command_deleteResource)
 
-        directory.flushCaches()
-        record = directory.recordWithUID("resource01")
+        # Tell the resources services to flush their caches and re-read XML
+        self._flush()
+
+        record = yield self.directory.recordWithUID("resource01")
         self.assertEquals(record, None)
 
 
@@ -338,9 +433,10 @@
         """
         Verify readConfig returns with only the writable keys
         """
-        results = yield self.runCommand(command_readConfig,
-            script="calendarserver_config")
-
+        results = yield self.runCommand(
+            command_readConfig,
+            script="calendarserver_config"
+        )
         self.assertEquals(results["result"]["RedirectHTTPToHTTPS"], False)
         self.assertEquals(results["result"]["EnableSearchAddressBook"], False)
         self.assertEquals(results["result"]["EnableCalDAV"], True)
@@ -360,8 +456,10 @@
         """
         Verify writeConfig updates the writable plist file only
         """
-        results = yield self.runCommand(command_writeConfig,
-            script="calendarserver_config")
+        results = yield self.runCommand(
+            command_writeConfig,
+            script="calendarserver_config"
+        )
 
         self.assertEquals(results["result"]["EnableCalDAV"], False)
         self.assertEquals(results["result"]["EnableCardDAV"], False)
@@ -383,9 +481,9 @@
         <key>command</key>
         <string>addReadProxy</string>
         <key>Principal</key>
-        <string>locations:location01</string>
+        <string>location01</string>
         <key>Proxy</key>
-        <string>users:user03</string>
+        <string>user03</string>
 </dict>
 </plist>
 """
@@ -397,9 +495,9 @@
         <key>command</key>
         <string>addWriteProxy</string>
         <key>Principal</key>
-        <string>locations:location01</string>
+        <string>location01</string>
         <key>Proxy</key>
-        <string>users:user01</string>
+        <string>user01</string>
 </dict>
 </plist>
 """
@@ -422,7 +520,7 @@
         </array>
         <key>StreetAddress</key>
         <string>1 Infinite Loop\nCupertino, 95014\nCA</string>
-        <key>Geo</key>
+        <key>GeographicLocation</key>
         <string>geo:37.331,-122.030</string>
 </dict>
 </plist>
@@ -435,8 +533,8 @@
 <dict>
         <key>command</key>
         <string>createLocation</string>
-        <key>AutoSchedule</key>
-        <true/>
+        <key>AutoScheduleMode</key>
+        <string>acceptIfFree</string>
         <key>AutoAcceptGroup</key>
         <string>E5A6142C-4189-4E9E-90B0-9CD0268B314B</string>
         <key>GeneratedUID</key>
@@ -453,19 +551,21 @@
         <string>Test Description</string>
         <key>Floor</key>
         <string>First</string>
+        <!--
         <key>Capacity</key>
         <string>40</string>
+        -->
         <key>AssociatedAddress</key>
         <string>C701069D-9CA1-4925-A1A9-5CD94767B74B</string>
         <key>ReadProxies</key>
         <array>
-            <string>users:user03</string>
-            <string>users:user04</string>
+            <string>user03</string>
+            <string>user04</string>
         </array>
         <key>WriteProxies</key>
         <array>
-            <string>users:user05</string>
-            <string>users:user06</string>
+            <string>user05</string>
+            <string>user06</string>
         </array>
 </dict>
 </plist>
@@ -478,31 +578,33 @@
 <dict>
         <key>command</key>
         <string>createResource</string>
-        <key>AutoSchedule</key>
-        <true/>
+        <key>AutoScheduleMode</key>
+        <string>declineIfBusy</string>
         <key>GeneratedUID</key>
         <string>AF575A61-CFA6-49E1-A0F6-B5662C9D9801</string>
         <key>RealName</key>
         <string>Laptop 1</string>
+        <!--
         <key>Comment</key>
         <string>Test Comment</string>
         <key>Description</key>
         <string>Test Description</string>
         <key>Type</key>
         <string>Computer</string>
+        -->
         <key>RecordName</key>
         <array>
                 <string>laptop1</string>
         </array>
         <key>ReadProxies</key>
         <array>
-            <string>users:user03</string>
-            <string>users:user04</string>
+            <string>user03</string>
+            <string>user04</string>
         </array>
         <key>WriteProxies</key>
         <array>
-            <string>users:user05</string>
-            <string>users:user06</string>
+            <string>user05</string>
+            <string>user06</string>
         </array>
 </dict>
 </plist>
@@ -617,9 +719,9 @@
         <key>command</key>
         <string>removeReadProxy</string>
         <key>Principal</key>
-        <string>locations:location01</string>
+        <string>location01</string>
         <key>Proxy</key>
-        <string>users:user03</string>
+        <string>user03</string>
 </dict>
 </plist>
 """
@@ -631,9 +733,9 @@
         <key>command</key>
         <string>removeWriteProxy</string>
         <key>Principal</key>
-        <string>locations:location01</string>
+        <string>location01</string>
         <key>Proxy</key>
-        <string>users:user01</string>
+        <string>user01</string>
 </dict>
 </plist>
 """
@@ -662,24 +764,53 @@
         <string>Updated Test Description</string>
         <key>Floor</key>
         <string>Second</string>
+        <!--
         <key>Capacity</key>
         <string>41</string>
-        <key>StreetAddress</key>
-        <string>2 Infinite Loop\nCupertino, 95014\nCA</string>
+        -->
         <key>ReadProxies</key>
         <array>
-            <string>users:user03</string>
+            <string>user03</string>
         </array>
         <key>WriteProxies</key>
         <array>
-            <string>users:user05</string>
-            <string>users:user06</string>
-            <string>users:user07</string>
+            <string>user05</string>
+            <string>user06</string>
+            <string>user07</string>
         </array>
 </dict>
 </plist>
 """
 
+command_setAddressOnLocation = """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+        <key>command</key>
+        <string>setLocationAttributes</string>
+        <key>GeneratedUID</key>
+        <string>836B1B66-2E9A-4F46-8B1C-3DD6772C20B2</string>
+        <key>AssociatedAddress</key>
+        <string>C701069D-9CA1-4925-A1A9-5CD94767B74B</string>
+</dict>
+</plist>
+"""
+
+command_removeAddressFromLocation = """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+        <key>command</key>
+        <string>setLocationAttributes</string>
+        <key>GeneratedUID</key>
+        <string>836B1B66-2E9A-4F46-8B1C-3DD6772C20B2</string>
+        <key>AssociatedAddress</key>
+        <string></string>
+</dict>
+</plist>
+"""
+
+
 command_getLocationAttributes = """<?xml version="1.0" encoding="UTF-8"?>
 <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 <plist version="1.0">
@@ -716,7 +847,7 @@
         <string>Updated Address</string>
         <key>StreetAddress</key>
         <string>Updated Street Address</string>
-        <key>Geo</key>
+        <key>GeographicLocation</key>
         <string>Updated Geo</string>
 
 </dict>
@@ -730,8 +861,8 @@
 <dict>
         <key>command</key>
         <string>setResourceAttributes</string>
-        <key>AutoSchedule</key>
-        <false/>
+        <key>AutoScheduleMode</key>
+        <string>acceptIfFree</string>
         <key>GeneratedUID</key>
         <string>AF575A61-CFA6-49E1-A0F6-B5662C9D9801</string>
         <key>RealName</key>

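In the gateway tests above, directory lookups such as recordWithUID() are now yielded inside @inlineCallbacks methods instead of being called synchronously. A generic sketch of that calling convention, with a stand-in FakeDirectory so the example is self-contained (it is not the real directory service):

    # Sketch of the Deferred-based lookup style used in the tests above.
    from twisted.internet.defer import inlineCallbacks, succeed

    class FakeDirectory(object):
        def __init__(self, records):
            self._records = records

        def recordWithUID(self, uid):
            # Returns a Deferred, mirroring the directory API yielded above.
            return succeed(self._records.get(uid))

    @inlineCallbacks
    def lookup(directory, uid):
        record = yield directory.recordWithUID(uid)
        # record is the resolved value (or None), usable like a plain result.
        print(record)

    lookup(FakeDirectory({"user01": "User 01 record"}), "user01")
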
Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_principals.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_principals.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_principals.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -17,29 +17,27 @@
 import os
 import sys
 
+from calendarserver.tools.principals import (
+    parseCreationArgs, matchStrings,
+    recordForPrincipalID, getProxies, setProxies
+)
 from twext.python.filepath import CachingFilePath as FilePath
 from twisted.internet import reactor
 from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
-
 from twistedcaldav.config import config
-from twistedcaldav.directory.directory import DirectoryError
-from twistedcaldav.directory import calendaruserproxy
+from twistedcaldav.test.util import (
+    TestCase, StoreTestCase, CapturingProcessProtocol, ErrorOutput
+)
 
-from twistedcaldav.test.util import TestCase, CapturingProcessProtocol, \
-    ErrorOutput
 
-from calendarserver.tap.util import directoryFromConfig
-from calendarserver.tools.principals import (parseCreationArgs, matchStrings,
-    updateRecord, principalForPrincipalID, getProxies, setProxies)
 
-
 class ManagePrincipalsTestCase(TestCase):
 
     def setUp(self):
         super(ManagePrincipalsTestCase, self).setUp()
 
-        # Since this test operates on proxy db, we need to assign the service:
-        calendaruserproxy.ProxyDBService = calendaruserproxy.ProxySqliteDB(os.path.abspath(self.mktemp()))
+        # # Since this test operates on proxy db, we need to assign the service:
+        # calendaruserproxy.ProxyDBService = calendaruserproxy.ProxySqliteDB(os.path.abspath(self.mktemp()))
 
         testRoot = os.path.join(os.path.dirname(__file__), "principals")
         templateName = os.path.join(testRoot, "caldavd.plist")
@@ -49,11 +47,11 @@
 
         databaseRoot = os.path.abspath("_spawned_scripts_db" + str(os.getpid()))
         newConfig = template % {
-            "ServerRoot" : os.path.abspath(config.ServerRoot),
-            "DataRoot" : os.path.abspath(config.DataRoot),
-            "DatabaseRoot" : databaseRoot,
-            "DocumentRoot" : os.path.abspath(config.DocumentRoot),
-            "LogRoot" : os.path.abspath(config.LogRoot),
+            "ServerRoot": os.path.abspath(config.ServerRoot),
+            "DataRoot": os.path.abspath(config.DataRoot),
+            "DatabaseRoot": databaseRoot,
+            "DocumentRoot": os.path.abspath(config.DocumentRoot),
+            "LogRoot": os.path.abspath(config.LogRoot),
         }
         configFilePath = FilePath(os.path.join(config.ConfigRoot, "caldavd.plist"))
         configFilePath.setContent(newConfig)
@@ -61,18 +59,33 @@
         self.configFileName = configFilePath.path
         config.load(self.configFileName)
 
-        origUsersFile = FilePath(os.path.join(os.path.dirname(__file__),
-            "principals", "users-groups.xml"))
+        origUsersFile = FilePath(
+            os.path.join(
+                os.path.dirname(__file__),
+                "principals",
+                "users-groups.xml"
+            )
+        )
         copyUsersFile = FilePath(os.path.join(config.DataRoot, "accounts.xml"))
         origUsersFile.copyTo(copyUsersFile)
 
-        origResourcesFile = FilePath(os.path.join(os.path.dirname(__file__),
-            "principals", "resources-locations.xml"))
+        origResourcesFile = FilePath(
+            os.path.join(
+                os.path.dirname(__file__),
+                "principals",
+                "resources-locations.xml"
+            )
+        )
         copyResourcesFile = FilePath(os.path.join(config.DataRoot, "resources.xml"))
         origResourcesFile.copyTo(copyResourcesFile)
 
-        origAugmentFile = FilePath(os.path.join(os.path.dirname(__file__),
-            "principals", "augments.xml"))
+        origAugmentFile = FilePath(
+            os.path.join(
+                os.path.dirname(__file__),
+                "principals",
+                "augments.xml"
+            )
+        )
         copyAugmentFile = FilePath(os.path.join(config.DataRoot, "augments.xml"))
         origAugmentFile.copyTo(copyAugmentFile)
 
@@ -114,6 +127,7 @@
         self.assertTrue("users" in results)
         self.assertTrue("locations" in results)
         self.assertTrue("resources" in results)
+        self.assertTrue("addresses" in results)
 
 
     @inlineCallbacks
@@ -133,28 +147,36 @@
 
     @inlineCallbacks
     def test_addRemove(self):
-        results = yield self.runCommand("--add", "resources", "New Resource",
-            "newresource", "edaa6ae6-011b-4d89-ace3-6b688cdd91d9")
+        results = yield self.runCommand(
+            "--add", "resources",
+            "New Resource", "newresource", "newresourceuid"
+        )
         self.assertTrue("Added 'New Resource'" in results)
 
-        results = yield self.runCommand("--get-auto-schedule",
-            "resources:newresource")
-        self.assertTrue(results.startswith('Auto-schedule for "New Resource" (resources:newresource) is true'))
+        results = yield self.runCommand(
+            "--get-auto-schedule-mode",
+            "resources:newresource"
+        )
+        self.assertTrue(
+            results.startswith(
+                'Auto-schedule mode for "New Resource" newresourceuid (resource) newresource is accept if free, decline if busy'
+            )
+        )
 
-        results = yield self.runCommand("--get-auto-schedule-mode",
-            "resources:newresource")
-        self.assertTrue(results.startswith('Auto-schedule mode for "New Resource" (resources:newresource) is default'))
-
         results = yield self.runCommand("--list-principals=resources")
         self.assertTrue("newresource" in results)
 
-        results = yield self.runCommand("--add", "resources", "New Resource",
-            "newresource1", "edaa6ae6-011b-4d89-ace3-6b688cdd91d9")
-        self.assertTrue("Duplicate guid" in results)
+        results = yield self.runCommand(
+            "--add", "resources", "New Resource",
+            "newresource1", "newresourceuid"
+        )
+        self.assertTrue("UID already in use: newresourceuid" in results)
 
-        results = yield self.runCommand("--add", "resources", "New Resource",
-            "newresource", "fdaa6ae6-011b-4d89-ace3-6b688cdd91d9")
-        self.assertTrue("Duplicate shortName" in results)
+        results = yield self.runCommand(
+            "--add", "resources", "New Resource",
+            "newresource", "uniqueuid"
+        )
+        self.assertTrue("Record name already in use" in results)
 
         results = yield self.runCommand("--remove", "resources:newresource")
         self.assertTrue("Removed 'New Resource'" in results)
@@ -165,29 +187,13 @@
 
     def test_parseCreationArgs(self):
 
-        self.assertEquals(("full name", None, None),
-            parseCreationArgs(("full name",)))
-
-        self.assertEquals(("full name", "short name", None),
-            parseCreationArgs(("full name", "short name")))
-
-        guid = "02C3DE93-E655-4856-47B76B8BB1A7BDCE"
-
-        self.assertEquals(("full name", "short name", guid),
-            parseCreationArgs(("full name", "short name", guid)))
-
-        self.assertEquals(("full name", "short name", guid),
-            parseCreationArgs(("full name", guid, "short name")))
-
-        self.assertEquals(("full name", None, guid),
-            parseCreationArgs(("full name", guid)))
-
-        self.assertRaises(
-            ValueError,
-            parseCreationArgs, ("full name", "non guid", "non guid")
+        self.assertEquals(
+            ("full name", "short name", "uid"),
+            parseCreationArgs(("full name", "short name", "uid"))
         )
 
 
+
     def test_matchStrings(self):
         self.assertEquals("abc", matchStrings("a", ("abc", "def")))
         self.assertEquals("def", matchStrings("de", ("abc", "def")))
@@ -199,161 +205,126 @@
 
     @inlineCallbacks
     def test_modifyWriteProxies(self):
-        results = yield self.runCommand("--add-write-proxy=users:user01",
-            "locations:location01")
-        self.assertTrue(results.startswith('Added "Test User 01" (users:user01) as a write proxy for "Room 01" (locations:location01)'))
+        results = yield self.runCommand(
+            "--add-write-proxy=users:user01", "locations:location01"
+        )
+        self.assertTrue(
+            results.startswith('Added "User 01" user01 (user) user01 as a write proxy for "Room 01" location01 (location) location01')
+        )
 
-        results = yield self.runCommand("--list-write-proxies",
-            "locations:location01")
-        self.assertTrue("Test User 01" in results)
+        results = yield self.runCommand(
+            "--list-write-proxies", "locations:location01"
+        )
+        self.assertTrue("User 01" in results)
 
-        results = yield self.runCommand("--remove-proxy=users:user01",
-            "locations:location01")
+        results = yield self.runCommand(
+            "--remove-proxy=users:user01", "locations:location01"
+        )
 
-        results = yield self.runCommand("--list-write-proxies",
-            "locations:location01")
-        self.assertTrue('No write proxies for "Room 01" (locations:location01)' in results)
+        results = yield self.runCommand(
+            "--list-write-proxies", "locations:location01"
+        )
+        self.assertTrue(
+            'No write proxies for "Room 01" location01 (location) location01' in results
+        )
 
 
     @inlineCallbacks
     def test_modifyReadProxies(self):
-        results = yield self.runCommand("--add-read-proxy=users:user01",
-            "locations:location01")
-        self.assertTrue(results.startswith('Added "Test User 01" (users:user01) as a read proxy for "Room 01" (locations:location01)'))
+        results = yield self.runCommand(
+            "--add-read-proxy=users:user01", "locations:location01"
+        )
+        self.assertTrue(
+            results.startswith('Added "User 01" user01 (user) user01 as a read proxy for "Room 01" location01 (location) location01')
+        )
 
-        results = yield self.runCommand("--list-read-proxies",
-            "locations:location01")
-        self.assertTrue("Test User 01" in results)
+        results = yield self.runCommand(
+            "--list-read-proxies", "locations:location01"
+        )
+        self.assertTrue("User 01" in results)
 
-        results = yield self.runCommand("--remove-proxy=users:user01",
-            "locations:location01")
+        results = yield self.runCommand(
+            "--remove-proxy=users:user01", "locations:location01"
+        )
 
-        results = yield self.runCommand("--list-read-proxies",
-            "locations:location01")
-        self.assertTrue('No read proxies for "Room 01" (locations:location01)' in results)
+        results = yield self.runCommand(
+            "--list-read-proxies", "locations:location01"
+        )
+        self.assertTrue(
+            'No read proxies for "Room 01" location01 (location) location01' in results
+        )
 
 
     @inlineCallbacks
-    def test_autoSchedule(self):
-        results = yield self.runCommand("--get-auto-schedule",
-            "locations:location01")
-        self.assertTrue(results.startswith('Auto-schedule for "Room 01" (locations:location01) is false'))
-
-        results = yield self.runCommand("--set-auto-schedule=true",
-            "locations:location01")
-        self.assertTrue(results.startswith('Setting auto-schedule to true for "Room 01" (locations:location01)'))
-
-        results = yield self.runCommand("--get-auto-schedule",
-            "locations:location01")
-        self.assertTrue(results.startswith('Auto-schedule for "Room 01" (locations:location01) is true'))
-
-        results = yield self.runCommand("--set-auto-schedule=true",
-            "users:user01")
-        self.assertTrue(results.startswith('Enabling auto-schedule for (users)user01 is not allowed.'))
-
-
-    @inlineCallbacks
     def test_autoScheduleMode(self):
-        results = yield self.runCommand("--get-auto-schedule-mode",
-            "locations:location01")
-        self.assertTrue(results.startswith('Auto-schedule mode for "Room 01" (locations:location01) is default'))
+        results = yield self.runCommand(
+            "--get-auto-schedule-mode", "locations:location01"
+        )
+        self.assertTrue(
+            results.startswith('Auto-schedule mode for "Room 01" location01 (location) location01 is accept if free, decline if busy')
+        )
 
-        results = yield self.runCommand("--set-auto-schedule-mode=accept-if-free",
-            "locations:location01")
-        self.assertTrue(results.startswith('Setting auto-schedule mode to accept-if-free for "Room 01" (locations:location01)'))
+        results = yield self.runCommand(
+            "--set-auto-schedule-mode=accept-if-free", "locations:location01"
+        )
+        self.assertTrue(
+            results.startswith('Setting auto-schedule-mode to accept if free for "Room 01" location01 (location) location01')
+        )
 
-        results = yield self.runCommand("--get-auto-schedule-mode",
-            "locations:location01")
-        self.assertTrue(results.startswith('Auto-schedule mode for "Room 01" (locations:location01) is accept-if-free'))
+        results = yield self.runCommand(
+            "--get-auto-schedule-mode",
+            "locations:location01"
+        )
+        self.assertTrue(
+            results.startswith('Auto-schedule mode for "Room 01" location01 (location) location01 is accept if free')
+        )
 
-        results = yield self.runCommand("--set-auto-schedule-mode=decline-if-busy",
-            "users:user01")
-        self.assertTrue(results.startswith('Setting auto-schedule mode for (users)user01 is not allowed.'))
+        results = yield self.runCommand(
+            "--set-auto-schedule-mode=decline-if-busy", "users:user01"
+        )
+        self.assertTrue(results.startswith('Setting auto-schedule-mode for "User 01" user01 (user) user01 is not allowed.'))
 
         try:
-            results = yield self.runCommand("--set-auto-schedule-mode=bogus",
-                "users:user01")
+            results = yield self.runCommand(
+                "--set-auto-schedule-mode=bogus",
+                "users:user01"
+            )
         except ErrorOutput:
             pass
         else:
             self.fail("Expected command failure")
 
 
-    @inlineCallbacks
-    def test_updateRecord(self):
-        directory = directoryFromConfig(config)
-        guid = "EEE28807-A8C5-46C8-A558-A08281C558A7"
 
-        (yield updateRecord(True, directory, "locations",
-            guid=guid, fullName="Test Location", shortNames=["testlocation", ],)
-        )
-        try:
-            (yield updateRecord(True, directory, "locations",
-                guid=guid, fullName="Test Location", shortNames=["testlocation", ],)
-            )
-        except DirectoryError:
-            # We're expecting an error for trying to create a record with
-            # an existing GUID
-            pass
-        else:
-            raise self.failureException("Duplicate guid expected")
+class SetProxiesTestCase(StoreTestCase):
 
-        record = directory.recordWithGUID(guid)
-        self.assertTrue(record is not None)
-        self.assertEquals(record.fullName, "Test Location")
-        self.assertTrue(record.autoSchedule)
-
-        (yield updateRecord(False, directory, "locations",
-            guid=guid, fullName="Changed", shortNames=["testlocation", ],)
-        )
-        record = directory.recordWithGUID(guid)
-        self.assertTrue(record is not None)
-        self.assertEquals(record.fullName, "Changed")
-
-        directory.destroyRecord("locations", guid=guid)
-        record = directory.recordWithGUID(guid)
-        self.assertTrue(record is None)
-
-        # Create a user, change autoSchedule
-        guid = "F0DE73A8-39D4-4830-8D32-1FA03ABA3470"
-        (yield updateRecord(True, directory, "users",
-            guid=guid, fullName="Test User", shortNames=["testuser", ],
-            autoSchedule=True)
-        )
-        record = directory.recordWithGUID(guid)
-        self.assertTrue(record is not None)
-        self.assertEquals(record.fullName, "Test User")
-        self.assertTrue(record.autoSchedule)
-
-        (yield updateRecord(False, directory, "users",
-            guid=guid, fullName="Test User", shortNames=["testuser", ],
-            autoSchedule=False)
-        )
-        record = directory.recordWithGUID(guid)
-        self.assertTrue(record is not None)
-        self.assertEquals(record.fullName, "Test User")
-        self.assertFalse(record.autoSchedule)
-
-
     @inlineCallbacks
     def test_setProxies(self):
         """
         Read and Write proxies can be set en masse
         """
-        directory = directoryFromConfig(config)
+        directory = self.directory
+        record = yield recordForPrincipalID(directory, "users:user01")
 
-        principal = principalForPrincipalID("users:user01", directory=directory)
-        readProxies, writeProxies = (yield getProxies(principal, directory=directory))
-        self.assertEquals(readProxies, []) # initially empty
-        self.assertEquals(writeProxies, []) # initially empty
+        readProxies, writeProxies = yield getProxies(record)
+        self.assertEquals(readProxies, [])  # initially empty
+        self.assertEquals(writeProxies, [])  # initially empty
 
-        (yield setProxies(None, principal, ["users:user03", "users:user04"], ["users:user05"], directory=directory))
-        readProxies, writeProxies = (yield getProxies(principal, directory=directory))
-        self.assertEquals(set(readProxies), set(["user03", "user04"]))
-        self.assertEquals(set(writeProxies), set(["user05"]))
+        readProxies = [
+            (yield recordForPrincipalID(directory, "users:user03")),
+            (yield recordForPrincipalID(directory, "users:user04")),
+        ]
+        writeProxies = [
+            (yield recordForPrincipalID(directory, "users:user05")),
+        ]
+        yield setProxies(record, readProxies, writeProxies)
 
+        readProxies, writeProxies = yield getProxies(record)
+        self.assertEquals(set([r.uid for r in readProxies]), set(["user03", "user04"]))
+        self.assertEquals(set([r.uid for r in writeProxies]), set(["user05"]))
+
         # Using None for a proxy list indicates a no-op
-        (yield setProxies(None, principal, [], None, directory=directory))
-        readProxies, writeProxies = (yield getProxies(principal, directory=directory))
-        self.assertEquals(readProxies, []) # now empty
-        self.assertEquals(set(writeProxies), set(["user05"])) # unchanged
+        yield setProxies(record, [], None)
+        readProxies, writeProxies = yield getProxies(record)
+        self.assertEquals(readProxies, [])  # now empty
+        self.assertEquals(set([r.uid for r in writeProxies]), set(["user05"]))  # unchanged

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_purge.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_purge.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_purge.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -17,7 +17,6 @@
 
 from calendarserver.tools.purge import PurgePrincipalService
 
-from twistedcaldav.config import config
 from twistedcaldav.ical import Component
 from twistedcaldav.test.util import StoreTestCase
 
@@ -30,7 +29,6 @@
 
 from txweb2.http_headers import MimeType
 
-import os
 
 
 future = DateTime.getNowUTC()
@@ -770,7 +768,7 @@
 DTSTART;TZID=US/Pacific:20100304T120000
 DTSTAMP:20100303T195203Z
 SEQUENCE:2
-X-APPLE-DROPBOX:/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/dropbox/F2F14D94-B944-43D9-8F6F-97F95B2764CA.dropbox
+X-APPLE-DROPBOX:/calendars/__uids__/C76DB741-5A2A-4239-8112-10CF152AFCA4/dropbox/F2F14D94-B944-43D9-8F6F-97F95B2764CA.dropbox
 END:VEVENT
 END:VCALENDAR
 """.replace("\n", "\r\n")
@@ -800,8 +798,8 @@
     """
     Tests for purging the data belonging to a given principal
     """
-    uid = "6423F94A-6B76-4A3A-815B-D52CFD77935D"
-    uid2 = "37DB0C90-4DB1-4932-BC69-3DAB66F374F5"
+    uid = "C76DB741-5A2A-4239-8112-10CF152AFCA4"
+    uid2 = "FFED7B62-2E08-496E-BD32-B2F95FFDDB6B"
 
     metadata = {
         "accessMode": "PUBLIC",
@@ -832,48 +830,35 @@
 
         # Add attachment to attachment.ics
         self._sqlCalendarStore._dropbox_ok = True
-        home = (yield txn.calendarHomeWithUID(self.uid))
-        calendar = (yield home.calendarWithName("calendar1"))
-        event = (yield calendar.calendarObjectWithName("attachment.ics"))
-        attachment = (yield event.createAttachmentWithName("attachment.txt"))
+        home = yield txn.calendarHomeWithUID(self.uid)
+        calendar = yield home.calendarWithName("calendar1")
+        event = yield calendar.calendarObjectWithName("attachment.ics")
+        attachment = yield event.createAttachmentWithName("attachment.txt")
         t = attachment.store(MimeType("text", "x-fixture"))
         t.write("attachment")
         t.write(" text")
-        (yield t.loseConnection())
+        yield t.loseConnection()
         self._sqlCalendarStore._dropbox_ok = False
 
         # Share calendars each way
-        home2 = (yield txn.calendarHomeWithUID(self.uid2))
-        calendar2 = (yield home2.calendarWithName("calendar2"))
-        self.sharedName = (yield calendar2.shareWith(home, _BIND_MODE_WRITE))
-        self.sharedName2 = (yield calendar.shareWith(home2, _BIND_MODE_WRITE))
+        home2 = yield txn.calendarHomeWithUID(self.uid2)
+        calendar2 = yield home2.calendarWithName("calendar2")
+        self.sharedName = yield calendar2.shareWith(home, _BIND_MODE_WRITE)
+        self.sharedName2 = yield calendar.shareWith(home2, _BIND_MODE_WRITE)
 
-        (yield txn.commit())
+        yield txn.commit()
 
         txn = self._sqlCalendarStore.newTransaction()
-        home = (yield txn.calendarHomeWithUID(self.uid))
-        calendar2 = (yield home.childWithName(self.sharedName))
+        home = yield txn.calendarHomeWithUID(self.uid)
+        calendar2 = yield home.childWithName(self.sharedName)
         self.assertNotEquals(calendar2, None)
-        home2 = (yield txn.calendarHomeWithUID(self.uid2))
-        calendar1 = (yield home2.childWithName(self.sharedName2))
+        home2 = yield txn.calendarHomeWithUID(self.uid2)
+        calendar1 = yield home2.childWithName(self.sharedName2)
         self.assertNotEquals(calendar1, None)
-        (yield txn.commit())
+        yield txn.commit()
 
 
-    def configure(self):
-        super(PurgePrincipalTests, self).configure()
-        self.patch(config.DirectoryService.params, "xmlFile",
-            os.path.join(
-                os.path.dirname(__file__), "purge", "accounts.xml"
-            )
-        )
-        self.patch(config.ResourceService.params, "xmlFile",
-            os.path.join(
-                os.path.dirname(__file__), "purge", "resources.xml"
-            )
-        )
 
-
     @inlineCallbacks
     def populate(self):
         yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
@@ -888,32 +873,39 @@
 
         # Now you see it
         txn = self._sqlCalendarStore.newTransaction()
-        home = (yield txn.calendarHomeWithUID(self.uid))
+        home = yield txn.calendarHomeWithUID(self.uid)
         self.assertNotEquals(home, None)
-        (yield txn.commit())
+        yield txn.commit()
 
-        count, ignored = (yield PurgePrincipalService.purgeUIDs(self.storeUnderTest(), self.directory,
+        count = (yield PurgePrincipalService.purgeUIDs(self.storeUnderTest(), self.directory,
             self.rootResource, (self.uid,), verbose=False, proxies=False, completely=True))
         self.assertEquals(count, 2) # 2 events
 
         # Now you don't
         txn = self._sqlCalendarStore.newTransaction()
-        home = (yield txn.calendarHomeWithUID(self.uid))
+        home = yield txn.calendarHomeWithUID(self.uid)
         self.assertEquals(home, None)
         # Verify calendar1 was unshared to uid2
-        home2 = (yield txn.calendarHomeWithUID(self.uid2))
+        home2 = yield txn.calendarHomeWithUID(self.uid2)
         self.assertEquals((yield home2.childWithName(self.sharedName)), None)
-        (yield txn.commit())
+        yield txn.commit()
 
-        count, ignored = (yield PurgePrincipalService.purgeUIDs(self.storeUnderTest(), self.directory,
-            self.rootResource, (self.uid,), verbose=False, proxies=False, completely=True))
+        count = yield PurgePrincipalService.purgeUIDs(
+            self.storeUnderTest(),
+            self.directory,
+            self.rootResource,
+            (self.uid,),
+            verbose=False,
+            proxies=False,
+            completely=True
+        )
         self.assertEquals(count, 0)
 
         # And you still don't (making sure it's not provisioned)
         txn = self._sqlCalendarStore.newTransaction()
-        home = (yield txn.calendarHomeWithUID(self.uid))
+        home = yield txn.calendarHomeWithUID(self.uid)
         self.assertEquals(home, None)
-        (yield txn.commit())
+        yield txn.commit()
 
 
     @inlineCallbacks
@@ -928,11 +920,11 @@
         txn = self._sqlCalendarStore.newTransaction()
         home = (yield txn.calendarHomeWithUID(self.uid))
         self.assertNotEquals(home, None)
-        (yield txn.commit())
+        yield txn.commit()
 
-        count, ignored = (yield PurgePrincipalService.purgeUIDs(self.storeUnderTest(), self.directory,
+        count = (yield PurgePrincipalService.purgeUIDs(self.storeUnderTest(), self.directory,
             self.rootResource, (self.uid,), verbose=False, proxies=False, completely=False))
-        self.assertEquals(count, 1) # 2 events
+        self.assertEquals(count, 1) # 1 event
 
         # Now you still see it
         txn = self._sqlCalendarStore.newTransaction()
@@ -941,14 +933,14 @@
         # Verify calendar1 was unshared to uid2
         home2 = (yield txn.calendarHomeWithUID(self.uid2))
         self.assertEquals((yield home2.childWithName(self.sharedName)), None)
-        (yield txn.commit())
+        yield txn.commit()
 
-        count, ignored = (yield PurgePrincipalService.purgeUIDs(self.storeUnderTest(), self.directory,
-            self.rootResource, (self.uid,), verbose=False, proxies=False, completely=False))
+        count = yield PurgePrincipalService.purgeUIDs(self.storeUnderTest(), self.directory,
+            self.rootResource, (self.uid,), verbose=False, proxies=False, completely=False)
         self.assertEquals(count, 1)
 
         # And you still do
         txn = self._sqlCalendarStore.newTransaction()
         home = (yield txn.calendarHomeWithUID(self.uid))
         self.assertNotEquals(home, None)
-        (yield txn.commit())
+        yield txn.commit()

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_purge_old_events.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_purge_old_events.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_purge_old_events.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -18,25 +18,22 @@
 Tests for calendarserver.tools.purge
 """
 
-from calendarserver.tools.purge import PurgeOldEventsService, PurgeAttachmentsService, \
-    PurgePrincipalService
+import os
 
+from calendarserver.tools.purge import (
+    PurgeOldEventsService, PurgeAttachmentsService, PurgePrincipalService
+)
 from pycalendar.datetime import DateTime
 from pycalendar.timezone import Timezone
-
 from twext.enterprise.dal.syntax import Update, Delete
-from txweb2.http_headers import MimeType
-
 from twisted.internet.defer import inlineCallbacks, returnValue
-
 from twistedcaldav.config import config
 from twistedcaldav.test.util import StoreTestCase
 from twistedcaldav.vcard import Component as VCardComponent
-
 from txdav.common.datastore.sql_tables import schema
 from txdav.common.datastore.test.util import populateCalendarsFrom
+from txweb2.http_headers import MimeType
 
-import os
 
 
 now = DateTime.getToday().getYear()
@@ -415,16 +412,16 @@
         # Turn off delayed indexing option so we can have some useful tests
         self.patch(config, "FreeBusyIndexDelayedExpand", False)
 
-        self.patch(config.DirectoryService.params, "xmlFile",
-            os.path.join(
-                os.path.dirname(__file__), "purge", "accounts.xml"
-            )
-        )
-        self.patch(config.ResourceService.params, "xmlFile",
-            os.path.join(
-                os.path.dirname(__file__), "purge", "resources.xml"
-            )
-        )
+        # self.patch(config.DirectoryService.params, "xmlFile",
+        #     os.path.join(
+        #         os.path.dirname(__file__), "purge", "accounts.xml"
+        #     )
+        # )
+        # self.patch(config.ResourceService.params, "xmlFile",
+        #     os.path.join(
+        #         os.path.dirname(__file__), "purge", "resources.xml"
+        #     )
+        # )
 
 
     @inlineCallbacks
@@ -679,9 +676,9 @@
         (yield txn.commit())
 
         # Purge home1
-        total, ignored = (yield PurgePrincipalService.purgeUIDs(self._sqlCalendarStore, self.directory,
+        total = yield PurgePrincipalService.purgeUIDs(self._sqlCalendarStore, self.directory,
             self.rootResource, ("home1",), verbose=False, proxies=False,
-            when=DateTime(now, 4, 1, 12, 0, 0, 0, Timezone(utc=True))))
+            when=DateTime(now, 4, 1, 12, 0, 0, 0, Timezone(utc=True)))
 
         # 4 items deleted: 3 events and 1 vcard
         self.assertEquals(total, 4)
@@ -716,8 +713,8 @@
         (yield txn.commit())
 
         # Purge home1 completely
-        total, ignored = (yield PurgePrincipalService.purgeUIDs(self._sqlCalendarStore, self.directory,
-            self.rootResource, ("home1",), verbose=False, proxies=False, completely=True))
+        total = yield PurgePrincipalService.purgeUIDs(self._sqlCalendarStore, self.directory,
+            self.rootResource, ("home1",), verbose=False, proxies=False, completely=True)
 
         # 9 items deleted: 8 events and 1 vcard
         self.assertEquals(total, 9)

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_resources.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_resources.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/test/test_resources.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -20,19 +20,18 @@
     from twisted.internet.defer import inlineCallbacks, succeed
     from twistedcaldav.directory.directory import DirectoryService
     from twistedcaldav.test.util import TestCase
-    import dsattributes
-    strGUID = dsattributes.kDS1AttrGeneratedUID
-    strName = dsattributes.kDS1AttrDistinguishedName
-    RUN_TESTS = True
+    strGUID = "dsAttrTypeStandard:GeneratedUID"
+    strName = "dsAttrTypeStandard:RealName"
+
 except ImportError:
-    RUN_TESTS = False
+    pass
 
-
-
-if RUN_TESTS:
+else:
     class StubDirectoryRecord(object):
 
-        def __init__(self, recordType, guid=None, shortNames=None, fullName=None):
+        def __init__(
+            self, recordType, guid=None, shortNames=None, fullName=None
+        ):
             self.recordType = recordType
             self.guid = guid
             self.shortNames = shortNames
@@ -51,13 +50,16 @@
         def createRecords(self, data):
             for recordType, recordData in data:
                 guid = recordData["guid"]
-                record = StubDirectoryRecord(recordType, guid=guid,
-                    shortNames=recordData['shortNames'],
-                    fullName=recordData['fullName'])
+                record = StubDirectoryRecord(
+                    recordType, guid=guid,
+                    shortNames=recordData["shortNames"],
+                    fullName=recordData["fullName"]
+                )
                 self.records[guid] = record
 
-        def updateRecord(self, recordType, guid=None, shortNames=None,
-            fullName=None):
+        def updateRecord(
+            self, recordType, guid=None, shortNames=None, fullName=None
+        ):
             pass
 
 
@@ -92,35 +94,46 @@
         def test_migrateResources(self):
 
             data = {
-                    dsattributes.kDSStdRecordTypeResources :
-                    [
-                        ['projector1', {
-                            strGUID : '6C99E240-E915-4012-82FA-99E0F638D7EF',
-                            strName : 'Projector 1'
-                        }],
-                        ['projector2', {
-                            strGUID : '7C99E240-E915-4012-82FA-99E0F638D7EF',
-                            strName : 'Projector 2'
-                        }],
-                    ],
-                    dsattributes.kDSStdRecordTypePlaces :
-                    [
-                        ['office1', {
-                            strGUID : '8C99E240-E915-4012-82FA-99E0F638D7EF',
-                            strName : 'Office 1'
-                        }],
-                    ],
-                }
+                "dsRecTypeStandard:Resources":
+                [
+                    ["projector1", {
+                        strGUID: "6C99E240-E915-4012-82FA-99E0F638D7EF",
+                        strName: "Projector 1"
+                    }],
+                    ["projector2", {
+                        strGUID: "7C99E240-E915-4012-82FA-99E0F638D7EF",
+                        strName: "Projector 2"
+                    }],
+                ],
+                "dsRecTypeStandard:Places":
+                [
+                    ["office1", {
+                        strGUID: "8C99E240-E915-4012-82FA-99E0F638D7EF",
+                        strName: "Office 1"
+                    }],
+                ],
+            }
 
             def queryMethod(sourceService, recordType, verbose=False):
                 return data[recordType]
 
             directoryService = StubDirectoryService(StubAugmentService())
-            yield migrateResources(None, directoryService, queryMethod=queryMethod)
+            yield migrateResources(
+                None, directoryService, queryMethod=queryMethod
+            )
             for guid, recordType in (
-                ('6C99E240-E915-4012-82FA-99E0F638D7EF', DirectoryService.recordType_resources),
-                ('7C99E240-E915-4012-82FA-99E0F638D7EF', DirectoryService.recordType_resources),
-                ('8C99E240-E915-4012-82FA-99E0F638D7EF', DirectoryService.recordType_locations),
+                (
+                    "6C99E240-E915-4012-82FA-99E0F638D7EF",
+                    DirectoryService.recordType_resources
+                ),
+                (
+                    "7C99E240-E915-4012-82FA-99E0F638D7EF",
+                    DirectoryService.recordType_resources
+                ),
+                (
+                    "8C99E240-E915-4012-82FA-99E0F638D7EF",
+                    DirectoryService.recordType_locations
+                ),
             ):
                 self.assertTrue(guid in directoryService.records)
                 record = directoryService.records[guid]
@@ -131,27 +144,44 @@
             #
             # Add more to OD and re-migrate
             #
-            data[dsattributes.kDSStdRecordTypeResources].append(
-                ['projector3', {
-                    strGUID : '9C99E240-E915-4012-82FA-99E0F638D7EF',
-                    strName : 'Projector 3'
+            data["dsRecTypeStandard:Resources"].append(
+                ["projector3", {
+                    strGUID: "9C99E240-E915-4012-82FA-99E0F638D7EF",
+                    strName: "Projector 3"
                 }]
             )
-            data[dsattributes.kDSStdRecordTypePlaces].append(
-                ['office2', {
-                    strGUID : 'AC99E240-E915-4012-82FA-99E0F638D7EF',
-                    strName : 'Office 2'
+            data["dsRecTypeStandard:Places"].append(
+                ["office2", {
+                    strGUID: "AC99E240-E915-4012-82FA-99E0F638D7EF",
+                    strName: "Office 2"
                 }]
             )
 
-            yield migrateResources(None, directoryService, queryMethod=queryMethod)
+            yield migrateResources(
+                None, directoryService, queryMethod=queryMethod
+            )
 
             for guid, recordType in (
-                ('6C99E240-E915-4012-82FA-99E0F638D7EF', DirectoryService.recordType_resources),
-                ('7C99E240-E915-4012-82FA-99E0F638D7EF', DirectoryService.recordType_resources),
-                ('9C99E240-E915-4012-82FA-99E0F638D7EF', DirectoryService.recordType_resources),
-                ('8C99E240-E915-4012-82FA-99E0F638D7EF', DirectoryService.recordType_locations),
-                ('AC99E240-E915-4012-82FA-99E0F638D7EF', DirectoryService.recordType_locations),
+                (
+                    "6C99E240-E915-4012-82FA-99E0F638D7EF",
+                    DirectoryService.recordType_resources
+                ),
+                (
+                    "7C99E240-E915-4012-82FA-99E0F638D7EF",
+                    DirectoryService.recordType_resources
+                ),
+                (
+                    "9C99E240-E915-4012-82FA-99E0F638D7EF",
+                    DirectoryService.recordType_resources
+                ),
+                (
+                    "8C99E240-E915-4012-82FA-99E0F638D7EF",
+                    DirectoryService.recordType_locations
+                ),
+                (
+                    "AC99E240-E915-4012-82FA-99E0F638D7EF",
+                    DirectoryService.recordType_locations
+                ),
             ):
                 self.assertTrue(guid in directoryService.records)
                 record = directoryService.records[guid]

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/util.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/tools/util.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -20,8 +20,6 @@
 
 __all__ = [
     "loadConfig",
-    "getDirectory",
-    "dummyDirectoryRecord",
     "UsageError",
     "booleanArgument",
 ]
@@ -37,26 +35,19 @@
 from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE
 
 
-from twisted.python.filepath import FilePath
-from twisted.python.reflect import namedClass
 from twext.python.log import Logger
 from twisted.internet.defer import inlineCallbacks, returnValue
 
 from txdav.xml import element as davxml
 
-from calendarserver.provision.root import RootResource
 
 from twistedcaldav import memcachepool
-from twistedcaldav.directory import calendaruserproxy
-from twistedcaldav.directory.aggregate import AggregateDirectoryService
-from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
 from txdav.who.groups import schedulePolledGroupCachingUpdate
-from calendarserver.push.notifier import NotifierFactory
 
-from txdav.common.datastore.file import CommonDataStore
 
 log = Logger()
 
+
 def loadConfig(configFileName):
     """
     Helper method for command-line utilities to load configuration plist
@@ -78,145 +69,145 @@
 
 
 
-def getDirectory(config=config):
+# def getDirectory(config=config):
 
-    class MyDirectoryService (AggregateDirectoryService):
-        def getPrincipalCollection(self):
-            if not hasattr(self, "_principalCollection"):
+#     class MyDirectoryService (AggregateDirectoryService):
+#         def getPrincipalCollection(self):
+#             if not hasattr(self, "_principalCollection"):
 
-                if config.Notifications.Enabled:
-                    # FIXME: NotifierFactory needs reference to the store in order
-                    # to get a txn in order to create a Work item
-                    notifierFactory = NotifierFactory(
-                        None, config.ServerHostName,
-                        config.Notifications.CoalesceSeconds,
-                    )
-                else:
-                    notifierFactory = None
+#                 if config.Notifications.Enabled:
+#                     # FIXME: NotifierFactory needs reference to the store in order
+#                     # to get a txn in order to create a Work item
+#                     notifierFactory = NotifierFactory(
+#                         None, config.ServerHostName,
+#                         config.Notifications.CoalesceSeconds,
+#                     )
+#                 else:
+#                     notifierFactory = None
 
-                # Need a data store
-                _newStore = CommonDataStore(FilePath(config.DocumentRoot),
-                    notifierFactory, self, True, False)
-                if notifierFactory is not None:
-                    notifierFactory.store = _newStore
+#                 # Need a data store
+#                 _newStore = CommonDataStore(FilePath(config.DocumentRoot),
+#                     notifierFactory, self, True, False)
+#                 if notifierFactory is not None:
+#                     notifierFactory.store = _newStore
 
-                #
-                # Instantiating a DirectoryCalendarHomeProvisioningResource with a directory
-                # will register it with the directory (still smells like a hack).
-                #
-                # We need that in order to locate calendar homes via the directory.
-                #
-                from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
-                DirectoryCalendarHomeProvisioningResource(self, "/calendars/", _newStore)
+#                 #
+#                 # Instantiating a DirectoryCalendarHomeProvisioningResource with a directory
+#                 # will register it with the directory (still smells like a hack).
+#                 #
+#                 # We need that in order to locate calendar homes via the directory.
+#                 #
+#                 from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
+#                 DirectoryCalendarHomeProvisioningResource(self, "/calendars/", _newStore)
 
-                from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
-                self._principalCollection = DirectoryPrincipalProvisioningResource("/principals/", self)
+#                 from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
+#                 self._principalCollection = DirectoryPrincipalProvisioningResource("/principals/", self)
 
-            return self._principalCollection
+#             return self._principalCollection
 
-        def setPrincipalCollection(self, coll):
-            # See principal.py line 237:  self.directory.principalCollection = self
-            pass
+#         def setPrincipalCollection(self, coll):
+#             # See principal.py line 237:  self.directory.principalCollection = self
+#             pass
 
-        principalCollection = property(getPrincipalCollection, setPrincipalCollection)
+#         principalCollection = property(getPrincipalCollection, setPrincipalCollection)
 
-        def calendarHomeForRecord(self, record):
-            principal = self.principalCollection.principalForRecord(record)
-            if principal:
-                try:
-                    return principal.calendarHome()
-                except AttributeError:
-                    pass
-            return None
+#         def calendarHomeForRecord(self, record):
+#             principal = self.principalCollection.principalForRecord(record)
+#             if principal:
+#                 try:
+#                     return principal.calendarHome()
+#                 except AttributeError:
+#                     pass
+#             return None
 
-        def calendarHomeForShortName(self, recordType, shortName):
-            principal = self.principalCollection.principalForShortName(recordType, shortName)
-            if principal:
-                return principal.calendarHome()
-            return None
+#         def calendarHomeForShortName(self, recordType, shortName):
+#             principal = self.principalCollection.principalForShortName(recordType, shortName)
+#             if principal:
+#                 return principal.calendarHome()
+#             return None
 
-        def principalForCalendarUserAddress(self, cua):
-            return self.principalCollection.principalForCalendarUserAddress(cua)
+#         def principalForCalendarUserAddress(self, cua):
+#             return self.principalCollection.principalForCalendarUserAddress(cua)
 
-        def principalForUID(self, uid):
-            return self.principalCollection.principalForUID(uid)
+#         def principalForUID(self, uid):
+#             return self.principalCollection.principalForUID(uid)
 
-    # Load augment/proxy db classes now
-    if config.AugmentService.type:
-        augmentClass = namedClass(config.AugmentService.type)
-        augmentService = augmentClass(**config.AugmentService.params)
-    else:
-        augmentService = None
+#     # Load augment/proxy db classes now
+#     if config.AugmentService.type:
+#         augmentClass = namedClass(config.AugmentService.type)
+#         augmentService = augmentClass(**config.AugmentService.params)
+#     else:
+#         augmentService = None
 
-    proxydbClass = namedClass(config.ProxyDBService.type)
-    calendaruserproxy.ProxyDBService = proxydbClass(**config.ProxyDBService.params)
+#     proxydbClass = namedClass(config.ProxyDBService.type)
+#     calendaruserproxy.ProxyDBService = proxydbClass(**config.ProxyDBService.params)
 
-    # Wait for directory service to become available
-    BaseDirectoryService = namedClass(config.DirectoryService.type)
-    config.DirectoryService.params.augmentService = augmentService
-    directory = BaseDirectoryService(config.DirectoryService.params)
-    while not directory.isAvailable():
-        sleep(5)
+#     # Wait for directory service to become available
+#     BaseDirectoryService = namedClass(config.DirectoryService.type)
+#     config.DirectoryService.params.augmentService = augmentService
+#     directory = BaseDirectoryService(config.DirectoryService.params)
+#     while not directory.isAvailable():
+#         sleep(5)
 
-    directories = [directory]
+#     directories = [directory]
 
-    if config.ResourceService.Enabled:
-        resourceClass = namedClass(config.ResourceService.type)
-        config.ResourceService.params.augmentService = augmentService
-        resourceDirectory = resourceClass(config.ResourceService.params)
-        resourceDirectory.realmName = directory.realmName
-        directories.append(resourceDirectory)
+#     if config.ResourceService.Enabled:
+#         resourceClass = namedClass(config.ResourceService.type)
+#         config.ResourceService.params.augmentService = augmentService
+#         resourceDirectory = resourceClass(config.ResourceService.params)
+#         resourceDirectory.realmName = directory.realmName
+#         directories.append(resourceDirectory)
 
-    aggregate = MyDirectoryService(directories, None)
-    aggregate.augmentService = augmentService
+#     aggregate = MyDirectoryService(directories, None)
+#     aggregate.augmentService = augmentService
 
-    #
-    # Wire up the resource hierarchy
-    #
-    principalCollection = aggregate.getPrincipalCollection()
-    root = RootResource(
-        config.DocumentRoot,
-        principalCollections=(principalCollection,),
-    )
-    root.putChild("principals", principalCollection)
+#     #
+#     # Wire up the resource hierarchy
+#     #
+#     principalCollection = aggregate.getPrincipalCollection()
+#     root = RootResource(
+#         config.DocumentRoot,
+#         principalCollections=(principalCollection,),
+#     )
+#     root.putChild("principals", principalCollection)
 
-    # Need a data store
-    _newStore = CommonDataStore(FilePath(config.DocumentRoot), None, aggregate, True, False)
+#     # Need a data store
+#     _newStore = CommonDataStore(FilePath(config.DocumentRoot), None, aggregate, True, False)
 
-    from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
-    calendarCollection = DirectoryCalendarHomeProvisioningResource(
-        aggregate, "/calendars/",
-        _newStore,
-    )
-    root.putChild("calendars", calendarCollection)
+#     from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
+#     calendarCollection = DirectoryCalendarHomeProvisioningResource(
+#         aggregate, "/calendars/",
+#         _newStore,
+#     )
+#     root.putChild("calendars", calendarCollection)
 
-    return aggregate
+#     return aggregate
 
 
 
-class DummyDirectoryService (DirectoryService):
-    realmName = ""
-    baseGUID = "51856FD4-5023-4890-94FE-4356C4AAC3E4"
-    def recordTypes(self):
-        return ()
+# class DummyDirectoryService (DirectoryService):
+#     realmName = ""
+#     baseGUID = "51856FD4-5023-4890-94FE-4356C4AAC3E4"
+#     def recordTypes(self):
+#         return ()
 
 
-    def listRecords(self):
-        return ()
+#     def listRecords(self):
+#         return ()
 
 
-    def recordWithShortName(self):
-        return None
+#     def recordWithShortName(self):
+#         return None
 
-dummyDirectoryRecord = DirectoryRecord(
-    service=DummyDirectoryService(),
-    recordType="dummy",
-    guid="8EF0892F-7CB6-4B8E-B294-7C5A5321136A",
-    shortNames=("dummy",),
-    fullName="Dummy McDummerson",
-    firstName="Dummy",
-    lastName="McDummerson",
-)
+# dummyDirectoryRecord = DirectoryRecord(
+#     service=DummyDirectoryService(),
+#     recordType="dummy",
+#     guid="8EF0892F-7CB6-4B8E-B294-7C5A5321136A",
+#     shortNames=("dummy",),
+#     fullName="Dummy McDummerson",
+#     firstName="Dummy",
+#     lastName="McDummerson",
+# )
 
 class UsageError (StandardError):
     pass
@@ -334,6 +325,7 @@
 
 
 
+@inlineCallbacks
 def principalForPrincipalID(principalID, checkOnly=False, directory=None):
 
     # Allow a directory parameter to be passed in, but default to config.directory
@@ -351,16 +343,16 @@
             raise ValueError("Can't resolve all paths yet")
 
         if checkOnly:
-            return None
+            returnValue(None)
 
-        return directory.principalCollection.principalForUID(uid)
+        returnValue((yield directory.principalCollection.principalForUID(uid)))
 
     if principalID.startswith("("):
         try:
             i = principalID.index(")")
 
             if checkOnly:
-                return None
+                returnValue(None)
 
             recordType = principalID[1:i]
             shortName = principalID[i + 1:]
@@ -368,34 +360,87 @@
             if not recordType or not shortName or "(" in recordType:
                 raise ValueError()
 
-            return directory.principalCollection.principalForShortName(recordType, shortName)
+            returnValue((yield directory.principalCollection.principalForShortName(recordType, shortName)))
 
         except ValueError:
             pass
 
     if ":" in principalID:
         if checkOnly:
-            return None
+            returnValue(None)
 
         recordType, shortName = principalID.split(":", 1)
 
-        return directory.principalCollection.principalForShortName(recordType, shortName)
+        returnValue((yield directory.principalCollection.principalForShortName(recordType, shortName)))
 
     try:
         UUID(principalID)
 
         if checkOnly:
-            return None
+            returnValue(None)
 
-        x = directory.principalCollection.principalForUID(principalID)
-        return x
+        returnValue((yield directory.principalCollection.principalForUID(principalID)))
     except ValueError:
         pass
 
     raise ValueError("Invalid principal identifier: %s" % (principalID,))
 
 
+@inlineCallbacks
+def recordForPrincipalID(directory, principalID, checkOnly=False):
 
+    if principalID.startswith("/"):
+        segments = principalID.strip("/").split("/")
+        if (len(segments) == 3 and
+            segments[0] == "principals" and segments[1] == "__uids__"):
+            uid = segments[2]
+        else:
+            raise ValueError("Can't resolve all paths yet")
+
+        if checkOnly:
+            returnValue(None)
+
+        returnValue((yield directory.recordWithUID(uid)))
+
+    if principalID.startswith("("):
+        try:
+            i = principalID.index(")")
+
+            if checkOnly:
+                returnValue(None)
+
+            recordType = directory.oldNameToRecordType(principalID[1:i])
+            shortName = principalID[i + 1:]
+
+            if not recordType or not shortName or "(" in recordType:
+                raise ValueError()
+
+            returnValue((yield directory.recordWithShortName(recordType, shortName)))
+
+        except ValueError:
+            pass
+
+    if ":" in principalID:
+        if checkOnly:
+            returnValue(None)
+
+        recordType, shortName = principalID.split(":", 1)
+        recordType = directory.oldNameToRecordType(recordType)
+
+        returnValue((yield directory.recordWithShortName(recordType, shortName)))
+
+    try:
+        if checkOnly:
+            returnValue(None)
+
+        returnValue((yield directory.recordWithUID(principalID)))
+    except ValueError:
+        pass
+
+    raise ValueError("Invalid principal identifier: %s" % (principalID,))
+
+
+
 def proxySubprincipal(principal, proxyType):
     return principal.getChild("calendar-proxy-" + proxyType)
 
@@ -501,12 +546,19 @@
 
 
 def prettyPrincipal(principal):
-    record = principal.record
-    return "\"%s\" (%s:%s)" % (record.fullName, record.recordType,
-        record.shortNames[0])
+    return prettyRecord(principal.record)
 
 
+def prettyRecord(record):
+    return "\"{d}\" {uid} ({rt}) {sn}".format(
+        d=record.displayName,
+        rt=record.recordType.name,
+        uid=record.uid,
+        sn=(", ".join(record.shortNames))
+    )
 
+
+
 class ProxyError(Exception):
     """
     Raised when proxy assignments cannot be performed

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/webadmin/principals.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/webadmin/principals.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/webadmin/principals.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -121,16 +121,17 @@
         self._store = store
 
 
+    @inlineCallbacks
     def getChild(self, name):
         if name == "":
-            return self
+            returnValue(self)
 
-        record = self._directory.recordWithUID(name)
+        record = yield self._directory.recordWithUID(name)
 
         if record:
-            return PrincipalResource(record, self._store)
+            returnValue(PrincipalResource(record, self._store))
         else:
-            return None
+            returnValue(None)
 
 
 

Modified: CalendarServer/branches/users/sagen/move2who-5/calendarserver/webcal/resource.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/calendarserver/webcal/resource.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/calendarserver/webcal/resource.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -48,15 +48,17 @@
 class WebCalendarResource (ReadOnlyResourceMixIn, DAVFile):
 
     def defaultAccessControlList(self):
-        return davxml.ACL(
-            davxml.ACE(
-                davxml.Principal(davxml.Authenticated()),
-                davxml.Grant(
-                    davxml.Privilege(davxml.Read()),
+        return succeed(
+            davxml.ACL(
+                davxml.ACE(
+                    davxml.Principal(davxml.Authenticated()),
+                    davxml.Grant(
+                        davxml.Privilege(davxml.Read()),
+                    ),
+                    davxml.Protected(),
+                    TwistedACLInheritable(),
                 ),
-                davxml.Protected(),
-                TwistedACLInheritable(),
-            ),
+            )
         )
 
 

Modified: CalendarServer/branches/users/sagen/move2who-5/conf/auth/accounts-test.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/conf/auth/accounts-test.xml	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/conf/auth/accounts-test.xml	2014-04-04 17:20:27 UTC (rev 13158)
@@ -16,174 +16,1716 @@
 limitations under the License.
  -->
 
-<!DOCTYPE accounts SYSTEM "accounts.dtd">
-
-<accounts realm="Test Realm">
-  <user>
-    <uid>admin</uid>
-    <guid>admin</guid>
+<directory realm="Test Realm">
+<record>
+    <uid>0C8BDE62-E600-4696-83D3-8B5ECABDFD2E</uid>
+    <guid>0C8BDE62-E600-4696-83D3-8B5ECABDFD2E</guid>
+    <short-name>admin</short-name>
     <password>admin</password>
-    <name>Super User</name>
-    <first-name>Super</first-name>
-    <last-name>User</last-name>
-  </user>
-  <user>
-    <uid>apprentice</uid>
-    <guid>apprentice</guid>
+    <full-name>Super User</full-name>
+    <email>admin at example.com</email>
+</record>
+<record>
+    <uid>29B6C503-11DF-43EC-8CCA-40C7003149CE</uid>
+    <guid>29B6C503-11DF-43EC-8CCA-40C7003149CE</guid>
+    <short-name>apprentice</short-name>
     <password>apprentice</password>
-    <name>Apprentice Super User</name>
-    <first-name>Apprentice</first-name>
-    <last-name>Super User</last-name>
-  </user>
-  <user>
-    <uid>wsanchez</uid>
-    <guid>wsanchez</guid>
-    <email-address>wsanchez at example.com</email-address>
-    <password>test</password>
-    <name>Wilfredo Sanchez Vega</name>
-    <first-name>Wilfredo</first-name>
-    <last-name>Sanchez Vega</last-name>
-  </user>
-  <user>
-    <uid>cdaboo</uid>
-    <guid>cdaboo</guid>
-    <email-address>cdaboo at example.com</email-address>
-    <password>test</password>
-    <name>Cyrus Daboo</name>
-    <first-name>Cyrus</first-name>
-    <last-name>Daboo</last-name>
-  </user>
-  <user>
-    <uid>sagen</uid>
-    <guid>sagen</guid>
-    <email-address>sagen at example.com</email-address>
-    <password>test</password>
-    <name>Morgen Sagen</name>
-    <first-name>Morgen</first-name>
-    <last-name>Sagen</last-name>
-  </user>
-  <user>
-    <uid>dre</uid>
-    <guid>andre</guid>
-    <email-address>dre at example.com</email-address>
-    <password>test</password>
-    <name>Andre LaBranche</name>
-    <first-name>Andre</first-name>
-    <last-name>LaBranche</last-name>
-  </user>
-  <user>
-    <uid>glyph</uid>
-    <guid>glyph</guid>
-    <email-address>glyph at example.com</email-address>
-    <password>test</password>
-    <name>Glyph Lefkowitz</name>
-    <first-name>Glyph</first-name>
-    <last-name>Lefkowitz</last-name>
-  </user>
-  <user>
-    <uid>i18nuser</uid>
-    <guid>i18nuser</guid>
-    <email-address>i18nuser at example.com</email-address>
+    <full-name>Apprentice Super User</full-name>
+    <email>apprentice at example.com</email>
+</record>
+<record>
+    <uid>860B3EE9-6D7C-4296-9639-E6B998074A78</uid>
+    <guid>860B3EE9-6D7C-4296-9639-E6B998074A78</guid>
+    <short-name>i18nuser</short-name>
     <password>i18nuser</password>
-    <name>まだ</name>
-    <first-name>ま</first-name>
-    <last-name>だ</last-name>
-  </user>
-  <user repeat="101">
-    <uid>user%02d</uid>
-    <uid>User %02d</uid>
-    <guid>user%02d</guid>
-    <password>user%02d</password>
-    <name>User %02d</name>
-    <first-name>User</first-name>
-    <last-name>%02d</last-name>
-    <email-address>user%02d at example.com</email-address>
-  </user>
-  <user repeat="10">
-    <uid>public%02d</uid>
-    <guid>public%02d</guid>
-    <password>public%02d</password>
-    <name>Public %02d</name>
-    <first-name>Public</first-name>
-    <last-name>%02d</last-name>
-  </user>
-  <group>
-    <uid>group01</uid>
-    <guid>group01</guid>
-    <password>group01</password>
-    <name>Group 01</name>
-    <members>
-      <member type="users">user01</member>
-    </members>
-  </group>
-  <group>
-    <uid>group02</uid>
-    <guid>group02</guid>
-    <password>group02</password>
-    <name>Group 02</name>
-    <members>
-      <member type="users">user06</member>
-      <member type="users">user07</member>
-    </members>
-  </group>
-  <group>
-    <uid>group03</uid>
-    <guid>group03</guid>
-    <password>group03</password>
-    <name>Group 03</name>
-    <members>
-      <member type="users">user08</member>
-      <member type="users">user09</member>
-    </members>
-  </group>
-  <group>
-    <uid>group04</uid>
-    <guid>group04</guid>
-    <password>group04</password>
-    <name>Group 04</name>
-    <members>
-      <member type="groups">group02</member>
-      <member type="groups">group03</member>
-      <member type="users">user10</member>
-    </members>
-  </group>
-  <group> <!-- delegategroup -->
-    <uid>group05</uid>
-    <guid>group05</guid>
-    <password>group05</password>
-    <name>Group 05</name>
-    <members>
-      <member type="groups">group06</member>
-      <member type="users">user20</member>
-    </members>
-  </group>
-  <group> <!-- delegatesubgroup -->
-    <uid>group06</uid>
-    <guid>group06</guid>
-    <password>group06</password>
-    <name>Group 06</name>
-    <members>
-      <member type="users">user21</member>
-    </members>
-  </group>
-  <group> <!-- readonlydelegategroup -->
-    <uid>group07</uid>
-    <guid>group07</guid>
-    <password>group07</password>
-    <name>Group 07</name>
-    <members>
-      <member type="users">user22</member>
-      <member type="users">user23</member>
-      <member type="users">user24</member>
-    </members>
-  </group>
-  <group>
-    <uid>disabledgroup</uid>
-    <guid>disabledgroup</guid>
-    <password>disabledgroup</password>
-    <name>Disabled Group</name>
-    <members>
-      <member type="users">user01</member>
-    </members>
-  </group>
-</accounts>
+    <full-name>まだ</full-name>
+    <email>i18nuser at example.com</email>
+</record>
+<record type="user">
+    <short-name>user01</short-name>
+    <uid>10000000-0000-0000-0000-000000000001</uid>
+    <guid>10000000-0000-0000-0000-000000000001</guid>
+    <password>user01</password>
+    <full-name>User 01</full-name>
+    <email>user01 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user02</short-name>
+    <uid>10000000-0000-0000-0000-000000000002</uid>
+    <guid>10000000-0000-0000-0000-000000000002</guid>
+    <password>user02</password>
+    <full-name>User 02</full-name>
+    <email>user02 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user03</short-name>
+    <uid>10000000-0000-0000-0000-000000000003</uid>
+    <guid>10000000-0000-0000-0000-000000000003</guid>
+    <password>user03</password>
+    <full-name>User 03</full-name>
+    <email>user03 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user04</short-name>
+    <uid>10000000-0000-0000-0000-000000000004</uid>
+    <guid>10000000-0000-0000-0000-000000000004</guid>
+    <password>user04</password>
+    <full-name>User 04</full-name>
+    <email>user04 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user05</short-name>
+    <uid>10000000-0000-0000-0000-000000000005</uid>
+    <guid>10000000-0000-0000-0000-000000000005</guid>
+    <password>user05</password>
+    <full-name>User 05</full-name>
+    <email>user05 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user06</short-name>
+    <uid>10000000-0000-0000-0000-000000000006</uid>
+    <guid>10000000-0000-0000-0000-000000000006</guid>
+    <password>user06</password>
+    <full-name>User 06</full-name>
+    <email>user06 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user07</short-name>
+    <uid>10000000-0000-0000-0000-000000000007</uid>
+    <guid>10000000-0000-0000-0000-000000000007</guid>
+    <password>user07</password>
+    <full-name>User 07</full-name>
+    <email>user07 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user08</short-name>
+    <uid>10000000-0000-0000-0000-000000000008</uid>
+    <guid>10000000-0000-0000-0000-000000000008</guid>
+    <password>user08</password>
+    <full-name>User 08</full-name>
+    <email>user08 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user09</short-name>
+    <uid>10000000-0000-0000-0000-000000000009</uid>
+    <guid>10000000-0000-0000-0000-000000000009</guid>
+    <password>user09</password>
+    <full-name>User 09</full-name>
+    <email>user09 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user10</short-name>
+    <uid>10000000-0000-0000-0000-000000000010</uid>
+    <guid>10000000-0000-0000-0000-000000000010</guid>
+    <password>user10</password>
+    <full-name>User 10</full-name>
+    <email>user10 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user11</short-name>
+    <uid>10000000-0000-0000-0000-000000000011</uid>
+    <guid>10000000-0000-0000-0000-000000000011</guid>
+    <password>user11</password>
+    <full-name>User 11</full-name>
+    <email>user11 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user12</short-name>
+    <uid>10000000-0000-0000-0000-000000000012</uid>
+    <guid>10000000-0000-0000-0000-000000000012</guid>
+    <password>user12</password>
+    <full-name>User 12</full-name>
+    <email>user12 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user13</short-name>
+    <uid>10000000-0000-0000-0000-000000000013</uid>
+    <guid>10000000-0000-0000-0000-000000000013</guid>
+    <password>user13</password>
+    <full-name>User 13</full-name>
+    <email>user13 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user14</short-name>
+    <uid>10000000-0000-0000-0000-000000000014</uid>
+    <guid>10000000-0000-0000-0000-000000000014</guid>
+    <password>user14</password>
+    <full-name>User 14</full-name>
+    <email>user14 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user15</short-name>
+    <uid>10000000-0000-0000-0000-000000000015</uid>
+    <guid>10000000-0000-0000-0000-000000000015</guid>
+    <password>user15</password>
+    <full-name>User 15</full-name>
+    <email>user15 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user16</short-name>
+    <uid>10000000-0000-0000-0000-000000000016</uid>
+    <guid>10000000-0000-0000-0000-000000000016</guid>
+    <password>user16</password>
+    <full-name>User 16</full-name>
+    <email>user16 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user17</short-name>
+    <uid>10000000-0000-0000-0000-000000000017</uid>
+    <guid>10000000-0000-0000-0000-000000000017</guid>
+    <password>user17</password>
+    <full-name>User 17</full-name>
+    <email>user17 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user18</short-name>
+    <uid>10000000-0000-0000-0000-000000000018</uid>
+    <guid>10000000-0000-0000-0000-000000000018</guid>
+    <password>user18</password>
+    <full-name>User 18</full-name>
+    <email>user18 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user19</short-name>
+    <uid>10000000-0000-0000-0000-000000000019</uid>
+    <guid>10000000-0000-0000-0000-000000000019</guid>
+    <password>user19</password>
+    <full-name>User 19</full-name>
+    <email>user19 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user20</short-name>
+    <uid>10000000-0000-0000-0000-000000000020</uid>
+    <guid>10000000-0000-0000-0000-000000000020</guid>
+    <password>user20</password>
+    <full-name>User 20</full-name>
+    <email>user20 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user21</short-name>
+    <uid>10000000-0000-0000-0000-000000000021</uid>
+    <guid>10000000-0000-0000-0000-000000000021</guid>
+    <password>user21</password>
+    <full-name>User 21</full-name>
+    <email>user21 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user22</short-name>
+    <uid>10000000-0000-0000-0000-000000000022</uid>
+    <guid>10000000-0000-0000-0000-000000000022</guid>
+    <password>user22</password>
+    <full-name>User 22</full-name>
+    <email>user22 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user23</short-name>
+    <uid>10000000-0000-0000-0000-000000000023</uid>
+    <guid>10000000-0000-0000-0000-000000000023</guid>
+    <password>user23</password>
+    <full-name>User 23</full-name>
+    <email>user23 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user24</short-name>
+    <uid>10000000-0000-0000-0000-000000000024</uid>
+    <guid>10000000-0000-0000-0000-000000000024</guid>
+    <password>user24</password>
+    <full-name>User 24</full-name>
+    <email>user24 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user25</short-name>
+    <uid>10000000-0000-0000-0000-000000000025</uid>
+    <guid>10000000-0000-0000-0000-000000000025</guid>
+    <password>user25</password>
+    <full-name>User 25</full-name>
+    <email>user25 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user26</short-name>
+    <uid>10000000-0000-0000-0000-000000000026</uid>
+    <guid>10000000-0000-0000-0000-000000000026</guid>
+    <password>user26</password>
+    <full-name>User 26</full-name>
+    <email>user26 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user27</short-name>
+    <uid>10000000-0000-0000-0000-000000000027</uid>
+    <guid>10000000-0000-0000-0000-000000000027</guid>
+    <password>user27</password>
+    <full-name>User 27</full-name>
+    <email>user27 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user28</short-name>
+    <uid>10000000-0000-0000-0000-000000000028</uid>
+    <guid>10000000-0000-0000-0000-000000000028</guid>
+    <password>user28</password>
+    <full-name>User 28</full-name>
+    <email>user28 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user29</short-name>
+    <uid>10000000-0000-0000-0000-000000000029</uid>
+    <guid>10000000-0000-0000-0000-000000000029</guid>
+    <password>user29</password>
+    <full-name>User 29</full-name>
+    <email>user29 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user30</short-name>
+    <uid>10000000-0000-0000-0000-000000000030</uid>
+    <guid>10000000-0000-0000-0000-000000000030</guid>
+    <password>user30</password>
+    <full-name>User 30</full-name>
+    <email>user30 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user31</short-name>
+    <uid>10000000-0000-0000-0000-000000000031</uid>
+    <guid>10000000-0000-0000-0000-000000000031</guid>
+    <password>user31</password>
+    <full-name>User 31</full-name>
+    <email>user31 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user32</short-name>
+    <uid>10000000-0000-0000-0000-000000000032</uid>
+    <guid>10000000-0000-0000-0000-000000000032</guid>
+    <password>user32</password>
+    <full-name>User 32</full-name>
+    <email>user32 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user33</short-name>
+    <uid>10000000-0000-0000-0000-000000000033</uid>
+    <guid>10000000-0000-0000-0000-000000000033</guid>
+    <password>user33</password>
+    <full-name>User 33</full-name>
+    <email>user33 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user34</short-name>
+    <uid>10000000-0000-0000-0000-000000000034</uid>
+    <guid>10000000-0000-0000-0000-000000000034</guid>
+    <password>user34</password>
+    <full-name>User 34</full-name>
+    <email>user34 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user35</short-name>
+    <uid>10000000-0000-0000-0000-000000000035</uid>
+    <guid>10000000-0000-0000-0000-000000000035</guid>
+    <password>user35</password>
+    <full-name>User 35</full-name>
+    <email>user35 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user36</short-name>
+    <uid>10000000-0000-0000-0000-000000000036</uid>
+    <guid>10000000-0000-0000-0000-000000000036</guid>
+    <password>user36</password>
+    <full-name>User 36</full-name>
+    <email>user36 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user37</short-name>
+    <uid>10000000-0000-0000-0000-000000000037</uid>
+    <guid>10000000-0000-0000-0000-000000000037</guid>
+    <password>user37</password>
+    <full-name>User 37</full-name>
+    <email>user37 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user38</short-name>
+    <uid>10000000-0000-0000-0000-000000000038</uid>
+    <guid>10000000-0000-0000-0000-000000000038</guid>
+    <password>user38</password>
+    <full-name>User 38</full-name>
+    <email>user38 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user39</short-name>
+    <uid>10000000-0000-0000-0000-000000000039</uid>
+    <guid>10000000-0000-0000-0000-000000000039</guid>
+    <password>user39</password>
+    <full-name>User 39</full-name>
+    <email>user39 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user40</short-name>
+    <uid>10000000-0000-0000-0000-000000000040</uid>
+    <guid>10000000-0000-0000-0000-000000000040</guid>
+    <password>user40</password>
+    <full-name>User 40</full-name>
+    <email>user40 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user41</short-name>
+    <uid>10000000-0000-0000-0000-000000000041</uid>
+    <guid>10000000-0000-0000-0000-000000000041</guid>
+    <password>user41</password>
+    <full-name>User 41</full-name>
+    <email>user41 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user42</short-name>
+    <uid>10000000-0000-0000-0000-000000000042</uid>
+    <guid>10000000-0000-0000-0000-000000000042</guid>
+    <password>user42</password>
+    <full-name>User 42</full-name>
+    <email>user42 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user43</short-name>
+    <uid>10000000-0000-0000-0000-000000000043</uid>
+    <guid>10000000-0000-0000-0000-000000000043</guid>
+    <password>user43</password>
+    <full-name>User 43</full-name>
+    <email>user43 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user44</short-name>
+    <uid>10000000-0000-0000-0000-000000000044</uid>
+    <guid>10000000-0000-0000-0000-000000000044</guid>
+    <password>user44</password>
+    <full-name>User 44</full-name>
+    <email>user44 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user45</short-name>
+    <uid>10000000-0000-0000-0000-000000000045</uid>
+    <guid>10000000-0000-0000-0000-000000000045</guid>
+    <password>user45</password>
+    <full-name>User 45</full-name>
+    <email>user45 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user46</short-name>
+    <uid>10000000-0000-0000-0000-000000000046</uid>
+    <guid>10000000-0000-0000-0000-000000000046</guid>
+    <password>user46</password>
+    <full-name>User 46</full-name>
+    <email>user46 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user47</short-name>
+    <uid>10000000-0000-0000-0000-000000000047</uid>
+    <guid>10000000-0000-0000-0000-000000000047</guid>
+    <password>user47</password>
+    <full-name>User 47</full-name>
+    <email>user47 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user48</short-name>
+    <uid>10000000-0000-0000-0000-000000000048</uid>
+    <guid>10000000-0000-0000-0000-000000000048</guid>
+    <password>user48</password>
+    <full-name>User 48</full-name>
+    <email>user48 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user49</short-name>
+    <uid>10000000-0000-0000-0000-000000000049</uid>
+    <guid>10000000-0000-0000-0000-000000000049</guid>
+    <password>user49</password>
+    <full-name>User 49</full-name>
+    <email>user49 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user50</short-name>
+    <uid>10000000-0000-0000-0000-000000000050</uid>
+    <guid>10000000-0000-0000-0000-000000000050</guid>
+    <password>user50</password>
+    <full-name>User 50</full-name>
+    <email>user50 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user51</short-name>
+    <uid>10000000-0000-0000-0000-000000000051</uid>
+    <guid>10000000-0000-0000-0000-000000000051</guid>
+    <password>user51</password>
+    <full-name>User 51</full-name>
+    <email>user51 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user52</short-name>
+    <uid>10000000-0000-0000-0000-000000000052</uid>
+    <guid>10000000-0000-0000-0000-000000000052</guid>
+    <password>user52</password>
+    <full-name>User 52</full-name>
+    <email>user52 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user53</short-name>
+    <uid>10000000-0000-0000-0000-000000000053</uid>
+    <guid>10000000-0000-0000-0000-000000000053</guid>
+    <password>user53</password>
+    <full-name>User 53</full-name>
+    <email>user53 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user54</short-name>
+    <uid>10000000-0000-0000-0000-000000000054</uid>
+    <guid>10000000-0000-0000-0000-000000000054</guid>
+    <password>user54</password>
+    <full-name>User 54</full-name>
+    <email>user54 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user55</short-name>
+    <uid>10000000-0000-0000-0000-000000000055</uid>
+    <guid>10000000-0000-0000-0000-000000000055</guid>
+    <password>user55</password>
+    <full-name>User 55</full-name>
+    <email>user55 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user56</short-name>
+    <uid>10000000-0000-0000-0000-000000000056</uid>
+    <guid>10000000-0000-0000-0000-000000000056</guid>
+    <password>user56</password>
+    <full-name>User 56</full-name>
+    <email>user56 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user57</short-name>
+    <uid>10000000-0000-0000-0000-000000000057</uid>
+    <guid>10000000-0000-0000-0000-000000000057</guid>
+    <password>user57</password>
+    <full-name>User 57</full-name>
+    <email>user57 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user58</short-name>
+    <uid>10000000-0000-0000-0000-000000000058</uid>
+    <guid>10000000-0000-0000-0000-000000000058</guid>
+    <password>user58</password>
+    <full-name>User 58</full-name>
+    <email>user58 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user59</short-name>
+    <uid>10000000-0000-0000-0000-000000000059</uid>
+    <guid>10000000-0000-0000-0000-000000000059</guid>
+    <password>user59</password>
+    <full-name>User 59</full-name>
+    <email>user59 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user60</short-name>
+    <uid>10000000-0000-0000-0000-000000000060</uid>
+    <guid>10000000-0000-0000-0000-000000000060</guid>
+    <password>user60</password>
+    <full-name>User 60</full-name>
+    <email>user60 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user61</short-name>
+    <uid>10000000-0000-0000-0000-000000000061</uid>
+    <guid>10000000-0000-0000-0000-000000000061</guid>
+    <password>user61</password>
+    <full-name>User 61</full-name>
+    <email>user61 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user62</short-name>
+    <uid>10000000-0000-0000-0000-000000000062</uid>
+    <guid>10000000-0000-0000-0000-000000000062</guid>
+    <password>user62</password>
+    <full-name>User 62</full-name>
+    <email>user62 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user63</short-name>
+    <uid>10000000-0000-0000-0000-000000000063</uid>
+    <guid>10000000-0000-0000-0000-000000000063</guid>
+    <password>user63</password>
+    <full-name>User 63</full-name>
+    <email>user63 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user64</short-name>
+    <uid>10000000-0000-0000-0000-000000000064</uid>
+    <guid>10000000-0000-0000-0000-000000000064</guid>
+    <password>user64</password>
+    <full-name>User 64</full-name>
+    <email>user64 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user65</short-name>
+    <uid>10000000-0000-0000-0000-000000000065</uid>
+    <guid>10000000-0000-0000-0000-000000000065</guid>
+    <password>user65</password>
+    <full-name>User 65</full-name>
+    <email>user65 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user66</short-name>
+    <uid>10000000-0000-0000-0000-000000000066</uid>
+    <guid>10000000-0000-0000-0000-000000000066</guid>
+    <password>user66</password>
+    <full-name>User 66</full-name>
+    <email>user66 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user67</short-name>
+    <uid>10000000-0000-0000-0000-000000000067</uid>
+    <guid>10000000-0000-0000-0000-000000000067</guid>
+    <password>user67</password>
+    <full-name>User 67</full-name>
+    <email>user67 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user68</short-name>
+    <uid>10000000-0000-0000-0000-000000000068</uid>
+    <guid>10000000-0000-0000-0000-000000000068</guid>
+    <password>user68</password>
+    <full-name>User 68</full-name>
+    <email>user68 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user69</short-name>
+    <uid>10000000-0000-0000-0000-000000000069</uid>
+    <guid>10000000-0000-0000-0000-000000000069</guid>
+    <password>user69</password>
+    <full-name>User 69</full-name>
+    <email>user69 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user70</short-name>
+    <uid>10000000-0000-0000-0000-000000000070</uid>
+    <guid>10000000-0000-0000-0000-000000000070</guid>
+    <password>user70</password>
+    <full-name>User 70</full-name>
+    <email>user70 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user71</short-name>
+    <uid>10000000-0000-0000-0000-000000000071</uid>
+    <guid>10000000-0000-0000-0000-000000000071</guid>
+    <password>user71</password>
+    <full-name>User 71</full-name>
+    <email>user71 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user72</short-name>
+    <uid>10000000-0000-0000-0000-000000000072</uid>
+    <guid>10000000-0000-0000-0000-000000000072</guid>
+    <password>user72</password>
+    <full-name>User 72</full-name>
+    <email>user72 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user73</short-name>
+    <uid>10000000-0000-0000-0000-000000000073</uid>
+    <guid>10000000-0000-0000-0000-000000000073</guid>
+    <password>user73</password>
+    <full-name>User 73</full-name>
+    <email>user73 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user74</short-name>
+    <uid>10000000-0000-0000-0000-000000000074</uid>
+    <guid>10000000-0000-0000-0000-000000000074</guid>
+    <password>user74</password>
+    <full-name>User 74</full-name>
+    <email>user74 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user75</short-name>
+    <uid>10000000-0000-0000-0000-000000000075</uid>
+    <guid>10000000-0000-0000-0000-000000000075</guid>
+    <password>user75</password>
+    <full-name>User 75</full-name>
+    <email>user75 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user76</short-name>
+    <uid>10000000-0000-0000-0000-000000000076</uid>
+    <guid>10000000-0000-0000-0000-000000000076</guid>
+    <password>user76</password>
+    <full-name>User 76</full-name>
+    <email>user76 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user77</short-name>
+    <uid>10000000-0000-0000-0000-000000000077</uid>
+    <guid>10000000-0000-0000-0000-000000000077</guid>
+    <password>user77</password>
+    <full-name>User 77</full-name>
+    <email>user77 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user78</short-name>
+    <uid>10000000-0000-0000-0000-000000000078</uid>
+    <guid>10000000-0000-0000-0000-000000000078</guid>
+    <password>user78</password>
+    <full-name>User 78</full-name>
+    <email>user78 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user79</short-name>
+    <uid>10000000-0000-0000-0000-000000000079</uid>
+    <guid>10000000-0000-0000-0000-000000000079</guid>
+    <password>user79</password>
+    <full-name>User 79</full-name>
+    <email>user79 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user80</short-name>
+    <uid>10000000-0000-0000-0000-000000000080</uid>
+    <guid>10000000-0000-0000-0000-000000000080</guid>
+    <password>user80</password>
+    <full-name>User 80</full-name>
+    <email>user80 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user81</short-name>
+    <uid>10000000-0000-0000-0000-000000000081</uid>
+    <guid>10000000-0000-0000-0000-000000000081</guid>
+    <password>user81</password>
+    <full-name>User 81</full-name>
+    <email>user81 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user82</short-name>
+    <uid>10000000-0000-0000-0000-000000000082</uid>
+    <guid>10000000-0000-0000-0000-000000000082</guid>
+    <password>user82</password>
+    <full-name>User 82</full-name>
+    <email>user82 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user83</short-name>
+    <uid>10000000-0000-0000-0000-000000000083</uid>
+    <guid>10000000-0000-0000-0000-000000000083</guid>
+    <password>user83</password>
+    <full-name>User 83</full-name>
+    <email>user83 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user84</short-name>
+    <uid>10000000-0000-0000-0000-000000000084</uid>
+    <guid>10000000-0000-0000-0000-000000000084</guid>
+    <password>user84</password>
+    <full-name>User 84</full-name>
+    <email>user84 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user85</short-name>
+    <uid>10000000-0000-0000-0000-000000000085</uid>
+    <guid>10000000-0000-0000-0000-000000000085</guid>
+    <password>user85</password>
+    <full-name>User 85</full-name>
+    <email>user85 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user86</short-name>
+    <uid>10000000-0000-0000-0000-000000000086</uid>
+    <guid>10000000-0000-0000-0000-000000000086</guid>
+    <password>user86</password>
+    <full-name>User 86</full-name>
+    <email>user86 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user87</short-name>
+    <uid>10000000-0000-0000-0000-000000000087</uid>
+    <guid>10000000-0000-0000-0000-000000000087</guid>
+    <password>user87</password>
+    <full-name>User 87</full-name>
+    <email>user87 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user88</short-name>
+    <uid>10000000-0000-0000-0000-000000000088</uid>
+    <guid>10000000-0000-0000-0000-000000000088</guid>
+    <password>user88</password>
+    <full-name>User 88</full-name>
+    <email>user88 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user89</short-name>
+    <uid>10000000-0000-0000-0000-000000000089</uid>
+    <guid>10000000-0000-0000-0000-000000000089</guid>
+    <password>user89</password>
+    <full-name>User 89</full-name>
+    <email>user89 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user90</short-name>
+    <uid>10000000-0000-0000-0000-000000000090</uid>
+    <guid>10000000-0000-0000-0000-000000000090</guid>
+    <password>user90</password>
+    <full-name>User 90</full-name>
+    <email>user90 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user91</short-name>
+    <uid>10000000-0000-0000-0000-000000000091</uid>
+    <guid>10000000-0000-0000-0000-000000000091</guid>
+    <password>user91</password>
+    <full-name>User 91</full-name>
+    <email>user91 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user92</short-name>
+    <uid>10000000-0000-0000-0000-000000000092</uid>
+    <guid>10000000-0000-0000-0000-000000000092</guid>
+    <password>user92</password>
+    <full-name>User 92</full-name>
+    <email>user92 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user93</short-name>
+    <uid>10000000-0000-0000-0000-000000000093</uid>
+    <guid>10000000-0000-0000-0000-000000000093</guid>
+    <password>user93</password>
+    <full-name>User 93</full-name>
+    <email>user93 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user94</short-name>
+    <uid>10000000-0000-0000-0000-000000000094</uid>
+    <guid>10000000-0000-0000-0000-000000000094</guid>
+    <password>user94</password>
+    <full-name>User 94</full-name>
+    <email>user94 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user95</short-name>
+    <uid>10000000-0000-0000-0000-000000000095</uid>
+    <guid>10000000-0000-0000-0000-000000000095</guid>
+    <password>user95</password>
+    <full-name>User 95</full-name>
+    <email>user95 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user96</short-name>
+    <uid>10000000-0000-0000-0000-000000000096</uid>
+    <guid>10000000-0000-0000-0000-000000000096</guid>
+    <password>user96</password>
+    <full-name>User 96</full-name>
+    <email>user96 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user97</short-name>
+    <uid>10000000-0000-0000-0000-000000000097</uid>
+    <guid>10000000-0000-0000-0000-000000000097</guid>
+    <password>user97</password>
+    <full-name>User 97</full-name>
+    <email>user97 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user98</short-name>
+    <uid>10000000-0000-0000-0000-000000000098</uid>
+    <guid>10000000-0000-0000-0000-000000000098</guid>
+    <password>user98</password>
+    <full-name>User 98</full-name>
+    <email>user98 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user99</short-name>
+    <uid>10000000-0000-0000-0000-000000000099</uid>
+    <guid>10000000-0000-0000-0000-000000000099</guid>
+    <password>user99</password>
+    <full-name>User 99</full-name>
+    <email>user99 at example.com</email>
+</record>
+<record type="user">
+    <short-name>user100</short-name>
+    <uid>10000000-0000-0000-0000-000000000100</uid>
+    <guid>10000000-0000-0000-0000-000000000100</guid>
+    <password>user100</password>
+    <full-name>User 100</full-name>
+    <email>user100 at example.com</email>
+</record>
+<record type="user">
+    <short-name>public01</short-name>
+    <uid>50000000-0000-0000-0000-000000000001</uid>
+    <guid>50000000-0000-0000-0000-000000000001</guid>
+    <password>public01</password>
+    <full-name>Public 01</full-name>
+    <email>public01 at example.com</email>
+</record>
+<record type="user">
+    <short-name>public02</short-name>
+    <uid>50000000-0000-0000-0000-000000000002</uid>
+    <guid>50000000-0000-0000-0000-000000000002</guid>
+    <password>public02</password>
+    <full-name>Public 02</full-name>
+    <email>public02 at example.com</email>
+</record>
+<record type="user">
+    <short-name>public03</short-name>
+    <uid>50000000-0000-0000-0000-000000000003</uid>
+    <guid>50000000-0000-0000-0000-000000000003</guid>
+    <password>public03</password>
+    <full-name>Public 03</full-name>
+    <email>public03 at example.com</email>
+</record>
+<record type="user">
+    <short-name>public04</short-name>
+    <uid>50000000-0000-0000-0000-000000000004</uid>
+    <guid>50000000-0000-0000-0000-000000000004</guid>
+    <password>public04</password>
+    <full-name>Public 04</full-name>
+    <email>public04 at example.com</email>
+</record>
+<record type="user">
+    <short-name>public05</short-name>
+    <uid>50000000-0000-0000-0000-000000000005</uid>
+    <guid>50000000-0000-0000-0000-000000000005</guid>
+    <password>public05</password>
+    <full-name>Public 05</full-name>
+    <email>public05 at example.com</email>
+</record>
+<record type="user">
+    <short-name>public06</short-name>
+    <uid>50000000-0000-0000-0000-000000000006</uid>
+    <guid>50000000-0000-0000-0000-000000000006</guid>
+    <password>public06</password>
+    <full-name>Public 06</full-name>
+    <email>public06 at example.com</email>
+</record>
+<record type="user">
+    <short-name>public07</short-name>
+    <uid>50000000-0000-0000-0000-000000000007</uid>
+    <guid>50000000-0000-0000-0000-000000000007</guid>
+    <password>public07</password>
+    <full-name>Public 07</full-name>
+    <email>public07 at example.com</email>
+</record>
+<record type="user">
+    <short-name>public08</short-name>
+    <uid>50000000-0000-0000-0000-000000000008</uid>
+    <guid>50000000-0000-0000-0000-000000000008</guid>
+    <password>public08</password>
+    <full-name>Public 08</full-name>
+    <email>public08 at example.com</email>
+</record>
+<record type="user">
+    <short-name>public09</short-name>
+    <uid>50000000-0000-0000-0000-000000000009</uid>
+    <guid>50000000-0000-0000-0000-000000000009</guid>
+    <password>public09</password>
+    <full-name>Public 09</full-name>
+    <email>public09 at example.com</email>
+</record>
+<record type="user">
+    <short-name>public10</short-name>
+    <uid>50000000-0000-0000-0000-000000000010</uid>
+    <guid>50000000-0000-0000-0000-000000000010</guid>
+    <password>public10</password>
+    <full-name>Public 10</full-name>
+    <email>public10 at example.com</email>
+</record>
+<record type="group">
+    <short-name>group01</short-name>
+    <uid>20000000-0000-0000-0000-000000000001</uid>
+    <guid>20000000-0000-0000-0000-000000000001</guid>
+    <full-name>Group 01</full-name>
+    <email>group01 at example.com</email>
+    <member-uid>10000000-0000-0000-0000-000000000001</member-uid>
+</record>
+<record type="group">
+    <short-name>group02</short-name>
+    <uid>20000000-0000-0000-0000-000000000002</uid>
+    <guid>20000000-0000-0000-0000-000000000002</guid>
+    <full-name>Group 02</full-name>
+    <email>group02 at example.com</email>
+    <member-uid>10000000-0000-0000-0000-000000000006</member-uid>
+    <member-uid>10000000-0000-0000-0000-000000000007</member-uid>
+</record>
+<record type="group">
+    <short-name>group03</short-name>
+    <uid>20000000-0000-0000-0000-000000000003</uid>
+    <guid>20000000-0000-0000-0000-000000000003</guid>
+    <full-name>Group 03</full-name>
+    <email>group03 at example.com</email>
+    <member-uid>10000000-0000-0000-0000-000000000008</member-uid>
+    <member-uid>10000000-0000-0000-0000-000000000009</member-uid>
+</record>
+<record type="group">
+    <short-name>group04</short-name>
+    <uid>20000000-0000-0000-0000-000000000004</uid>
+    <guid>20000000-0000-0000-0000-000000000004</guid>
+    <full-name>Group 04</full-name>
+    <email>group04 at example.com</email>
+    <member-uid>20000000-0000-0000-0000-000000000002</member-uid>
+    <member-uid>20000000-0000-0000-0000-000000000003</member-uid>
+    <member-uid>10000000-0000-0000-0000-000000000010</member-uid>
+</record>
+<record type="group">
+    <short-name>group05</short-name>
+    <uid>20000000-0000-0000-0000-000000000005</uid>
+    <guid>20000000-0000-0000-0000-000000000005</guid>
+    <full-name>Group 05</full-name>
+    <email>group05 at example.com</email>
+    <member-uid>20000000-0000-0000-0000-000000000006</member-uid>
+    <member-uid>10000000-0000-0000-0000-000000000020</member-uid>
+</record>
+<record type="group">
+    <short-name>group06</short-name>
+    <uid>20000000-0000-0000-0000-000000000006</uid>
+    <guid>20000000-0000-0000-0000-000000000006</guid>
+    <full-name>Group 06</full-name>
+    <email>group06 at example.com</email>
+    <member-uid>10000000-0000-0000-0000-000000000021</member-uid>
+</record>
+<record type="group">
+    <short-name>group07</short-name>
+    <uid>20000000-0000-0000-0000-000000000007</uid>
+    <guid>20000000-0000-0000-0000-000000000007</guid>
+    <full-name>Group 07</full-name>
+    <email>group07 at example.com</email>
+    <member-uid>10000000-0000-0000-0000-000000000022</member-uid>
+    <member-uid>10000000-0000-0000-0000-000000000023</member-uid>
+    <member-uid>10000000-0000-0000-0000-000000000024</member-uid>
+</record>
+<record type="group">
+    <short-name>group08</short-name>
+    <uid>20000000-0000-0000-0000-000000000008</uid>
+    <guid>20000000-0000-0000-0000-000000000008</guid>
+    <full-name>Group 08</full-name>
+    <email>group08 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group09</short-name>
+    <uid>20000000-0000-0000-0000-000000000009</uid>
+    <guid>20000000-0000-0000-0000-000000000009</guid>
+    <full-name>Group 09</full-name>
+    <email>group09 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group10</short-name>
+    <uid>20000000-0000-0000-0000-000000000010</uid>
+    <guid>20000000-0000-0000-0000-000000000010</guid>
+    <full-name>Group 10</full-name>
+    <email>group10 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group11</short-name>
+    <uid>20000000-0000-0000-0000-000000000011</uid>
+    <guid>20000000-0000-0000-0000-000000000011</guid>
+    <full-name>Group 11</full-name>
+    <email>group11 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group12</short-name>
+    <uid>20000000-0000-0000-0000-000000000012</uid>
+    <guid>20000000-0000-0000-0000-000000000012</guid>
+    <full-name>Group 12</full-name>
+    <email>group12 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group13</short-name>
+    <uid>20000000-0000-0000-0000-000000000013</uid>
+    <guid>20000000-0000-0000-0000-000000000013</guid>
+    <full-name>Group 13</full-name>
+    <email>group13 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group14</short-name>
+    <uid>20000000-0000-0000-0000-000000000014</uid>
+    <guid>20000000-0000-0000-0000-000000000014</guid>
+    <full-name>Group 14</full-name>
+    <email>group14 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group15</short-name>
+    <uid>20000000-0000-0000-0000-000000000015</uid>
+    <guid>20000000-0000-0000-0000-000000000015</guid>
+    <full-name>Group 15</full-name>
+    <email>group15 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group16</short-name>
+    <uid>20000000-0000-0000-0000-000000000016</uid>
+    <guid>20000000-0000-0000-0000-000000000016</guid>
+    <full-name>Group 16</full-name>
+    <email>group16 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group17</short-name>
+    <uid>20000000-0000-0000-0000-000000000017</uid>
+    <guid>20000000-0000-0000-0000-000000000017</guid>
+    <full-name>Group 17</full-name>
+    <email>group17 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group18</short-name>
+    <uid>20000000-0000-0000-0000-000000000018</uid>
+    <guid>20000000-0000-0000-0000-000000000018</guid>
+    <full-name>Group 18</full-name>
+    <email>group18 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group19</short-name>
+    <uid>20000000-0000-0000-0000-000000000019</uid>
+    <guid>20000000-0000-0000-0000-000000000019</guid>
+    <full-name>Group 19</full-name>
+    <email>group19 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group20</short-name>
+    <uid>20000000-0000-0000-0000-000000000020</uid>
+    <guid>20000000-0000-0000-0000-000000000020</guid>
+    <full-name>Group 20</full-name>
+    <email>group20 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group21</short-name>
+    <uid>20000000-0000-0000-0000-000000000021</uid>
+    <guid>20000000-0000-0000-0000-000000000021</guid>
+    <full-name>Group 21</full-name>
+    <email>group21 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group22</short-name>
+    <uid>20000000-0000-0000-0000-000000000022</uid>
+    <guid>20000000-0000-0000-0000-000000000022</guid>
+    <full-name>Group 22</full-name>
+    <email>group22 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group23</short-name>
+    <uid>20000000-0000-0000-0000-000000000023</uid>
+    <guid>20000000-0000-0000-0000-000000000023</guid>
+    <full-name>Group 23</full-name>
+    <email>group23 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group24</short-name>
+    <uid>20000000-0000-0000-0000-000000000024</uid>
+    <guid>20000000-0000-0000-0000-000000000024</guid>
+    <full-name>Group 24</full-name>
+    <email>group24 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group25</short-name>
+    <uid>20000000-0000-0000-0000-000000000025</uid>
+    <guid>20000000-0000-0000-0000-000000000025</guid>
+    <full-name>Group 25</full-name>
+    <email>group25 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group26</short-name>
+    <uid>20000000-0000-0000-0000-000000000026</uid>
+    <guid>20000000-0000-0000-0000-000000000026</guid>
+    <full-name>Group 26</full-name>
+    <email>group26 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group27</short-name>
+    <uid>20000000-0000-0000-0000-000000000027</uid>
+    <guid>20000000-0000-0000-0000-000000000027</guid>
+    <full-name>Group 27</full-name>
+    <email>group27 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group28</short-name>
+    <uid>20000000-0000-0000-0000-000000000028</uid>
+    <guid>20000000-0000-0000-0000-000000000028</guid>
+    <full-name>Group 28</full-name>
+    <email>group28 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group29</short-name>
+    <uid>20000000-0000-0000-0000-000000000029</uid>
+    <guid>20000000-0000-0000-0000-000000000029</guid>
+    <full-name>Group 29</full-name>
+    <email>group29 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group30</short-name>
+    <uid>20000000-0000-0000-0000-000000000030</uid>
+    <guid>20000000-0000-0000-0000-000000000030</guid>
+    <full-name>Group 30</full-name>
+    <email>group30 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group31</short-name>
+    <uid>20000000-0000-0000-0000-000000000031</uid>
+    <guid>20000000-0000-0000-0000-000000000031</guid>
+    <full-name>Group 31</full-name>
+    <email>group31 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group32</short-name>
+    <uid>20000000-0000-0000-0000-000000000032</uid>
+    <guid>20000000-0000-0000-0000-000000000032</guid>
+    <full-name>Group 32</full-name>
+    <email>group32 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group33</short-name>
+    <uid>20000000-0000-0000-0000-000000000033</uid>
+    <guid>20000000-0000-0000-0000-000000000033</guid>
+    <full-name>Group 33</full-name>
+    <email>group33 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group34</short-name>
+    <uid>20000000-0000-0000-0000-000000000034</uid>
+    <guid>20000000-0000-0000-0000-000000000034</guid>
+    <full-name>Group 34</full-name>
+    <email>group34 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group35</short-name>
+    <uid>20000000-0000-0000-0000-000000000035</uid>
+    <guid>20000000-0000-0000-0000-000000000035</guid>
+    <full-name>Group 35</full-name>
+    <email>group35 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group36</short-name>
+    <uid>20000000-0000-0000-0000-000000000036</uid>
+    <guid>20000000-0000-0000-0000-000000000036</guid>
+    <full-name>Group 36</full-name>
+    <email>group36 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group37</short-name>
+    <uid>20000000-0000-0000-0000-000000000037</uid>
+    <guid>20000000-0000-0000-0000-000000000037</guid>
+    <full-name>Group 37</full-name>
+    <email>group37 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group38</short-name>
+    <uid>20000000-0000-0000-0000-000000000038</uid>
+    <guid>20000000-0000-0000-0000-000000000038</guid>
+    <full-name>Group 38</full-name>
+    <email>group38 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group39</short-name>
+    <uid>20000000-0000-0000-0000-000000000039</uid>
+    <guid>20000000-0000-0000-0000-000000000039</guid>
+    <full-name>Group 39</full-name>
+    <email>group39 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group40</short-name>
+    <uid>20000000-0000-0000-0000-000000000040</uid>
+    <guid>20000000-0000-0000-0000-000000000040</guid>
+    <full-name>Group 40</full-name>
+    <email>group40 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group41</short-name>
+    <uid>20000000-0000-0000-0000-000000000041</uid>
+    <guid>20000000-0000-0000-0000-000000000041</guid>
+    <full-name>Group 41</full-name>
+    <email>group41 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group42</short-name>
+    <uid>20000000-0000-0000-0000-000000000042</uid>
+    <guid>20000000-0000-0000-0000-000000000042</guid>
+    <full-name>Group 42</full-name>
+    <email>group42 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group43</short-name>
+    <uid>20000000-0000-0000-0000-000000000043</uid>
+    <guid>20000000-0000-0000-0000-000000000043</guid>
+    <full-name>Group 43</full-name>
+    <email>group43 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group44</short-name>
+    <uid>20000000-0000-0000-0000-000000000044</uid>
+    <guid>20000000-0000-0000-0000-000000000044</guid>
+    <full-name>Group 44</full-name>
+    <email>group44 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group45</short-name>
+    <uid>20000000-0000-0000-0000-000000000045</uid>
+    <guid>20000000-0000-0000-0000-000000000045</guid>
+    <full-name>Group 45</full-name>
+    <email>group45 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group46</short-name>
+    <uid>20000000-0000-0000-0000-000000000046</uid>
+    <guid>20000000-0000-0000-0000-000000000046</guid>
+    <full-name>Group 46</full-name>
+    <email>group46 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group47</short-name>
+    <uid>20000000-0000-0000-0000-000000000047</uid>
+    <guid>20000000-0000-0000-0000-000000000047</guid>
+    <full-name>Group 47</full-name>
+    <email>group47 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group48</short-name>
+    <uid>20000000-0000-0000-0000-000000000048</uid>
+    <guid>20000000-0000-0000-0000-000000000048</guid>
+    <full-name>Group 48</full-name>
+    <email>group48 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group49</short-name>
+    <uid>20000000-0000-0000-0000-000000000049</uid>
+    <guid>20000000-0000-0000-0000-000000000049</guid>
+    <full-name>Group 49</full-name>
+    <email>group49 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group50</short-name>
+    <uid>20000000-0000-0000-0000-000000000050</uid>
+    <guid>20000000-0000-0000-0000-000000000050</guid>
+    <full-name>Group 50</full-name>
+    <email>group50 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group51</short-name>
+    <uid>20000000-0000-0000-0000-000000000051</uid>
+    <guid>20000000-0000-0000-0000-000000000051</guid>
+    <full-name>Group 51</full-name>
+    <email>group51 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group52</short-name>
+    <uid>20000000-0000-0000-0000-000000000052</uid>
+    <guid>20000000-0000-0000-0000-000000000052</guid>
+    <full-name>Group 52</full-name>
+    <email>group52 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group53</short-name>
+    <uid>20000000-0000-0000-0000-000000000053</uid>
+    <guid>20000000-0000-0000-0000-000000000053</guid>
+    <full-name>Group 53</full-name>
+    <email>group53 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group54</short-name>
+    <uid>20000000-0000-0000-0000-000000000054</uid>
+    <guid>20000000-0000-0000-0000-000000000054</guid>
+    <full-name>Group 54</full-name>
+    <email>group54 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group55</short-name>
+    <uid>20000000-0000-0000-0000-000000000055</uid>
+    <guid>20000000-0000-0000-0000-000000000055</guid>
+    <full-name>Group 55</full-name>
+    <email>group55 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group56</short-name>
+    <uid>20000000-0000-0000-0000-000000000056</uid>
+    <guid>20000000-0000-0000-0000-000000000056</guid>
+    <full-name>Group 56</full-name>
+    <email>group56 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group57</short-name>
+    <uid>20000000-0000-0000-0000-000000000057</uid>
+    <guid>20000000-0000-0000-0000-000000000057</guid>
+    <full-name>Group 57</full-name>
+    <email>group57 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group58</short-name>
+    <uid>20000000-0000-0000-0000-000000000058</uid>
+    <guid>20000000-0000-0000-0000-000000000058</guid>
+    <full-name>Group 58</full-name>
+    <email>group58 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group59</short-name>
+    <uid>20000000-0000-0000-0000-000000000059</uid>
+    <guid>20000000-0000-0000-0000-000000000059</guid>
+    <full-name>Group 59</full-name>
+    <email>group59 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group60</short-name>
+    <uid>20000000-0000-0000-0000-000000000060</uid>
+    <guid>20000000-0000-0000-0000-000000000060</guid>
+    <full-name>Group 60</full-name>
+    <email>group60 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group61</short-name>
+    <uid>20000000-0000-0000-0000-000000000061</uid>
+    <guid>20000000-0000-0000-0000-000000000061</guid>
+    <full-name>Group 61</full-name>
+    <email>group61 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group62</short-name>
+    <uid>20000000-0000-0000-0000-000000000062</uid>
+    <guid>20000000-0000-0000-0000-000000000062</guid>
+    <full-name>Group 62</full-name>
+    <email>group62 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group63</short-name>
+    <uid>20000000-0000-0000-0000-000000000063</uid>
+    <guid>20000000-0000-0000-0000-000000000063</guid>
+    <full-name>Group 63</full-name>
+    <email>group63 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group64</short-name>
+    <uid>20000000-0000-0000-0000-000000000064</uid>
+    <guid>20000000-0000-0000-0000-000000000064</guid>
+    <full-name>Group 64</full-name>
+    <email>group64 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group65</short-name>
+    <uid>20000000-0000-0000-0000-000000000065</uid>
+    <guid>20000000-0000-0000-0000-000000000065</guid>
+    <full-name>Group 65</full-name>
+    <email>group65 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group66</short-name>
+    <uid>20000000-0000-0000-0000-000000000066</uid>
+    <guid>20000000-0000-0000-0000-000000000066</guid>
+    <full-name>Group 66</full-name>
+    <email>group66 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group67</short-name>
+    <uid>20000000-0000-0000-0000-000000000067</uid>
+    <guid>20000000-0000-0000-0000-000000000067</guid>
+    <full-name>Group 67</full-name>
+    <email>group67 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group68</short-name>
+    <uid>20000000-0000-0000-0000-000000000068</uid>
+    <guid>20000000-0000-0000-0000-000000000068</guid>
+    <full-name>Group 68</full-name>
+    <email>group68 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group69</short-name>
+    <uid>20000000-0000-0000-0000-000000000069</uid>
+    <guid>20000000-0000-0000-0000-000000000069</guid>
+    <full-name>Group 69</full-name>
+    <email>group69 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group70</short-name>
+    <uid>20000000-0000-0000-0000-000000000070</uid>
+    <guid>20000000-0000-0000-0000-000000000070</guid>
+    <full-name>Group 70</full-name>
+    <email>group70 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group71</short-name>
+    <uid>20000000-0000-0000-0000-000000000071</uid>
+    <guid>20000000-0000-0000-0000-000000000071</guid>
+    <full-name>Group 71</full-name>
+    <email>group71 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group72</short-name>
+    <uid>20000000-0000-0000-0000-000000000072</uid>
+    <guid>20000000-0000-0000-0000-000000000072</guid>
+    <full-name>Group 72</full-name>
+    <email>group72 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group73</short-name>
+    <uid>20000000-0000-0000-0000-000000000073</uid>
+    <guid>20000000-0000-0000-0000-000000000073</guid>
+    <full-name>Group 73</full-name>
+    <email>group73 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group74</short-name>
+    <uid>20000000-0000-0000-0000-000000000074</uid>
+    <guid>20000000-0000-0000-0000-000000000074</guid>
+    <full-name>Group 74</full-name>
+    <email>group74 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group75</short-name>
+    <uid>20000000-0000-0000-0000-000000000075</uid>
+    <guid>20000000-0000-0000-0000-000000000075</guid>
+    <full-name>Group 75</full-name>
+    <email>group75 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group76</short-name>
+    <uid>20000000-0000-0000-0000-000000000076</uid>
+    <guid>20000000-0000-0000-0000-000000000076</guid>
+    <full-name>Group 76</full-name>
+    <email>group76 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group77</short-name>
+    <uid>20000000-0000-0000-0000-000000000077</uid>
+    <guid>20000000-0000-0000-0000-000000000077</guid>
+    <full-name>Group 77</full-name>
+    <email>group77 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group78</short-name>
+    <uid>20000000-0000-0000-0000-000000000078</uid>
+    <guid>20000000-0000-0000-0000-000000000078</guid>
+    <full-name>Group 78</full-name>
+    <email>group78 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group79</short-name>
+    <uid>20000000-0000-0000-0000-000000000079</uid>
+    <guid>20000000-0000-0000-0000-000000000079</guid>
+    <full-name>Group 79</full-name>
+    <email>group79 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group80</short-name>
+    <uid>20000000-0000-0000-0000-000000000080</uid>
+    <guid>20000000-0000-0000-0000-000000000080</guid>
+    <full-name>Group 80</full-name>
+    <email>group80 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group81</short-name>
+    <uid>20000000-0000-0000-0000-000000000081</uid>
+    <guid>20000000-0000-0000-0000-000000000081</guid>
+    <full-name>Group 81</full-name>
+    <email>group81 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group82</short-name>
+    <uid>20000000-0000-0000-0000-000000000082</uid>
+    <guid>20000000-0000-0000-0000-000000000082</guid>
+    <full-name>Group 82</full-name>
+    <email>group82 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group83</short-name>
+    <uid>20000000-0000-0000-0000-000000000083</uid>
+    <guid>20000000-0000-0000-0000-000000000083</guid>
+    <full-name>Group 83</full-name>
+    <email>group83 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group84</short-name>
+    <uid>20000000-0000-0000-0000-000000000084</uid>
+    <guid>20000000-0000-0000-0000-000000000084</guid>
+    <full-name>Group 84</full-name>
+    <email>group84 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group85</short-name>
+    <uid>20000000-0000-0000-0000-000000000085</uid>
+    <guid>20000000-0000-0000-0000-000000000085</guid>
+    <full-name>Group 85</full-name>
+    <email>group85 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group86</short-name>
+    <uid>20000000-0000-0000-0000-000000000086</uid>
+    <guid>20000000-0000-0000-0000-000000000086</guid>
+    <full-name>Group 86</full-name>
+    <email>group86 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group87</short-name>
+    <uid>20000000-0000-0000-0000-000000000087</uid>
+    <guid>20000000-0000-0000-0000-000000000087</guid>
+    <full-name>Group 87</full-name>
+    <email>group87 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group88</short-name>
+    <uid>20000000-0000-0000-0000-000000000088</uid>
+    <guid>20000000-0000-0000-0000-000000000088</guid>
+    <full-name>Group 88</full-name>
+    <email>group88 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group89</short-name>
+    <uid>20000000-0000-0000-0000-000000000089</uid>
+    <guid>20000000-0000-0000-0000-000000000089</guid>
+    <full-name>Group 89</full-name>
+    <email>group89 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group90</short-name>
+    <uid>20000000-0000-0000-0000-000000000090</uid>
+    <guid>20000000-0000-0000-0000-000000000090</guid>
+    <full-name>Group 90</full-name>
+    <email>group90 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group91</short-name>
+    <uid>20000000-0000-0000-0000-000000000091</uid>
+    <guid>20000000-0000-0000-0000-000000000091</guid>
+    <full-name>Group 91</full-name>
+    <email>group91 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group92</short-name>
+    <uid>20000000-0000-0000-0000-000000000092</uid>
+    <guid>20000000-0000-0000-0000-000000000092</guid>
+    <full-name>Group 92</full-name>
+    <email>group92 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group93</short-name>
+    <uid>20000000-0000-0000-0000-000000000093</uid>
+    <guid>20000000-0000-0000-0000-000000000093</guid>
+    <full-name>Group 93</full-name>
+    <email>group93 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group94</short-name>
+    <uid>20000000-0000-0000-0000-000000000094</uid>
+    <guid>20000000-0000-0000-0000-000000000094</guid>
+    <full-name>Group 94</full-name>
+    <email>group94 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group95</short-name>
+    <uid>20000000-0000-0000-0000-000000000095</uid>
+    <guid>20000000-0000-0000-0000-000000000095</guid>
+    <full-name>Group 95</full-name>
+    <email>group95 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group96</short-name>
+    <uid>20000000-0000-0000-0000-000000000096</uid>
+    <guid>20000000-0000-0000-0000-000000000096</guid>
+    <full-name>Group 96</full-name>
+    <email>group96 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group97</short-name>
+    <uid>20000000-0000-0000-0000-000000000097</uid>
+    <guid>20000000-0000-0000-0000-000000000097</guid>
+    <full-name>Group 97</full-name>
+    <email>group97 at example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group98</short-name>
+    <uid>20000000-0000-0000-0000-000000000098</uid>
+    <guid>20000000-0000-0000-0000-000000000098</guid>
+    <full-name>Group 98</full-name>
+    <email>group98@example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group99</short-name>
+    <uid>20000000-0000-0000-0000-000000000099</uid>
+    <guid>20000000-0000-0000-0000-000000000099</guid>
+    <full-name>Group 99</full-name>
+    <email>group99@example.com</email>
+    
+</record>
+<record type="group">
+    <short-name>group100</short-name>
+    <uid>20000000-0000-0000-0000-000000000100</uid>
+    <guid>20000000-0000-0000-0000-000000000100</guid>
+    <full-name>Group 100</full-name>
+    <email>group100@example.com</email>
+    
+</record>
+</directory>

Modified: CalendarServer/branches/users/sagen/move2who-5/conf/auth/augments-test.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/conf/auth/augments-test.xml	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/conf/auth/augments-test.xml	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,185 +1,80 @@
 <?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE augments SYSTEM "augments.dtd">
 
+<!--
+Copyright (c) 2006-2014 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ -->
+
 <augments>
-  <record>
+<record>
     <uid>Default</uid>
-    <enable>true</enable>
     <enable-calendar>true</enable-calendar>
     <enable-addressbook>true</enable-addressbook>
-  </record>
-  <record repeat="10">
-    <uid>location%02d</uid>
-    <enable>true</enable>
+</record>
+<record>
+    <uid>Default-Location</uid>
     <enable-calendar>true</enable-calendar>
     <enable-addressbook>true</enable-addressbook>
-    <auto-schedule>true</auto-schedule>
-  </record>
-  <record repeat="4">
-    <uid>resource%02d</uid>
-    <enable>true</enable>
+    <auto-schedule-mode>automatic</auto-schedule-mode>
+</record>
+<record>
+    <uid>Default-Resource</uid>
     <enable-calendar>true</enable-calendar>
     <enable-addressbook>true</enable-addressbook>
-    <auto-schedule>true</auto-schedule>
-  </record>
-  <record>
-    <uid>resource05</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-    <auto-schedule>true</auto-schedule>
+    <auto-schedule-mode>automatic</auto-schedule-mode>
+</record>
+<record>
+    <uid>40000000-0000-0000-0000-000000000005</uid>
     <auto-schedule-mode>none</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>resource06</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
     <enable-addressbook>true</enable-addressbook>
-    <auto-schedule>true</auto-schedule>
-    <auto-schedule-mode>accept-always</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>resource07</uid>
-    <enable>true</enable>
     <enable-calendar>true</enable-calendar>
+</record>
+<record>
+    <uid>40000000-0000-0000-0000-000000000006</uid>
+    <auto-schedule-mode>accept-always</auto-schedule-mode>
     <enable-addressbook>true</enable-addressbook>
-    <auto-schedule>true</auto-schedule>
-    <auto-schedule-mode>decline-always</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>resource08</uid>
-    <enable>true</enable>
     <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-    <auto-schedule>true</auto-schedule>
-    <auto-schedule-mode>accept-if-free</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>resource09</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-    <auto-schedule>true</auto-schedule>
-    <auto-schedule-mode>decline-if-busy</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>resource10</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-    <auto-schedule>true</auto-schedule>
-    <auto-schedule-mode>automatic</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>resource11</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-    <auto-schedule>true</auto-schedule>
+</record>
+<record>
+    <uid>40000000-0000-0000-0000-000000000007</uid>
     <auto-schedule-mode>decline-always</auto-schedule-mode>
-    <auto-accept-group>group01</auto-accept-group>
-  </record>
-  <record repeat="10">
-    <uid>group%02d</uid>
-    <enable>true</enable>
-  </record>
-  <record>
-    <uid>disabledgroup</uid>
-    <enable>false</enable>
-  </record>
-  <record>
-    <uid>delegatedroom</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
-    <enable-addressbook>false</enable-addressbook>
-    <auto-schedule>false</auto-schedule>
-  </record>
-  <record>
-    <uid>03DFF660-8BCC-4198-8588-DD77F776F518</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
     <enable-addressbook>true</enable-addressbook>
-    <enable-login>true</enable-login>
-    <auto-schedule>true</auto-schedule>
-  </record>
-  <record>
-    <uid>80689D41-DAF8-4189-909C-DB017B271892</uid>
-    <enable>true</enable>
     <enable-calendar>true</enable-calendar>
+</record>
+<record>
+    <uid>40000000-0000-0000-0000-000000000008</uid>
+    <auto-schedule-mode>accept-if-free</auto-schedule-mode>
     <enable-addressbook>true</enable-addressbook>
-    <enable-login>true</enable-login>
-    <auto-schedule>true</auto-schedule>
-    <auto-schedule-mode>default</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>C38BEE7A-36EE-478C-9DCB-CBF4612AFE65</uid>
-    <enable>true</enable>
     <enable-calendar>true</enable-calendar>
+</record>
+<record>
+    <uid>40000000-0000-0000-0000-000000000009</uid>
+    <auto-schedule-mode>decline-if-busy</auto-schedule-mode>
     <enable-addressbook>true</enable-addressbook>
-    <enable-login>true</enable-login>
-    <auto-schedule>true</auto-schedule>
-    <auto-schedule-mode>default</auto-schedule-mode>
-    <auto-accept-group>group01</auto-accept-group>
-  </record>
-  <record>
-    <uid>CCE95217-A57B-481A-AC3D-FEC9AB6CE3A9</uid>
-    <enable>true</enable>
     <enable-calendar>true</enable-calendar>
+</record>
+<record>
+    <uid>40000000-0000-0000-0000-000000000010</uid>
+    <auto-schedule-mode>automatic</auto-schedule-mode>
     <enable-addressbook>true</enable-addressbook>
-    <enable-login>true</enable-login>
-    <auto-schedule>true</auto-schedule>
-  </record>
-  <record>
-    <uid>0CE0BF31-5F9E-4801-A489-8C70CF287F5F</uid>
-    <enable>true</enable>
     <enable-calendar>true</enable-calendar>
+</record>
+<record>
+    <uid>40000000-0000-0000-0000-000000000011</uid>
+    <auto-schedule-mode>decline-always</auto-schedule-mode>
     <enable-addressbook>true</enable-addressbook>
-    <enable-login>true</enable-login>
-    <auto-schedule>true</auto-schedule>
-  </record>
-  <record>
-    <uid>6F9EE33B-78F6-481B-9289-3D0812FF0D64</uid>
-    <enable>true</enable>
     <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-    <enable-login>true</enable-login>
-    <auto-schedule>false</auto-schedule>
-    <auto-schedule-mode>default</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>76E7ECA6-08BC-4AE7-930D-F2E7453993A5</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-    <enable-login>true</enable-login>
-    <auto-schedule>false</auto-schedule>
-    <auto-schedule-mode>default</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>63A2F949-2D8D-4C8D-B8A5-DCF2A94610F3</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-    <enable-login>true</enable-login>
-    <auto-schedule>false</auto-schedule>
-    <auto-schedule-mode>default</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>06E3BDCB-9C19-485A-B14E-F146A80ADDC6</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-    <enable-login>true</enable-login>
-    <auto-schedule>true</auto-schedule>
-    <auto-schedule-mode>default</auto-schedule-mode>
-  </record>
-  <record>
-    <uid>4D66A20A-1437-437D-8069-2F14E8322234</uid>
-    <enable>true</enable>
-    <enable-calendar>true</enable-calendar>
-    <enable-addressbook>true</enable-addressbook>
-    <enable-login>true</enable-login>
-    <auto-schedule>true</auto-schedule>
-    <auto-schedule-mode>default</auto-schedule-mode>
-  </record>
+    <auto-accept-group>20000000-0000-0000-0000-000000000001</auto-accept-group>
+</record>
 </augments>
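
The rewritten augments-test.xml drops the augments.dtd DOCTYPE and the repeat="..." template records: the per-record <enable> and <auto-schedule> flags are gone, records are keyed by the new fixed UIDs (Default, Default-Location, Default-Resource, and the 40000000-... resource GUIDs), and scheduling behaviour is expressed through <auto-schedule-mode> (plus <auto-accept-group> for resource 11). A minimal sketch for spot-checking the new layout, assuming the file sits in the current directory (not part of this changeset):

import xml.etree.ElementTree as ET

# List each augment record's uid and its auto-schedule-mode, if any.
tree = ET.parse("augments-test.xml")
for record in tree.getroot().findall("record"):
    uid = record.findtext("uid")
    mode = record.findtext("auto-schedule-mode", default="(unset)")
    print("%s: auto-schedule-mode=%s" % (uid, mode))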

Copied: CalendarServer/branches/users/sagen/move2who-5/conf/auth/generate_test_accounts.py (from rev 13157, CalendarServer/branches/users/sagen/move2who-4/conf/auth/generate_test_accounts.py)
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/conf/auth/generate_test_accounts.py	                        (rev 0)
+++ CalendarServer/branches/users/sagen/move2who-5/conf/auth/generate_test_accounts.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -0,0 +1,341 @@
+#!/usr/bin/env python
+
+# Generates test directory records in accounts-test.xml, resources-test.xml,
+# augments-test.xml and proxies-test.xml (overwriting them if they exist in
+# the current directory).
+
+prefix = """<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Copyright (c) 2006-2014 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ -->
+
+"""
+
+# The uids and guids for CDT test accounts are the same
+# The short name is of the form userNN
+USERGUIDS = "10000000-0000-0000-0000-000000000%03d"
+GROUPGUIDS = "20000000-0000-0000-0000-000000000%03d"
+LOCATIONGUIDS = "30000000-0000-0000-0000-000000000%03d"
+RESOURCEGUIDS = "40000000-0000-0000-0000-000000000%03d"
+PUBLICGUIDS = "50000000-0000-0000-0000-0000000000%02d"
+
+# accounts-test.xml
+
+out = file("accounts-test.xml", "w")
+out.write(prefix)
+out.write('<directory realm="Test Realm">\n')
+
+
+for uid, fullName, guid in (
+    ("admin", "Super User", "0C8BDE62-E600-4696-83D3-8B5ECABDFD2E"),
+    ("apprentice", "Apprentice Super User", "29B6C503-11DF-43EC-8CCA-40C7003149CE"),
+    ("i18nuser", u"\u307e\u3060".encode("utf-8"), "860B3EE9-6D7C-4296-9639-E6B998074A78"),
+):
+    out.write("""<record>
+    <uid>{guid}</uid>
+    <guid>{guid}</guid>
+    <short-name>{uid}</short-name>
+    <password>{uid}</password>
+    <full-name>{fullName}</full-name>
+    <email>{uid}@example.com</email>
+</record>
+""".format(uid=uid, guid=guid, fullName=fullName))
+
+# user01-100
+for i in xrange(1, 101):
+    out.write("""<record type="user">
+    <short-name>user%02d</short-name>
+    <uid>%s</uid>
+    <guid>%s</guid>
+    <password>user%02d</password>
+    <full-name>User %02d</full-name>
+    <email>user%02d@example.com</email>
+</record>
+""" % (i, USERGUIDS % i, USERGUIDS % i, i, i, i))
+
+# public01-10
+for i in xrange(1, 11):
+    out.write("""<record type="user">
+    <short-name>public%02d</short-name>
+    <uid>%s</uid>
+    <guid>%s</guid>
+    <password>public%02d</password>
+    <full-name>Public %02d</full-name>
+    <email>public%02d@example.com</email>
+</record>
+""" % (i, PUBLICGUIDS % i, PUBLICGUIDS % i, i, i, i))
+
+# group01-100
+members = {
+    GROUPGUIDS % 1: (USERGUIDS % 1,),
+    GROUPGUIDS % 2: (USERGUIDS % 6, USERGUIDS % 7),
+    GROUPGUIDS % 3: (USERGUIDS % 8, USERGUIDS % 9),
+    GROUPGUIDS % 4: (GROUPGUIDS % 2, GROUPGUIDS % 3, USERGUIDS % 10),
+    GROUPGUIDS % 5: (GROUPGUIDS % 6, USERGUIDS % 20),
+    GROUPGUIDS % 6: (USERGUIDS % 21,),
+    GROUPGUIDS % 7: (USERGUIDS % 22, USERGUIDS % 23, USERGUIDS % 24),
+}
+
+for i in xrange(1, 101):
+
+    memberElements = []
+    groupUID = GROUPGUIDS % i
+    if groupUID in members:
+        for uid in members[groupUID]:
+            memberElements.append("<member-uid>{}</member-uid>".format(uid))
+        memberString = "\n    ".join(memberElements)
+    else:
+        memberString = ""
+
+    out.write("""<record type="group">
+    <short-name>group%02d</short-name>
+    <uid>%s</uid>
+    <guid>%s</guid>
+    <full-name>Group %02d</full-name>
+    <email>group%02d@example.com</email>
+    %s
+</record>
+""" % (i, GROUPGUIDS % i, GROUPGUIDS % i, i, i, memberString))
+
+out.write("</directory>\n")
+out.close()
+
+
+# resources-test.xml
+
+out = file("resources-test.xml", "w")
+out.write(prefix)
+out.write('<directory realm="Test Realm">\n')
+
+out.write("""
+  <record type="location">
+    <short-name>pretend</short-name>
+    <uid>pretend</uid>
+    <full-name>Pretend Conference Room</full-name>
+    <associated-address>il1</associated-address>
+  </record>
+  <record type="address">
+    <short-name>il1</short-name>
+    <uid>il1</uid>
+    <full-name>IL1</full-name>
+    <street-address>1 Infinite Loop, Cupertino, CA 95014</street-address>
+    <geographic-location>37.331741,-122.030333</geographic-location>
+  </record>
+  <record type="location">
+    <short-name>fantastic</short-name>
+    <uid>fantastic</uid>
+    <full-name>Fantastic Conference Room</full-name>
+    <associated-address>il2</associated-address>
+  </record>
+  <record type="address">
+    <short-name>il2</short-name>
+    <uid>il2</uid>
+    <full-name>IL2</full-name>
+    <street-address>2 Infinite Loop, Cupertino, CA 95014</street-address>
+    <geographic-location>37.332633,-122.030502</geographic-location>
+  </record>
+  <record type="location">
+    <short-name>delegatedroom</short-name>
+    <uid>delegatedroom</uid>
+    <full-name>Delegated Conference Room</full-name>
+  </record>
+
+""")
+
+for i in xrange(1, 101):
+    out.write("""<record type="location">
+    <short-name>location%02d</short-name>
+    <uid>%s</uid>
+    <guid>%s</guid>
+    <full-name>Location %02d</full-name>
+</record>
+""" % (i, LOCATIONGUIDS % i, LOCATIONGUIDS % i, i))
+
+
+for i in xrange(1, 101):
+    out.write("""<record type="resource">
+    <short-name>resource%02d</short-name>
+    <uid>%s</uid>
+    <guid>%s</guid>
+    <full-name>Resource %02d</full-name>
+</record>
+""" % (i, RESOURCEGUIDS % i, RESOURCEGUIDS % i, i))
+
+out.write("</directory>\n")
+out.close()
+
+
+# augments-test.xml
+
+out = file("augments-test.xml", "w")
+out.write(prefix)
+out.write("<augments>\n")
+
+augments = (
+    # resource05
+    (RESOURCEGUIDS % 5, {
+        "auto-schedule-mode": "none",
+        "enable-calendar": "true",
+        "enable-addressbook": "true",
+    }),
+    # resource06
+    (RESOURCEGUIDS % 6, {
+        "auto-schedule-mode": "accept-always",
+        "enable-calendar": "true",
+        "enable-addressbook": "true",
+    }),
+    # resource07
+    (RESOURCEGUIDS % 7, {
+        "auto-schedule-mode": "decline-always",
+        "enable-calendar": "true",
+        "enable-addressbook": "true",
+    }),
+    # resource08
+    (RESOURCEGUIDS % 8, {
+        "auto-schedule-mode": "accept-if-free",
+        "enable-calendar": "true",
+        "enable-addressbook": "true",
+    }),
+    # resource09
+    (RESOURCEGUIDS % 9, {
+        "auto-schedule-mode": "decline-if-busy",
+        "enable-calendar": "true",
+        "enable-addressbook": "true",
+    }),
+    # resource10
+    (RESOURCEGUIDS % 10, {
+        "auto-schedule-mode": "automatic",
+        "enable-calendar": "true",
+        "enable-addressbook": "true",
+    }),
+    # resource11
+    (RESOURCEGUIDS % 11, {
+        "auto-schedule-mode": "decline-always",
+        "auto-accept-group": GROUPGUIDS % 1,
+        "enable-calendar": "true",
+        "enable-addressbook": "true",
+    }),
+)
+
+out.write("""<record>
+    <uid>Default</uid>
+    <enable-calendar>true</enable-calendar>
+    <enable-addressbook>true</enable-addressbook>
+</record>
+""")
+
+out.write("""<record>
+    <uid>Default-Location</uid>
+    <enable-calendar>true</enable-calendar>
+    <enable-addressbook>true</enable-addressbook>
+    <auto-schedule-mode>automatic</auto-schedule-mode>
+</record>
+""")
+
+out.write("""<record>
+    <uid>Default-Resource</uid>
+    <enable-calendar>true</enable-calendar>
+    <enable-addressbook>true</enable-addressbook>
+    <auto-schedule-mode>automatic</auto-schedule-mode>
+</record>
+""")
+
+for uid, settings in augments:
+    elements = []
+    for key, value in settings.iteritems():
+        elements.append("<{key}>{value}</{key}>".format(key=key, value=value))
+    elementsString = "\n    ".join(elements)
+
+    out.write("""<record>
+    <uid>{uid}</uid>
+    {elements}
+</record>
+""".format(uid=uid, elements=elementsString))
+
+out.write("</augments>\n")
+out.close()
+
+
+# proxies-test.xml
+
+out = file("proxies-test.xml", "w")
+out.write(prefix)
+out.write("<proxies>\n")
+
+proxies = (
+    (RESOURCEGUIDS % 1, {
+        "proxies": (USERGUIDS % 1,),
+        "read-only-proxies": (USERGUIDS % 3,),
+    }),
+    (RESOURCEGUIDS % 2, {
+        "proxies": (USERGUIDS % 1,),
+        "read-only-proxies": (USERGUIDS % 3,),
+    }),
+    (RESOURCEGUIDS % 3, {
+        "proxies": (USERGUIDS % 1,),
+        "read-only-proxies": (USERGUIDS % 3,),
+    }),
+    (RESOURCEGUIDS % 4, {
+        "proxies": (USERGUIDS % 1,),
+        "read-only-proxies": (USERGUIDS % 3,),
+    }),
+    (RESOURCEGUIDS % 5, {
+        "proxies": (USERGUIDS % 1,),
+        "read-only-proxies": (USERGUIDS % 3,),
+    }),
+    (RESOURCEGUIDS % 6, {
+        "proxies": (USERGUIDS % 1,),
+        "read-only-proxies": (USERGUIDS % 3,),
+    }),
+    (RESOURCEGUIDS % 7, {
+        "proxies": (USERGUIDS % 1,),
+        "read-only-proxies": (USERGUIDS % 3,),
+    }),
+    (RESOURCEGUIDS % 8, {
+        "proxies": (USERGUIDS % 1,),
+        "read-only-proxies": (USERGUIDS % 3,),
+    }),
+    (RESOURCEGUIDS % 9, {
+        "proxies": (USERGUIDS % 1,),
+        "read-only-proxies": (USERGUIDS % 3,),
+    }),
+    (RESOURCEGUIDS % 10, {
+        "proxies": (USERGUIDS % 1,),
+        "read-only-proxies": (USERGUIDS % 3,),
+    }),
+    ("delegatedroom", {
+        "proxies": (GROUPGUIDS % 5,),
+        "read-only-proxies": (),
+    }),
+)
+
+for uid, settings in proxies:
+    elements = []
+    for key, values in settings.iteritems():
+        elements.append("<{key}>".format(key=key))
+        for value in values:
+            elements.append("<member>{value}</member>".format(value=value))
+        elements.append("</{key}>".format(key=key))
+    elementsString = "\n    ".join(elements)
+
+    out.write("""<record>
+    <guid>{uid}</guid>
+    {elements}
+</record>
+""".format(uid=uid, elements=elementsString))
+
+out.write("</proxies>\n")
+out.close()
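
For reference, generate_test_accounts.py is plain Python 2 (it uses file(), xrange() and dict.iteritems()) and takes no arguments; run from conf/auth it regenerates the four test fixtures next to it. A usage sketch, assuming a Python 2 interpreter is on the path as "python":

$ cd conf/auth
$ python generate_test_accounts.py

The GUID templates simply format the loop index into the last group of the UUID, which is where the zero-padded suffixes in the expanded records come from:

>>> USERGUIDS = "10000000-0000-0000-0000-000000000%03d"
>>> USERGUIDS % 3
'10000000-0000-0000-0000-000000000003'
>>> GROUPGUIDS = "20000000-0000-0000-0000-000000000%03d"
>>> GROUPGUIDS % 100
'20000000-0000-0000-0000-000000000100'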

Modified: CalendarServer/branches/users/sagen/move2who-5/conf/auth/proxies-test.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/conf/auth/proxies-test.xml	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/conf/auth/proxies-test.xml	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 
 <!--
-Copyright (c) 2009-2014 Apple Inc. All rights reserved.
+Copyright (c) 2006-2014 Apple Inc. All rights reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,25 +16,103 @@
 limitations under the License.
  -->
 
-<!DOCTYPE proxies SYSTEM "proxies.dtd">
-
 <proxies>
-  <record repeat="10">
-    <guid>resource%02d</guid>
+<record>
+    <guid>40000000-0000-0000-0000-000000000001</guid>
     <proxies>
-      <member>user01</member>
+    <member>10000000-0000-0000-0000-000000000001</member>
     </proxies>
     <read-only-proxies>
-      <member>user03</member>
+    <member>10000000-0000-0000-0000-000000000003</member>
     </read-only-proxies>
-  </record>
-  <record>
+</record>
+<record>
+    <guid>40000000-0000-0000-0000-000000000002</guid>
+    <proxies>
+    <member>10000000-0000-0000-0000-000000000001</member>
+    </proxies>
+    <read-only-proxies>
+    <member>10000000-0000-0000-0000-000000000003</member>
+    </read-only-proxies>
+</record>
+<record>
+    <guid>40000000-0000-0000-0000-000000000003</guid>
+    <proxies>
+    <member>10000000-0000-0000-0000-000000000001</member>
+    </proxies>
+    <read-only-proxies>
+    <member>10000000-0000-0000-0000-000000000003</member>
+    </read-only-proxies>
+</record>
+<record>
+    <guid>40000000-0000-0000-0000-000000000004</guid>
+    <proxies>
+    <member>10000000-0000-0000-0000-000000000001</member>
+    </proxies>
+    <read-only-proxies>
+    <member>10000000-0000-0000-0000-000000000003</member>
+    </read-only-proxies>
+</record>
+<record>
+    <guid>40000000-0000-0000-0000-000000000005</guid>
+    <proxies>
+    <member>10000000-0000-0000-0000-000000000001</member>
+    </proxies>
+    <read-only-proxies>
+    <member>10000000-0000-0000-0000-000000000003</member>
+    </read-only-proxies>
+</record>
+<record>
+    <guid>40000000-0000-0000-0000-000000000006</guid>
+    <proxies>
+    <member>10000000-0000-0000-0000-000000000001</member>
+    </proxies>
+    <read-only-proxies>
+    <member>10000000-0000-0000-0000-000000000003</member>
+    </read-only-proxies>
+</record>
+<record>
+    <guid>40000000-0000-0000-0000-000000000007</guid>
+    <proxies>
+    <member>10000000-0000-0000-0000-000000000001</member>
+    </proxies>
+    <read-only-proxies>
+    <member>10000000-0000-0000-0000-000000000003</member>
+    </read-only-proxies>
+</record>
+<record>
+    <guid>40000000-0000-0000-0000-000000000008</guid>
+    <proxies>
+    <member>10000000-0000-0000-0000-000000000001</member>
+    </proxies>
+    <read-only-proxies>
+    <member>10000000-0000-0000-0000-000000000003</member>
+    </read-only-proxies>
+</record>
+<record>
+    <guid>40000000-0000-0000-0000-000000000009</guid>
+    <proxies>
+    <member>10000000-0000-0000-0000-000000000001</member>
+    </proxies>
+    <read-only-proxies>
+    <member>10000000-0000-0000-0000-000000000003</member>
+    </read-only-proxies>
+</record>
+<record>
+    <guid>40000000-0000-0000-0000-000000000010</guid>
+    <proxies>
+    <member>10000000-0000-0000-0000-000000000001</member>
+    </proxies>
+    <read-only-proxies>
+    <member>10000000-0000-0000-0000-000000000003</member>
+    </read-only-proxies>
+</record>
+<record>
     <guid>delegatedroom</guid>
     <proxies>
-      <member>group05</member>
+    <member>20000000-0000-0000-0000-000000000005</member>
     </proxies>
     <read-only-proxies>
-      <member>group07</member>
     </read-only-proxies>
-  </record>
+</record>
 </proxies>
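
The proxies file gets the same treatment as the other fixtures: the repeat="10" template keyed on resource%02d short names becomes ten explicit records keyed on the 40000000-... resource GUIDs, each granting 10000000-...-000000000001 (user01) read-write and 10000000-...-000000000003 (user03) read-only proxy access, while delegatedroom keeps 20000000-...-000000000005 (group05) as its read-write proxy and no longer lists a read-only proxy. This matches the proxies table in generate_test_accounts.py above.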

Modified: CalendarServer/branches/users/sagen/move2who-5/conf/auth/resources-test.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/conf/auth/resources-test.xml	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/conf/auth/resources-test.xml	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,135 +1,1253 @@
-<accounts realm="Test Realm">
-  <location repeat="10">
-    <uid>location%02d</uid>
-    <guid>location%02d</guid>
-    <password>location%02d</password>
-    <name>Room %02d</name>
-  </location>
-  <resource repeat="20">
-    <uid>resource%02d</uid>
-    <guid>resource%02d</guid>
-    <password>resource%02d</password>
-    <name>Resource %02d</name>
-  </resource>
-  <location>
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Copyright (c) 2006-2014 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ -->
+
+<directory realm="Test Realm">
+
+  <record type="location">
+    <short-name>pretend</short-name>
+    <uid>pretend</uid>
+    <full-name>Pretend Conference Room</full-name>
+    <associated-address>il1</associated-address>
+  </record>
+  <record type="address">
+    <short-name>il1</short-name>
+    <uid>il1</uid>
+    <full-name>IL1</full-name>
+    <street-address>1 Infinite Loop, Cupertino, CA 95014</street-address>
+    <geographic-location>37.331741,-122.030333</geographic-location>
+  </record>
+  <record type="location">
+    <short-name>fantastic</short-name>
     <uid>fantastic</uid>
-    <guid>4D66A20A-1437-437D-8069-2F14E8322234</guid>
-    <name>Fantastic Conference Room</name>
-    <extras>
-      <associatedAddress>63A2F949-2D8D-4C8D-B8A5-DCF2A94610F3</associatedAddress>
-    </extras>
-  </location>
-  <location>
-    <uid>jupiter</uid>
-    <guid>jupiter</guid>
-    <name>Jupiter Conference Room, Building 2, 1st Floor</name>
-  </location>
-  <location>
-    <uid>uranus</uid>
-    <guid>uranus</guid>
-    <name>Uranus Conference Room, Building 3, 1st Floor</name>
-  </location>
-  <location>
-    <uid>morgensroom</uid>
-    <guid>03DFF660-8BCC-4198-8588-DD77F776F518</guid>
-    <name>Morgen's Room</name>
-  </location>
-  <location>
-    <uid>mercury</uid>
-    <guid>mercury</guid>
-    <name>Mercury Conference Room, Building 1, 2nd Floor</name>
-  </location>
-  <location>
+    <full-name>Fantastic Conference Room</full-name>
+    <associated-address>il2</associated-address>
+  </record>
+  <record type="address">
+    <short-name>il2</short-name>
+    <uid>il2</uid>
+    <full-name>IL2</full-name>
+    <street-address>2 Infinite Loop, Cupertino, CA 95014</street-address>
+    <geographic-location>37.332633,-122.030502</geographic-location>
+  </record>
+  <record type="location">
+    <short-name>delegatedroom</short-name>
     <uid>delegatedroom</uid>
-    <guid>delegatedroom</guid>
-    <name>Delegated Conference Room</name>
-  </location>
-  <location>
-    <uid>mars</uid>
-    <guid>redplanet</guid>
-    <name>Mars Conference Room, Building 1, 1st Floor</name>
-  </location>
-  <location>
-    <uid>sharissroom</uid>
-    <guid>80689D41-DAF8-4189-909C-DB017B271892</guid>
-    <name>Shari's Room</name>
-    <extras>
-      <associatedAddress>6F9EE33B-78F6-481B-9289-3D0812FF0D64</associatedAddress>
-    </extras>
-  </location>
-  <location>
-    <uid>pluto</uid>
-    <guid>pluto</guid>
-    <name>Pluto Conference Room, Building 2, 1st Floor</name>
-  </location>
-  <location>
-    <uid>saturn</uid>
-    <guid>saturn</guid>
-    <name>Saturn Conference Room, Building 2, 1st Floor</name>
-  </location>
-  <location>
-    <uid>pretend</uid>
-    <guid>06E3BDCB-9C19-485A-B14E-F146A80ADDC6</guid>
-    <name>Pretend Conference Room</name>
-    <extras>
-      <associatedAddress>76E7ECA6-08BC-4AE7-930D-F2E7453993A5</associatedAddress>
-    </extras>
-  </location>
-  <location>
-    <uid>neptune</uid>
-    <guid>neptune</guid>
-    <name>Neptune Conference Room, Building 2, 1st Floor</name>
-  </location>
-  <location>
-    <uid>Earth</uid>
-    <guid>Earth</guid>
-    <name>Earth Conference Room, Building 1, 1st Floor</name>
-  </location>
-  <location>
-    <uid>venus</uid>
-    <guid>venus</guid>
-    <name>Venus Conference Room, Building 1, 2nd Floor</name>
-  </location>
-  <resource>
-    <uid>sharisotherresource</uid>
-    <guid>CCE95217-A57B-481A-AC3D-FEC9AB6CE3A9</guid>
-    <name>Shari's Other Resource</name>
-  </resource>
-  <resource>
-    <uid>sharisresource</uid>
-    <guid>C38BEE7A-36EE-478C-9DCB-CBF4612AFE65</guid>
-    <name>Shari's Resource</name>
-  </resource>
-  <resource>
-    <uid>sharisotherresource1</uid>
-    <guid>0CE0BF31-5F9E-4801-A489-8C70CF287F5F</guid>
-    <name>Shari's Other Resource1</name>
-  </resource>
-  <address>
-    <uid>testaddress1</uid>
-    <guid>6F9EE33B-78F6-481B-9289-3D0812FF0D64</guid>
-    <name>Test Address One</name>
-    <extras>
-      <streetAddress>20300 Stevens Creek Blvd, Cupertino, CA 95014</streetAddress>
-      <geo>37.322281,-122.028345</geo>
-    </extras>
-  </address>
-  <address>
-    <uid>il2</uid>
-    <guid>63A2F949-2D8D-4C8D-B8A5-DCF2A94610F3</guid>
-    <name>IL2</name>
-    <extras>
-      <streetAddress>2 Infinite Loop, Cupertino, CA 95014</streetAddress>
-      <geo>37.332633,-122.030502</geo>
-    </extras>
-  </address>
-  <address>
-    <uid>il1</uid>
-    <guid>76E7ECA6-08BC-4AE7-930D-F2E7453993A5</guid>
-    <name>IL1</name>
-    <extras>
-      <streetAddress>1 Infinite Loop, Cupertino, CA 95014</streetAddress>
-      <geo>37.331741,-122.030333</geo>
-    </extras>
-  </address>
-</accounts>
+    <full-name>Delegated Conference Room</full-name>
+  </record>
+
+<record type="location">
+    <short-name>location01</short-name>
+    <uid>30000000-0000-0000-0000-000000000001</uid>
+    <guid>30000000-0000-0000-0000-000000000001</guid>
+    <full-name>Location 01</full-name>
+</record>
+<record type="location">
+    <short-name>location02</short-name>
+    <uid>30000000-0000-0000-0000-000000000002</uid>
+    <guid>30000000-0000-0000-0000-000000000002</guid>
+    <full-name>Location 02</full-name>
+</record>
+<record type="location">
+    <short-name>location03</short-name>
+    <uid>30000000-0000-0000-0000-000000000003</uid>
+    <guid>30000000-0000-0000-0000-000000000003</guid>
+    <full-name>Location 03</full-name>
+</record>
+<record type="location">
+    <short-name>location04</short-name>
+    <uid>30000000-0000-0000-0000-000000000004</uid>
+    <guid>30000000-0000-0000-0000-000000000004</guid>
+    <full-name>Location 04</full-name>
+</record>
+<record type="location">
+    <short-name>location05</short-name>
+    <uid>30000000-0000-0000-0000-000000000005</uid>
+    <guid>30000000-0000-0000-0000-000000000005</guid>
+    <full-name>Location 05</full-name>
+</record>
+<record type="location">
+    <short-name>location06</short-name>
+    <uid>30000000-0000-0000-0000-000000000006</uid>
+    <guid>30000000-0000-0000-0000-000000000006</guid>
+    <full-name>Location 06</full-name>
+</record>
+<record type="location">
+    <short-name>location07</short-name>
+    <uid>30000000-0000-0000-0000-000000000007</uid>
+    <guid>30000000-0000-0000-0000-000000000007</guid>
+    <full-name>Location 07</full-name>
+</record>
+<record type="location">
+    <short-name>location08</short-name>
+    <uid>30000000-0000-0000-0000-000000000008</uid>
+    <guid>30000000-0000-0000-0000-000000000008</guid>
+    <full-name>Location 08</full-name>
+</record>
+<record type="location">
+    <short-name>location09</short-name>
+    <uid>30000000-0000-0000-0000-000000000009</uid>
+    <guid>30000000-0000-0000-0000-000000000009</guid>
+    <full-name>Location 09</full-name>
+</record>
+<record type="location">
+    <short-name>location10</short-name>
+    <uid>30000000-0000-0000-0000-000000000010</uid>
+    <guid>30000000-0000-0000-0000-000000000010</guid>
+    <full-name>Location 10</full-name>
+</record>
+<record type="location">
+    <short-name>location11</short-name>
+    <uid>30000000-0000-0000-0000-000000000011</uid>
+    <guid>30000000-0000-0000-0000-000000000011</guid>
+    <full-name>Location 11</full-name>
+</record>
+<record type="location">
+    <short-name>location12</short-name>
+    <uid>30000000-0000-0000-0000-000000000012</uid>
+    <guid>30000000-0000-0000-0000-000000000012</guid>
+    <full-name>Location 12</full-name>
+</record>
+<record type="location">
+    <short-name>location13</short-name>
+    <uid>30000000-0000-0000-0000-000000000013</uid>
+    <guid>30000000-0000-0000-0000-000000000013</guid>
+    <full-name>Location 13</full-name>
+</record>
+<record type="location">
+    <short-name>location14</short-name>
+    <uid>30000000-0000-0000-0000-000000000014</uid>
+    <guid>30000000-0000-0000-0000-000000000014</guid>
+    <full-name>Location 14</full-name>
+</record>
+<record type="location">
+    <short-name>location15</short-name>
+    <uid>30000000-0000-0000-0000-000000000015</uid>
+    <guid>30000000-0000-0000-0000-000000000015</guid>
+    <full-name>Location 15</full-name>
+</record>
+<record type="location">
+    <short-name>location16</short-name>
+    <uid>30000000-0000-0000-0000-000000000016</uid>
+    <guid>30000000-0000-0000-0000-000000000016</guid>
+    <full-name>Location 16</full-name>
+</record>
+<record type="location">
+    <short-name>location17</short-name>
+    <uid>30000000-0000-0000-0000-000000000017</uid>
+    <guid>30000000-0000-0000-0000-000000000017</guid>
+    <full-name>Location 17</full-name>
+</record>
+<record type="location">
+    <short-name>location18</short-name>
+    <uid>30000000-0000-0000-0000-000000000018</uid>
+    <guid>30000000-0000-0000-0000-000000000018</guid>
+    <full-name>Location 18</full-name>
+</record>
+<record type="location">
+    <short-name>location19</short-name>
+    <uid>30000000-0000-0000-0000-000000000019</uid>
+    <guid>30000000-0000-0000-0000-000000000019</guid>
+    <full-name>Location 19</full-name>
+</record>
+<record type="location">
+    <short-name>location20</short-name>
+    <uid>30000000-0000-0000-0000-000000000020</uid>
+    <guid>30000000-0000-0000-0000-000000000020</guid>
+    <full-name>Location 20</full-name>
+</record>
+<record type="location">
+    <short-name>location21</short-name>
+    <uid>30000000-0000-0000-0000-000000000021</uid>
+    <guid>30000000-0000-0000-0000-000000000021</guid>
+    <full-name>Location 21</full-name>
+</record>
+<record type="location">
+    <short-name>location22</short-name>
+    <uid>30000000-0000-0000-0000-000000000022</uid>
+    <guid>30000000-0000-0000-0000-000000000022</guid>
+    <full-name>Location 22</full-name>
+</record>
+<record type="location">
+    <short-name>location23</short-name>
+    <uid>30000000-0000-0000-0000-000000000023</uid>
+    <guid>30000000-0000-0000-0000-000000000023</guid>
+    <full-name>Location 23</full-name>
+</record>
+<record type="location">
+    <short-name>location24</short-name>
+    <uid>30000000-0000-0000-0000-000000000024</uid>
+    <guid>30000000-0000-0000-0000-000000000024</guid>
+    <full-name>Location 24</full-name>
+</record>
+<record type="location">
+    <short-name>location25</short-name>
+    <uid>30000000-0000-0000-0000-000000000025</uid>
+    <guid>30000000-0000-0000-0000-000000000025</guid>
+    <full-name>Location 25</full-name>
+</record>
+<record type="location">
+    <short-name>location26</short-name>
+    <uid>30000000-0000-0000-0000-000000000026</uid>
+    <guid>30000000-0000-0000-0000-000000000026</guid>
+    <full-name>Location 26</full-name>
+</record>
+<record type="location">
+    <short-name>location27</short-name>
+    <uid>30000000-0000-0000-0000-000000000027</uid>
+    <guid>30000000-0000-0000-0000-000000000027</guid>
+    <full-name>Location 27</full-name>
+</record>
+<record type="location">
+    <short-name>location28</short-name>
+    <uid>30000000-0000-0000-0000-000000000028</uid>
+    <guid>30000000-0000-0000-0000-000000000028</guid>
+    <full-name>Location 28</full-name>
+</record>
+<record type="location">
+    <short-name>location29</short-name>
+    <uid>30000000-0000-0000-0000-000000000029</uid>
+    <guid>30000000-0000-0000-0000-000000000029</guid>
+    <full-name>Location 29</full-name>
+</record>
+<record type="location">
+    <short-name>location30</short-name>
+    <uid>30000000-0000-0000-0000-000000000030</uid>
+    <guid>30000000-0000-0000-0000-000000000030</guid>
+    <full-name>Location 30</full-name>
+</record>
+<record type="location">
+    <short-name>location31</short-name>
+    <uid>30000000-0000-0000-0000-000000000031</uid>
+    <guid>30000000-0000-0000-0000-000000000031</guid>
+    <full-name>Location 31</full-name>
+</record>
+<record type="location">
+    <short-name>location32</short-name>
+    <uid>30000000-0000-0000-0000-000000000032</uid>
+    <guid>30000000-0000-0000-0000-000000000032</guid>
+    <full-name>Location 32</full-name>
+</record>
+<record type="location">
+    <short-name>location33</short-name>
+    <uid>30000000-0000-0000-0000-000000000033</uid>
+    <guid>30000000-0000-0000-0000-000000000033</guid>
+    <full-name>Location 33</full-name>
+</record>
+<record type="location">
+    <short-name>location34</short-name>
+    <uid>30000000-0000-0000-0000-000000000034</uid>
+    <guid>30000000-0000-0000-0000-000000000034</guid>
+    <full-name>Location 34</full-name>
+</record>
+<record type="location">
+    <short-name>location35</short-name>
+    <uid>30000000-0000-0000-0000-000000000035</uid>
+    <guid>30000000-0000-0000-0000-000000000035</guid>
+    <full-name>Location 35</full-name>
+</record>
+<record type="location">
+    <short-name>location36</short-name>
+    <uid>30000000-0000-0000-0000-000000000036</uid>
+    <guid>30000000-0000-0000-0000-000000000036</guid>
+    <full-name>Location 36</full-name>
+</record>
+<record type="location">
+    <short-name>location37</short-name>
+    <uid>30000000-0000-0000-0000-000000000037</uid>
+    <guid>30000000-0000-0000-0000-000000000037</guid>
+    <full-name>Location 37</full-name>
+</record>
+<record type="location">
+    <short-name>location38</short-name>
+    <uid>30000000-0000-0000-0000-000000000038</uid>
+    <guid>30000000-0000-0000-0000-000000000038</guid>
+    <full-name>Location 38</full-name>
+</record>
+<record type="location">
+    <short-name>location39</short-name>
+    <uid>30000000-0000-0000-0000-000000000039</uid>
+    <guid>30000000-0000-0000-0000-000000000039</guid>
+    <full-name>Location 39</full-name>
+</record>
+<record type="location">
+    <short-name>location40</short-name>
+    <uid>30000000-0000-0000-0000-000000000040</uid>
+    <guid>30000000-0000-0000-0000-000000000040</guid>
+    <full-name>Location 40</full-name>
+</record>
+<record type="location">
+    <short-name>location41</short-name>
+    <uid>30000000-0000-0000-0000-000000000041</uid>
+    <guid>30000000-0000-0000-0000-000000000041</guid>
+    <full-name>Location 41</full-name>
+</record>
+<record type="location">
+    <short-name>location42</short-name>
+    <uid>30000000-0000-0000-0000-000000000042</uid>
+    <guid>30000000-0000-0000-0000-000000000042</guid>
+    <full-name>Location 42</full-name>
+</record>
+<record type="location">
+    <short-name>location43</short-name>
+    <uid>30000000-0000-0000-0000-000000000043</uid>
+    <guid>30000000-0000-0000-0000-000000000043</guid>
+    <full-name>Location 43</full-name>
+</record>
+<record type="location">
+    <short-name>location44</short-name>
+    <uid>30000000-0000-0000-0000-000000000044</uid>
+    <guid>30000000-0000-0000-0000-000000000044</guid>
+    <full-name>Location 44</full-name>
+</record>
+<record type="location">
+    <short-name>location45</short-name>
+    <uid>30000000-0000-0000-0000-000000000045</uid>
+    <guid>30000000-0000-0000-0000-000000000045</guid>
+    <full-name>Location 45</full-name>
+</record>
+<record type="location">
+    <short-name>location46</short-name>
+    <uid>30000000-0000-0000-0000-000000000046</uid>
+    <guid>30000000-0000-0000-0000-000000000046</guid>
+    <full-name>Location 46</full-name>
+</record>
+<record type="location">
+    <short-name>location47</short-name>
+    <uid>30000000-0000-0000-0000-000000000047</uid>
+    <guid>30000000-0000-0000-0000-000000000047</guid>
+    <full-name>Location 47</full-name>
+</record>
+<record type="location">
+    <short-name>location48</short-name>
+    <uid>30000000-0000-0000-0000-000000000048</uid>
+    <guid>30000000-0000-0000-0000-000000000048</guid>
+    <full-name>Location 48</full-name>
+</record>
+<record type="location">
+    <short-name>location49</short-name>
+    <uid>30000000-0000-0000-0000-000000000049</uid>
+    <guid>30000000-0000-0000-0000-000000000049</guid>
+    <full-name>Location 49</full-name>
+</record>
+<record type="location">
+    <short-name>location50</short-name>
+    <uid>30000000-0000-0000-0000-000000000050</uid>
+    <guid>30000000-0000-0000-0000-000000000050</guid>
+    <full-name>Location 50</full-name>
+</record>
+<record type="location">
+    <short-name>location51</short-name>
+    <uid>30000000-0000-0000-0000-000000000051</uid>
+    <guid>30000000-0000-0000-0000-000000000051</guid>
+    <full-name>Location 51</full-name>
+</record>
+<record type="location">
+    <short-name>location52</short-name>
+    <uid>30000000-0000-0000-0000-000000000052</uid>
+    <guid>30000000-0000-0000-0000-000000000052</guid>
+    <full-name>Location 52</full-name>
+</record>
+<record type="location">
+    <short-name>location53</short-name>
+    <uid>30000000-0000-0000-0000-000000000053</uid>
+    <guid>30000000-0000-0000-0000-000000000053</guid>
+    <full-name>Location 53</full-name>
+</record>
+<record type="location">
+    <short-name>location54</short-name>
+    <uid>30000000-0000-0000-0000-000000000054</uid>
+    <guid>30000000-0000-0000-0000-000000000054</guid>
+    <full-name>Location 54</full-name>
+</record>
+<record type="location">
+    <short-name>location55</short-name>
+    <uid>30000000-0000-0000-0000-000000000055</uid>
+    <guid>30000000-0000-0000-0000-000000000055</guid>
+    <full-name>Location 55</full-name>
+</record>
+<record type="location">
+    <short-name>location56</short-name>
+    <uid>30000000-0000-0000-0000-000000000056</uid>
+    <guid>30000000-0000-0000-0000-000000000056</guid>
+    <full-name>Location 56</full-name>
+</record>
+<record type="location">
+    <short-name>location57</short-name>
+    <uid>30000000-0000-0000-0000-000000000057</uid>
+    <guid>30000000-0000-0000-0000-000000000057</guid>
+    <full-name>Location 57</full-name>
+</record>
+<record type="location">
+    <short-name>location58</short-name>
+    <uid>30000000-0000-0000-0000-000000000058</uid>
+    <guid>30000000-0000-0000-0000-000000000058</guid>
+    <full-name>Location 58</full-name>
+</record>
+<record type="location">
+    <short-name>location59</short-name>
+    <uid>30000000-0000-0000-0000-000000000059</uid>
+    <guid>30000000-0000-0000-0000-000000000059</guid>
+    <full-name>Location 59</full-name>
+</record>
+<record type="location">
+    <short-name>location60</short-name>
+    <uid>30000000-0000-0000-0000-000000000060</uid>
+    <guid>30000000-0000-0000-0000-000000000060</guid>
+    <full-name>Location 60</full-name>
+</record>
+<record type="location">
+    <short-name>location61</short-name>
+    <uid>30000000-0000-0000-0000-000000000061</uid>
+    <guid>30000000-0000-0000-0000-000000000061</guid>
+    <full-name>Location 61</full-name>
+</record>
+<record type="location">
+    <short-name>location62</short-name>
+    <uid>30000000-0000-0000-0000-000000000062</uid>
+    <guid>30000000-0000-0000-0000-000000000062</guid>
+    <full-name>Location 62</full-name>
+</record>
+<record type="location">
+    <short-name>location63</short-name>
+    <uid>30000000-0000-0000-0000-000000000063</uid>
+    <guid>30000000-0000-0000-0000-000000000063</guid>
+    <full-name>Location 63</full-name>
+</record>
+<record type="location">
+    <short-name>location64</short-name>
+    <uid>30000000-0000-0000-0000-000000000064</uid>
+    <guid>30000000-0000-0000-0000-000000000064</guid>
+    <full-name>Location 64</full-name>
+</record>
+<record type="location">
+    <short-name>location65</short-name>
+    <uid>30000000-0000-0000-0000-000000000065</uid>
+    <guid>30000000-0000-0000-0000-000000000065</guid>
+    <full-name>Location 65</full-name>
+</record>
+<record type="location">
+    <short-name>location66</short-name>
+    <uid>30000000-0000-0000-0000-000000000066</uid>
+    <guid>30000000-0000-0000-0000-000000000066</guid>
+    <full-name>Location 66</full-name>
+</record>
+<record type="location">
+    <short-name>location67</short-name>
+    <uid>30000000-0000-0000-0000-000000000067</uid>
+    <guid>30000000-0000-0000-0000-000000000067</guid>
+    <full-name>Location 67</full-name>
+</record>
+<record type="location">
+    <short-name>location68</short-name>
+    <uid>30000000-0000-0000-0000-000000000068</uid>
+    <guid>30000000-0000-0000-0000-000000000068</guid>
+    <full-name>Location 68</full-name>
+</record>
+<record type="location">
+    <short-name>location69</short-name>
+    <uid>30000000-0000-0000-0000-000000000069</uid>
+    <guid>30000000-0000-0000-0000-000000000069</guid>
+    <full-name>Location 69</full-name>
+</record>
+<record type="location">
+    <short-name>location70</short-name>
+    <uid>30000000-0000-0000-0000-000000000070</uid>
+    <guid>30000000-0000-0000-0000-000000000070</guid>
+    <full-name>Location 70</full-name>
+</record>
+<record type="location">
+    <short-name>location71</short-name>
+    <uid>30000000-0000-0000-0000-000000000071</uid>
+    <guid>30000000-0000-0000-0000-000000000071</guid>
+    <full-name>Location 71</full-name>
+</record>
+<record type="location">
+    <short-name>location72</short-name>
+    <uid>30000000-0000-0000-0000-000000000072</uid>
+    <guid>30000000-0000-0000-0000-000000000072</guid>
+    <full-name>Location 72</full-name>
+</record>
+<record type="location">
+    <short-name>location73</short-name>
+    <uid>30000000-0000-0000-0000-000000000073</uid>
+    <guid>30000000-0000-0000-0000-000000000073</guid>
+    <full-name>Location 73</full-name>
+</record>
+<record type="location">
+    <short-name>location74</short-name>
+    <uid>30000000-0000-0000-0000-000000000074</uid>
+    <guid>30000000-0000-0000-0000-000000000074</guid>
+    <full-name>Location 74</full-name>
+</record>
+<record type="location">
+    <short-name>location75</short-name>
+    <uid>30000000-0000-0000-0000-000000000075</uid>
+    <guid>30000000-0000-0000-0000-000000000075</guid>
+    <full-name>Location 75</full-name>
+</record>
+<record type="location">
+    <short-name>location76</short-name>
+    <uid>30000000-0000-0000-0000-000000000076</uid>
+    <guid>30000000-0000-0000-0000-000000000076</guid>
+    <full-name>Location 76</full-name>
+</record>
+<record type="location">
+    <short-name>location77</short-name>
+    <uid>30000000-0000-0000-0000-000000000077</uid>
+    <guid>30000000-0000-0000-0000-000000000077</guid>
+    <full-name>Location 77</full-name>
+</record>
+<record type="location">
+    <short-name>location78</short-name>
+    <uid>30000000-0000-0000-0000-000000000078</uid>
+    <guid>30000000-0000-0000-0000-000000000078</guid>
+    <full-name>Location 78</full-name>
+</record>
+<record type="location">
+    <short-name>location79</short-name>
+    <uid>30000000-0000-0000-0000-000000000079</uid>
+    <guid>30000000-0000-0000-0000-000000000079</guid>
+    <full-name>Location 79</full-name>
+</record>
+<record type="location">
+    <short-name>location80</short-name>
+    <uid>30000000-0000-0000-0000-000000000080</uid>
+    <guid>30000000-0000-0000-0000-000000000080</guid>
+    <full-name>Location 80</full-name>
+</record>
+<record type="location">
+    <short-name>location81</short-name>
+    <uid>30000000-0000-0000-0000-000000000081</uid>
+    <guid>30000000-0000-0000-0000-000000000081</guid>
+    <full-name>Location 81</full-name>
+</record>
+<record type="location">
+    <short-name>location82</short-name>
+    <uid>30000000-0000-0000-0000-000000000082</uid>
+    <guid>30000000-0000-0000-0000-000000000082</guid>
+    <full-name>Location 82</full-name>
+</record>
+<record type="location">
+    <short-name>location83</short-name>
+    <uid>30000000-0000-0000-0000-000000000083</uid>
+    <guid>30000000-0000-0000-0000-000000000083</guid>
+    <full-name>Location 83</full-name>
+</record>
+<record type="location">
+    <short-name>location84</short-name>
+    <uid>30000000-0000-0000-0000-000000000084</uid>
+    <guid>30000000-0000-0000-0000-000000000084</guid>
+    <full-name>Location 84</full-name>
+</record>
+<record type="location">
+    <short-name>location85</short-name>
+    <uid>30000000-0000-0000-0000-000000000085</uid>
+    <guid>30000000-0000-0000-0000-000000000085</guid>
+    <full-name>Location 85</full-name>
+</record>
+<record type="location">
+    <short-name>location86</short-name>
+    <uid>30000000-0000-0000-0000-000000000086</uid>
+    <guid>30000000-0000-0000-0000-000000000086</guid>
+    <full-name>Location 86</full-name>
+</record>
+<record type="location">
+    <short-name>location87</short-name>
+    <uid>30000000-0000-0000-0000-000000000087</uid>
+    <guid>30000000-0000-0000-0000-000000000087</guid>
+    <full-name>Location 87</full-name>
+</record>
+<record type="location">
+    <short-name>location88</short-name>
+    <uid>30000000-0000-0000-0000-000000000088</uid>
+    <guid>30000000-0000-0000-0000-000000000088</guid>
+    <full-name>Location 88</full-name>
+</record>
+<record type="location">
+    <short-name>location89</short-name>
+    <uid>30000000-0000-0000-0000-000000000089</uid>
+    <guid>30000000-0000-0000-0000-000000000089</guid>
+    <full-name>Location 89</full-name>
+</record>
+<record type="location">
+    <short-name>location90</short-name>
+    <uid>30000000-0000-0000-0000-000000000090</uid>
+    <guid>30000000-0000-0000-0000-000000000090</guid>
+    <full-name>Location 90</full-name>
+</record>
+<record type="location">
+    <short-name>location91</short-name>
+    <uid>30000000-0000-0000-0000-000000000091</uid>
+    <guid>30000000-0000-0000-0000-000000000091</guid>
+    <full-name>Location 91</full-name>
+</record>
+<record type="location">
+    <short-name>location92</short-name>
+    <uid>30000000-0000-0000-0000-000000000092</uid>
+    <guid>30000000-0000-0000-0000-000000000092</guid>
+    <full-name>Location 92</full-name>
+</record>
+<record type="location">
+    <short-name>location93</short-name>
+    <uid>30000000-0000-0000-0000-000000000093</uid>
+    <guid>30000000-0000-0000-0000-000000000093</guid>
+    <full-name>Location 93</full-name>
+</record>
+<record type="location">
+    <short-name>location94</short-name>
+    <uid>30000000-0000-0000-0000-000000000094</uid>
+    <guid>30000000-0000-0000-0000-000000000094</guid>
+    <full-name>Location 94</full-name>
+</record>
+<record type="location">
+    <short-name>location95</short-name>
+    <uid>30000000-0000-0000-0000-000000000095</uid>
+    <guid>30000000-0000-0000-0000-000000000095</guid>
+    <full-name>Location 95</full-name>
+</record>
+<record type="location">
+    <short-name>location96</short-name>
+    <uid>30000000-0000-0000-0000-000000000096</uid>
+    <guid>30000000-0000-0000-0000-000000000096</guid>
+    <full-name>Location 96</full-name>
+</record>
+<record type="location">
+    <short-name>location97</short-name>
+    <uid>30000000-0000-0000-0000-000000000097</uid>
+    <guid>30000000-0000-0000-0000-000000000097</guid>
+    <full-name>Location 97</full-name>
+</record>
+<record type="location">
+    <short-name>location98</short-name>
+    <uid>30000000-0000-0000-0000-000000000098</uid>
+    <guid>30000000-0000-0000-0000-000000000098</guid>
+    <full-name>Location 98</full-name>
+</record>
+<record type="location">
+    <short-name>location99</short-name>
+    <uid>30000000-0000-0000-0000-000000000099</uid>
+    <guid>30000000-0000-0000-0000-000000000099</guid>
+    <full-name>Location 99</full-name>
+</record>
+<record type="location">
+    <short-name>location100</short-name>
+    <uid>30000000-0000-0000-0000-000000000100</uid>
+    <guid>30000000-0000-0000-0000-000000000100</guid>
+    <full-name>Location 100</full-name>
+</record>
+<record type="resource">
+    <short-name>resource01</short-name>
+    <uid>40000000-0000-0000-0000-000000000001</uid>
+    <guid>40000000-0000-0000-0000-000000000001</guid>
+    <full-name>Resource 01</full-name>
+</record>
+<record type="resource">
+    <short-name>resource02</short-name>
+    <uid>40000000-0000-0000-0000-000000000002</uid>
+    <guid>40000000-0000-0000-0000-000000000002</guid>
+    <full-name>Resource 02</full-name>
+</record>
+<record type="resource">
+    <short-name>resource03</short-name>
+    <uid>40000000-0000-0000-0000-000000000003</uid>
+    <guid>40000000-0000-0000-0000-000000000003</guid>
+    <full-name>Resource 03</full-name>
+</record>
+<record type="resource">
+    <short-name>resource04</short-name>
+    <uid>40000000-0000-0000-0000-000000000004</uid>
+    <guid>40000000-0000-0000-0000-000000000004</guid>
+    <full-name>Resource 04</full-name>
+</record>
+<record type="resource">
+    <short-name>resource05</short-name>
+    <uid>40000000-0000-0000-0000-000000000005</uid>
+    <guid>40000000-0000-0000-0000-000000000005</guid>
+    <full-name>Resource 05</full-name>
+</record>
+<record type="resource">
+    <short-name>resource06</short-name>
+    <uid>40000000-0000-0000-0000-000000000006</uid>
+    <guid>40000000-0000-0000-0000-000000000006</guid>
+    <full-name>Resource 06</full-name>
+</record>
+<record type="resource">
+    <short-name>resource07</short-name>
+    <uid>40000000-0000-0000-0000-000000000007</uid>
+    <guid>40000000-0000-0000-0000-000000000007</guid>
+    <full-name>Resource 07</full-name>
+</record>
+<record type="resource">
+    <short-name>resource08</short-name>
+    <uid>40000000-0000-0000-0000-000000000008</uid>
+    <guid>40000000-0000-0000-0000-000000000008</guid>
+    <full-name>Resource 08</full-name>
+</record>
+<record type="resource">
+    <short-name>resource09</short-name>
+    <uid>40000000-0000-0000-0000-000000000009</uid>
+    <guid>40000000-0000-0000-0000-000000000009</guid>
+    <full-name>Resource 09</full-name>
+</record>
+<record type="resource">
+    <short-name>resource10</short-name>
+    <uid>40000000-0000-0000-0000-000000000010</uid>
+    <guid>40000000-0000-0000-0000-000000000010</guid>
+    <full-name>Resource 10</full-name>
+</record>
+<record type="resource">
+    <short-name>resource11</short-name>
+    <uid>40000000-0000-0000-0000-000000000011</uid>
+    <guid>40000000-0000-0000-0000-000000000011</guid>
+    <full-name>Resource 11</full-name>
+</record>
+<record type="resource">
+    <short-name>resource12</short-name>
+    <uid>40000000-0000-0000-0000-000000000012</uid>
+    <guid>40000000-0000-0000-0000-000000000012</guid>
+    <full-name>Resource 12</full-name>
+</record>
+<record type="resource">
+    <short-name>resource13</short-name>
+    <uid>40000000-0000-0000-0000-000000000013</uid>
+    <guid>40000000-0000-0000-0000-000000000013</guid>
+    <full-name>Resource 13</full-name>
+</record>
+<record type="resource">
+    <short-name>resource14</short-name>
+    <uid>40000000-0000-0000-0000-000000000014</uid>
+    <guid>40000000-0000-0000-0000-000000000014</guid>
+    <full-name>Resource 14</full-name>
+</record>
+<record type="resource">
+    <short-name>resource15</short-name>
+    <uid>40000000-0000-0000-0000-000000000015</uid>
+    <guid>40000000-0000-0000-0000-000000000015</guid>
+    <full-name>Resource 15</full-name>
+</record>
+<record type="resource">
+    <short-name>resource16</short-name>
+    <uid>40000000-0000-0000-0000-000000000016</uid>
+    <guid>40000000-0000-0000-0000-000000000016</guid>
+    <full-name>Resource 16</full-name>
+</record>
+<record type="resource">
+    <short-name>resource17</short-name>
+    <uid>40000000-0000-0000-0000-000000000017</uid>
+    <guid>40000000-0000-0000-0000-000000000017</guid>
+    <full-name>Resource 17</full-name>
+</record>
+<record type="resource">
+    <short-name>resource18</short-name>
+    <uid>40000000-0000-0000-0000-000000000018</uid>
+    <guid>40000000-0000-0000-0000-000000000018</guid>
+    <full-name>Resource 18</full-name>
+</record>
+<record type="resource">
+    <short-name>resource19</short-name>
+    <uid>40000000-0000-0000-0000-000000000019</uid>
+    <guid>40000000-0000-0000-0000-000000000019</guid>
+    <full-name>Resource 19</full-name>
+</record>
+<record type="resource">
+    <short-name>resource20</short-name>
+    <uid>40000000-0000-0000-0000-000000000020</uid>
+    <guid>40000000-0000-0000-0000-000000000020</guid>
+    <full-name>Resource 20</full-name>
+</record>
+<record type="resource">
+    <short-name>resource21</short-name>
+    <uid>40000000-0000-0000-0000-000000000021</uid>
+    <guid>40000000-0000-0000-0000-000000000021</guid>
+    <full-name>Resource 21</full-name>
+</record>
+<record type="resource">
+    <short-name>resource22</short-name>
+    <uid>40000000-0000-0000-0000-000000000022</uid>
+    <guid>40000000-0000-0000-0000-000000000022</guid>
+    <full-name>Resource 22</full-name>
+</record>
+<record type="resource">
+    <short-name>resource23</short-name>
+    <uid>40000000-0000-0000-0000-000000000023</uid>
+    <guid>40000000-0000-0000-0000-000000000023</guid>
+    <full-name>Resource 23</full-name>
+</record>
+<record type="resource">
+    <short-name>resource24</short-name>
+    <uid>40000000-0000-0000-0000-000000000024</uid>
+    <guid>40000000-0000-0000-0000-000000000024</guid>
+    <full-name>Resource 24</full-name>
+</record>
+<record type="resource">
+    <short-name>resource25</short-name>
+    <uid>40000000-0000-0000-0000-000000000025</uid>
+    <guid>40000000-0000-0000-0000-000000000025</guid>
+    <full-name>Resource 25</full-name>
+</record>
+<record type="resource">
+    <short-name>resource26</short-name>
+    <uid>40000000-0000-0000-0000-000000000026</uid>
+    <guid>40000000-0000-0000-0000-000000000026</guid>
+    <full-name>Resource 26</full-name>
+</record>
+<record type="resource">
+    <short-name>resource27</short-name>
+    <uid>40000000-0000-0000-0000-000000000027</uid>
+    <guid>40000000-0000-0000-0000-000000000027</guid>
+    <full-name>Resource 27</full-name>
+</record>
+<record type="resource">
+    <short-name>resource28</short-name>
+    <uid>40000000-0000-0000-0000-000000000028</uid>
+    <guid>40000000-0000-0000-0000-000000000028</guid>
+    <full-name>Resource 28</full-name>
+</record>
+<record type="resource">
+    <short-name>resource29</short-name>
+    <uid>40000000-0000-0000-0000-000000000029</uid>
+    <guid>40000000-0000-0000-0000-000000000029</guid>
+    <full-name>Resource 29</full-name>
+</record>
+<record type="resource">
+    <short-name>resource30</short-name>
+    <uid>40000000-0000-0000-0000-000000000030</uid>
+    <guid>40000000-0000-0000-0000-000000000030</guid>
+    <full-name>Resource 30</full-name>
+</record>
+<record type="resource">
+    <short-name>resource31</short-name>
+    <uid>40000000-0000-0000-0000-000000000031</uid>
+    <guid>40000000-0000-0000-0000-000000000031</guid>
+    <full-name>Resource 31</full-name>
+</record>
+<record type="resource">
+    <short-name>resource32</short-name>
+    <uid>40000000-0000-0000-0000-000000000032</uid>
+    <guid>40000000-0000-0000-0000-000000000032</guid>
+    <full-name>Resource 32</full-name>
+</record>
+<record type="resource">
+    <short-name>resource33</short-name>
+    <uid>40000000-0000-0000-0000-000000000033</uid>
+    <guid>40000000-0000-0000-0000-000000000033</guid>
+    <full-name>Resource 33</full-name>
+</record>
+<record type="resource">
+    <short-name>resource34</short-name>
+    <uid>40000000-0000-0000-0000-000000000034</uid>
+    <guid>40000000-0000-0000-0000-000000000034</guid>
+    <full-name>Resource 34</full-name>
+</record>
+<record type="resource">
+    <short-name>resource35</short-name>
+    <uid>40000000-0000-0000-0000-000000000035</uid>
+    <guid>40000000-0000-0000-0000-000000000035</guid>
+    <full-name>Resource 35</full-name>
+</record>
+<record type="resource">
+    <short-name>resource36</short-name>
+    <uid>40000000-0000-0000-0000-000000000036</uid>
+    <guid>40000000-0000-0000-0000-000000000036</guid>
+    <full-name>Resource 36</full-name>
+</record>
+<record type="resource">
+    <short-name>resource37</short-name>
+    <uid>40000000-0000-0000-0000-000000000037</uid>
+    <guid>40000000-0000-0000-0000-000000000037</guid>
+    <full-name>Resource 37</full-name>
+</record>
+<record type="resource">
+    <short-name>resource38</short-name>
+    <uid>40000000-0000-0000-0000-000000000038</uid>
+    <guid>40000000-0000-0000-0000-000000000038</guid>
+    <full-name>Resource 38</full-name>
+</record>
+<record type="resource">
+    <short-name>resource39</short-name>
+    <uid>40000000-0000-0000-0000-000000000039</uid>
+    <guid>40000000-0000-0000-0000-000000000039</guid>
+    <full-name>Resource 39</full-name>
+</record>
+<record type="resource">
+    <short-name>resource40</short-name>
+    <uid>40000000-0000-0000-0000-000000000040</uid>
+    <guid>40000000-0000-0000-0000-000000000040</guid>
+    <full-name>Resource 40</full-name>
+</record>
+<record type="resource">
+    <short-name>resource41</short-name>
+    <uid>40000000-0000-0000-0000-000000000041</uid>
+    <guid>40000000-0000-0000-0000-000000000041</guid>
+    <full-name>Resource 41</full-name>
+</record>
+<record type="resource">
+    <short-name>resource42</short-name>
+    <uid>40000000-0000-0000-0000-000000000042</uid>
+    <guid>40000000-0000-0000-0000-000000000042</guid>
+    <full-name>Resource 42</full-name>
+</record>
+<record type="resource">
+    <short-name>resource43</short-name>
+    <uid>40000000-0000-0000-0000-000000000043</uid>
+    <guid>40000000-0000-0000-0000-000000000043</guid>
+    <full-name>Resource 43</full-name>
+</record>
+<record type="resource">
+    <short-name>resource44</short-name>
+    <uid>40000000-0000-0000-0000-000000000044</uid>
+    <guid>40000000-0000-0000-0000-000000000044</guid>
+    <full-name>Resource 44</full-name>
+</record>
+<record type="resource">
+    <short-name>resource45</short-name>
+    <uid>40000000-0000-0000-0000-000000000045</uid>
+    <guid>40000000-0000-0000-0000-000000000045</guid>
+    <full-name>Resource 45</full-name>
+</record>
+<record type="resource">
+    <short-name>resource46</short-name>
+    <uid>40000000-0000-0000-0000-000000000046</uid>
+    <guid>40000000-0000-0000-0000-000000000046</guid>
+    <full-name>Resource 46</full-name>
+</record>
+<record type="resource">
+    <short-name>resource47</short-name>
+    <uid>40000000-0000-0000-0000-000000000047</uid>
+    <guid>40000000-0000-0000-0000-000000000047</guid>
+    <full-name>Resource 47</full-name>
+</record>
+<record type="resource">
+    <short-name>resource48</short-name>
+    <uid>40000000-0000-0000-0000-000000000048</uid>
+    <guid>40000000-0000-0000-0000-000000000048</guid>
+    <full-name>Resource 48</full-name>
+</record>
+<record type="resource">
+    <short-name>resource49</short-name>
+    <uid>40000000-0000-0000-0000-000000000049</uid>
+    <guid>40000000-0000-0000-0000-000000000049</guid>
+    <full-name>Resource 49</full-name>
+</record>
+<record type="resource">
+    <short-name>resource50</short-name>
+    <uid>40000000-0000-0000-0000-000000000050</uid>
+    <guid>40000000-0000-0000-0000-000000000050</guid>
+    <full-name>Resource 50</full-name>
+</record>
+<record type="resource">
+    <short-name>resource51</short-name>
+    <uid>40000000-0000-0000-0000-000000000051</uid>
+    <guid>40000000-0000-0000-0000-000000000051</guid>
+    <full-name>Resource 51</full-name>
+</record>
+<record type="resource">
+    <short-name>resource52</short-name>
+    <uid>40000000-0000-0000-0000-000000000052</uid>
+    <guid>40000000-0000-0000-0000-000000000052</guid>
+    <full-name>Resource 52</full-name>
+</record>
+<record type="resource">
+    <short-name>resource53</short-name>
+    <uid>40000000-0000-0000-0000-000000000053</uid>
+    <guid>40000000-0000-0000-0000-000000000053</guid>
+    <full-name>Resource 53</full-name>
+</record>
+<record type="resource">
+    <short-name>resource54</short-name>
+    <uid>40000000-0000-0000-0000-000000000054</uid>
+    <guid>40000000-0000-0000-0000-000000000054</guid>
+    <full-name>Resource 54</full-name>
+</record>
+<record type="resource">
+    <short-name>resource55</short-name>
+    <uid>40000000-0000-0000-0000-000000000055</uid>
+    <guid>40000000-0000-0000-0000-000000000055</guid>
+    <full-name>Resource 55</full-name>
+</record>
+<record type="resource">
+    <short-name>resource56</short-name>
+    <uid>40000000-0000-0000-0000-000000000056</uid>
+    <guid>40000000-0000-0000-0000-000000000056</guid>
+    <full-name>Resource 56</full-name>
+</record>
+<record type="resource">
+    <short-name>resource57</short-name>
+    <uid>40000000-0000-0000-0000-000000000057</uid>
+    <guid>40000000-0000-0000-0000-000000000057</guid>
+    <full-name>Resource 57</full-name>
+</record>
+<record type="resource">
+    <short-name>resource58</short-name>
+    <uid>40000000-0000-0000-0000-000000000058</uid>
+    <guid>40000000-0000-0000-0000-000000000058</guid>
+    <full-name>Resource 58</full-name>
+</record>
+<record type="resource">
+    <short-name>resource59</short-name>
+    <uid>40000000-0000-0000-0000-000000000059</uid>
+    <guid>40000000-0000-0000-0000-000000000059</guid>
+    <full-name>Resource 59</full-name>
+</record>
+<record type="resource">
+    <short-name>resource60</short-name>
+    <uid>40000000-0000-0000-0000-000000000060</uid>
+    <guid>40000000-0000-0000-0000-000000000060</guid>
+    <full-name>Resource 60</full-name>
+</record>
+<record type="resource">
+    <short-name>resource61</short-name>
+    <uid>40000000-0000-0000-0000-000000000061</uid>
+    <guid>40000000-0000-0000-0000-000000000061</guid>
+    <full-name>Resource 61</full-name>
+</record>
+<record type="resource">
+    <short-name>resource62</short-name>
+    <uid>40000000-0000-0000-0000-000000000062</uid>
+    <guid>40000000-0000-0000-0000-000000000062</guid>
+    <full-name>Resource 62</full-name>
+</record>
+<record type="resource">
+    <short-name>resource63</short-name>
+    <uid>40000000-0000-0000-0000-000000000063</uid>
+    <guid>40000000-0000-0000-0000-000000000063</guid>
+    <full-name>Resource 63</full-name>
+</record>
+<record type="resource">
+    <short-name>resource64</short-name>
+    <uid>40000000-0000-0000-0000-000000000064</uid>
+    <guid>40000000-0000-0000-0000-000000000064</guid>
+    <full-name>Resource 64</full-name>
+</record>
+<record type="resource">
+    <short-name>resource65</short-name>
+    <uid>40000000-0000-0000-0000-000000000065</uid>
+    <guid>40000000-0000-0000-0000-000000000065</guid>
+    <full-name>Resource 65</full-name>
+</record>
+<record type="resource">
+    <short-name>resource66</short-name>
+    <uid>40000000-0000-0000-0000-000000000066</uid>
+    <guid>40000000-0000-0000-0000-000000000066</guid>
+    <full-name>Resource 66</full-name>
+</record>
+<record type="resource">
+    <short-name>resource67</short-name>
+    <uid>40000000-0000-0000-0000-000000000067</uid>
+    <guid>40000000-0000-0000-0000-000000000067</guid>
+    <full-name>Resource 67</full-name>
+</record>
+<record type="resource">
+    <short-name>resource68</short-name>
+    <uid>40000000-0000-0000-0000-000000000068</uid>
+    <guid>40000000-0000-0000-0000-000000000068</guid>
+    <full-name>Resource 68</full-name>
+</record>
+<record type="resource">
+    <short-name>resource69</short-name>
+    <uid>40000000-0000-0000-0000-000000000069</uid>
+    <guid>40000000-0000-0000-0000-000000000069</guid>
+    <full-name>Resource 69</full-name>
+</record>
+<record type="resource">
+    <short-name>resource70</short-name>
+    <uid>40000000-0000-0000-0000-000000000070</uid>
+    <guid>40000000-0000-0000-0000-000000000070</guid>
+    <full-name>Resource 70</full-name>
+</record>
+<record type="resource">
+    <short-name>resource71</short-name>
+    <uid>40000000-0000-0000-0000-000000000071</uid>
+    <guid>40000000-0000-0000-0000-000000000071</guid>
+    <full-name>Resource 71</full-name>
+</record>
+<record type="resource">
+    <short-name>resource72</short-name>
+    <uid>40000000-0000-0000-0000-000000000072</uid>
+    <guid>40000000-0000-0000-0000-000000000072</guid>
+    <full-name>Resource 72</full-name>
+</record>
+<record type="resource">
+    <short-name>resource73</short-name>
+    <uid>40000000-0000-0000-0000-000000000073</uid>
+    <guid>40000000-0000-0000-0000-000000000073</guid>
+    <full-name>Resource 73</full-name>
+</record>
+<record type="resource">
+    <short-name>resource74</short-name>
+    <uid>40000000-0000-0000-0000-000000000074</uid>
+    <guid>40000000-0000-0000-0000-000000000074</guid>
+    <full-name>Resource 74</full-name>
+</record>
+<record type="resource">
+    <short-name>resource75</short-name>
+    <uid>40000000-0000-0000-0000-000000000075</uid>
+    <guid>40000000-0000-0000-0000-000000000075</guid>
+    <full-name>Resource 75</full-name>
+</record>
+<record type="resource">
+    <short-name>resource76</short-name>
+    <uid>40000000-0000-0000-0000-000000000076</uid>
+    <guid>40000000-0000-0000-0000-000000000076</guid>
+    <full-name>Resource 76</full-name>
+</record>
+<record type="resource">
+    <short-name>resource77</short-name>
+    <uid>40000000-0000-0000-0000-000000000077</uid>
+    <guid>40000000-0000-0000-0000-000000000077</guid>
+    <full-name>Resource 77</full-name>
+</record>
+<record type="resource">
+    <short-name>resource78</short-name>
+    <uid>40000000-0000-0000-0000-000000000078</uid>
+    <guid>40000000-0000-0000-0000-000000000078</guid>
+    <full-name>Resource 78</full-name>
+</record>
+<record type="resource">
+    <short-name>resource79</short-name>
+    <uid>40000000-0000-0000-0000-000000000079</uid>
+    <guid>40000000-0000-0000-0000-000000000079</guid>
+    <full-name>Resource 79</full-name>
+</record>
+<record type="resource">
+    <short-name>resource80</short-name>
+    <uid>40000000-0000-0000-0000-000000000080</uid>
+    <guid>40000000-0000-0000-0000-000000000080</guid>
+    <full-name>Resource 80</full-name>
+</record>
+<record type="resource">
+    <short-name>resource81</short-name>
+    <uid>40000000-0000-0000-0000-000000000081</uid>
+    <guid>40000000-0000-0000-0000-000000000081</guid>
+    <full-name>Resource 81</full-name>
+</record>
+<record type="resource">
+    <short-name>resource82</short-name>
+    <uid>40000000-0000-0000-0000-000000000082</uid>
+    <guid>40000000-0000-0000-0000-000000000082</guid>
+    <full-name>Resource 82</full-name>
+</record>
+<record type="resource">
+    <short-name>resource83</short-name>
+    <uid>40000000-0000-0000-0000-000000000083</uid>
+    <guid>40000000-0000-0000-0000-000000000083</guid>
+    <full-name>Resource 83</full-name>
+</record>
+<record type="resource">
+    <short-name>resource84</short-name>
+    <uid>40000000-0000-0000-0000-000000000084</uid>
+    <guid>40000000-0000-0000-0000-000000000084</guid>
+    <full-name>Resource 84</full-name>
+</record>
+<record type="resource">
+    <short-name>resource85</short-name>
+    <uid>40000000-0000-0000-0000-000000000085</uid>
+    <guid>40000000-0000-0000-0000-000000000085</guid>
+    <full-name>Resource 85</full-name>
+</record>
+<record type="resource">
+    <short-name>resource86</short-name>
+    <uid>40000000-0000-0000-0000-000000000086</uid>
+    <guid>40000000-0000-0000-0000-000000000086</guid>
+    <full-name>Resource 86</full-name>
+</record>
+<record type="resource">
+    <short-name>resource87</short-name>
+    <uid>40000000-0000-0000-0000-000000000087</uid>
+    <guid>40000000-0000-0000-0000-000000000087</guid>
+    <full-name>Resource 87</full-name>
+</record>
+<record type="resource">
+    <short-name>resource88</short-name>
+    <uid>40000000-0000-0000-0000-000000000088</uid>
+    <guid>40000000-0000-0000-0000-000000000088</guid>
+    <full-name>Resource 88</full-name>
+</record>
+<record type="resource">
+    <short-name>resource89</short-name>
+    <uid>40000000-0000-0000-0000-000000000089</uid>
+    <guid>40000000-0000-0000-0000-000000000089</guid>
+    <full-name>Resource 89</full-name>
+</record>
+<record type="resource">
+    <short-name>resource90</short-name>
+    <uid>40000000-0000-0000-0000-000000000090</uid>
+    <guid>40000000-0000-0000-0000-000000000090</guid>
+    <full-name>Resource 90</full-name>
+</record>
+<record type="resource">
+    <short-name>resource91</short-name>
+    <uid>40000000-0000-0000-0000-000000000091</uid>
+    <guid>40000000-0000-0000-0000-000000000091</guid>
+    <full-name>Resource 91</full-name>
+</record>
+<record type="resource">
+    <short-name>resource92</short-name>
+    <uid>40000000-0000-0000-0000-000000000092</uid>
+    <guid>40000000-0000-0000-0000-000000000092</guid>
+    <full-name>Resource 92</full-name>
+</record>
+<record type="resource">
+    <short-name>resource93</short-name>
+    <uid>40000000-0000-0000-0000-000000000093</uid>
+    <guid>40000000-0000-0000-0000-000000000093</guid>
+    <full-name>Resource 93</full-name>
+</record>
+<record type="resource">
+    <short-name>resource94</short-name>
+    <uid>40000000-0000-0000-0000-000000000094</uid>
+    <guid>40000000-0000-0000-0000-000000000094</guid>
+    <full-name>Resource 94</full-name>
+</record>
+<record type="resource">
+    <short-name>resource95</short-name>
+    <uid>40000000-0000-0000-0000-000000000095</uid>
+    <guid>40000000-0000-0000-0000-000000000095</guid>
+    <full-name>Resource 95</full-name>
+</record>
+<record type="resource">
+    <short-name>resource96</short-name>
+    <uid>40000000-0000-0000-0000-000000000096</uid>
+    <guid>40000000-0000-0000-0000-000000000096</guid>
+    <full-name>Resource 96</full-name>
+</record>
+<record type="resource">
+    <short-name>resource97</short-name>
+    <uid>40000000-0000-0000-0000-000000000097</uid>
+    <guid>40000000-0000-0000-0000-000000000097</guid>
+    <full-name>Resource 97</full-name>
+</record>
+<record type="resource">
+    <short-name>resource98</short-name>
+    <uid>40000000-0000-0000-0000-000000000098</uid>
+    <guid>40000000-0000-0000-0000-000000000098</guid>
+    <full-name>Resource 98</full-name>
+</record>
+<record type="resource">
+    <short-name>resource99</short-name>
+    <uid>40000000-0000-0000-0000-000000000099</uid>
+    <guid>40000000-0000-0000-0000-000000000099</guid>
+    <full-name>Resource 99</full-name>
+</record>
+<record type="resource">
+    <short-name>resource100</short-name>
+    <uid>40000000-0000-0000-0000-000000000100</uid>
+    <guid>40000000-0000-0000-0000-000000000100</guid>
+    <full-name>Resource 100</full-name>
+</record>
+</directory>
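
The hundred resource records above differ only in a zero-padded index, so a fixture of this shape is normally produced by a small generator script rather than edited by hand. A minimal sketch of such a generator (hypothetical helper, not part of this changeset; the element names are copied from the records above):

    def resource_records(count=100):
        """Yield <record> stanzas matching the fixture above."""
        for i in range(1, count + 1):
            uid = "40000000-0000-0000-0000-{0:012d}".format(i)
            yield "\n".join([
                '<record type="resource">',
                "    <short-name>resource{0:02d}</short-name>".format(i),
                "    <uid>{0}</uid>".format(uid),
                "    <guid>{0}</guid>".format(uid),
                "    <full-name>Resource {0:02d}</full-name>".format(i),
                "</record>",
            ]) + "\n"

    if __name__ == "__main__":
        import sys
        sys.stdout.writelines(resource_records())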

Modified: CalendarServer/branches/users/sagen/move2who-5/conf/caldavd-test.plist
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/conf/caldavd-test.plist	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/conf/caldavd-test.plist	2014-04-04 17:20:27 UTC (rev 13158)
@@ -464,7 +464,7 @@
     <!-- Principals with "DAV:all" access (relative URLs) -->
     <key>AdminPrincipals</key>
     <array>
-      <string>/principals/__uids__/admin/</string>
+      <string>/principals/__uids__/0C8BDE62-E600-4696-83D3-8B5ECABDFD2E/</string>
     </array>
 
     <!-- Principals with "DAV:read" access (relative URLs) -->
@@ -577,7 +577,7 @@
 
     <!-- Log levels -->
     <key>DefaultLogLevel</key>
-    <string>info</string> <!-- debug, info, warn, error -->
+    <string>debug</string> <!-- debug, info, warn, error -->
 
     <!-- Log level overrides for specific functionality -->
     <key>LogLevels</key>
@@ -1017,6 +1017,8 @@
       <string>en</string>
     </dict>
 
-
+    <!-- Directory Address Book -->
+    <key>EnableSearchAddressBook</key>
+    <true/>
   </dict>
 </plist>
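
The plist changes above promote the admin principal to a UID-based path, raise the default log level to debug for the test configuration, and introduce an EnableSearchAddressBook key. Assuming the new key is surfaced through the shared twistedcaldav config object in the same way as the other settings in this file (an assumption, not confirmed by this diff), server-side code would consult it roughly like this:

    # Sketch only; assumes EnableSearchAddressBook appears as an attribute on
    # the shared config object like the other keys in caldavd-test.plist.
    from twistedcaldav.config import config

    def directoryAddressBookEnabled():
        # getattr() guards against running with an older config schema
        return bool(getattr(config, "EnableSearchAddressBook", False))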

Modified: CalendarServer/branches/users/sagen/move2who-5/contrib/performance/loadtest/test_sim.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/contrib/performance/loadtest/test_sim.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/contrib/performance/loadtest/test_sim.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -24,32 +24,34 @@
 from twisted.internet.defer import Deferred, succeed
 from twisted.trial.unittest import TestCase
 
-from twistedcaldav.directory.directory import DirectoryRecord
-
 from contrib.performance.stats import NormalDistribution
 from contrib.performance.loadtest.ical import OS_X_10_6
 from contrib.performance.loadtest.profiles import Eventer, Inviter, Accepter
 from contrib.performance.loadtest.population import (
     SmoothRampUp, ClientType, PopulationParameters, Populator, CalendarClientSimulator,
-    ProfileType, SimpleStatistics)
+    ProfileType, SimpleStatistics
+)
 from contrib.performance.loadtest.sim import (
-    Arrival, SimOptions, LoadSimulator, LagTrackingReactor)
+    Arrival, SimOptions, LoadSimulator, LagTrackingReactor,
+    _DirectoryRecord
+)
 
+
 VALID_CONFIG = {
     'server': 'tcp:127.0.0.1:8008',
     'webadmin': {
         'enabled': True,
         'HTTPPort': 8080,
-        },
+    },
     'arrival': {
         'factory': 'contrib.performance.loadtest.population.SmoothRampUp',
         'params': {
             'groups': 10,
             'groupSize': 1,
             'interval': 3,
-            },
         },
-    }
+    },
+}
 
 VALID_CONFIG_PLIST = writePlistToString(VALID_CONFIG)
 
@@ -104,8 +106,9 @@
     realmName = 'stub'
 
     def _user(self, name):
-        record = DirectoryRecord(self, 'user', name, (name,))
-        record.password = 'password-' + name
+        password = 'password-' + name
+        email = name + "@example.com"
+        record = _DirectoryRecord(name, password, name, email)
         return record
 
 
@@ -119,10 +122,10 @@
             [self._user('alice'), self._user('bob'), self._user('carol')],
             Populator(None), None, None, 'http://example.org:1234/', None, None)
         users = sorted([
-                calsim._createUser(0)[0],
-                calsim._createUser(1)[0],
-                calsim._createUser(2)[0],
-                ])
+            calsim._createUser(0)[0],
+            calsim._createUser(1)[0],
+            calsim._createUser(2)[0],
+        ])
         self.assertEqual(['alice', 'bob', 'carol'], users)
 
 
@@ -171,8 +174,9 @@
 
         params = PopulationParameters()
         params.addClient(1, ClientType(
-                BrokenClient, {'runResult': clientRunResult},
-                [ProfileType(BrokenProfile, {'runResult': profileRunResult})]))
+            BrokenClient, {'runResult': clientRunResult},
+            [ProfileType(BrokenProfile, {'runResult': profileRunResult})])
+        )
         sim = CalendarClientSimulator(
             [self._user('alice')], Populator(None), params, None, 'http://example.com:1234/', None, None)
         sim.add(1, 1)
@@ -284,8 +288,9 @@
         config["accounts"] = {
             "loader": "contrib.performance.loadtest.sim.recordsFromCSVFile",
             "params": {
-                "path": accounts.path},
-            }
+                "path": accounts.path
+            },
+        }
         configpath = FilePath(self.mktemp())
         configpath.setContent(writePlistToString(config))
         io = StringIO()
@@ -312,8 +317,9 @@
         config["accounts"] = {
             "loader": "contrib.performance.loadtest.sim.recordsFromCSVFile",
             "params": {
-                "path": ""},
-            }
+                "path": ""
+            },
+        }
         configpath = FilePath(self.mktemp())
         configpath.setContent(writePlistToString(config))
         sim = LoadSimulator.fromCommandLine(['--config', configpath.path],
@@ -406,8 +412,9 @@
         section of the configuration file specified.
         """
         config = FilePath(self.mktemp())
-        config.setContent(writePlistToString({
-                    "server": "https://127.0.0.3:8432/"}))
+        config.setContent(
+            writePlistToString({"server": "https://127.0.0.3:8432/"})
+        )
         sim = LoadSimulator.fromCommandLine(['--config', config.path])
         self.assertEquals(sim.server, "https://127.0.0.3:8432/")
 
@@ -418,16 +425,18 @@
         [arrival] section of the configuration file specified.
         """
         config = FilePath(self.mktemp())
-        config.setContent(writePlistToString({
-                    "arrival": {
-                        "factory": "contrib.performance.loadtest.population.SmoothRampUp",
-                        "params": {
-                            "groups": 10,
-                            "groupSize": 1,
-                            "interval": 3,
-                            },
-                        },
-                    }))
+        config.setContent(
+            writePlistToString({
+                "arrival": {
+                    "factory": "contrib.performance.loadtest.population.SmoothRampUp",
+                    "params": {
+                        "groups": 10,
+                        "groupSize": 1,
+                        "interval": 3,
+                    },
+                },
+            })
+        )
         sim = LoadSimulator.fromCommandLine(['--config', config.path])
         self.assertEquals(
             sim.arrival,
@@ -461,11 +470,17 @@
         section of the configuration file specified.
         """
         config = FilePath(self.mktemp())
-        config.setContent(writePlistToString({
-                    "clients": [{
+        config.setContent(
+            writePlistToString(
+                {
+                    "clients": [
+                        {
                             "software": "contrib.performance.loadtest.ical.OS_X_10_6",
-                            "params": {"foo": "bar"},
-                            "profiles": [{
+                            "params": {
+                                "foo": "bar"
+                            },
+                            "profiles": [
+                                {
                                     "params": {
                                         "interval": 25,
                                         "eventStartDistribution": {
@@ -473,19 +488,38 @@
                                             "params": {
                                                 "mu": 123,
                                                 "sigma": 456,
-                                                }}},
-                                    "class": "contrib.performance.loadtest.profiles.Eventer"}],
+                                            }
+                                        }
+                                    },
+                                    "class": "contrib.performance.loadtest.profiles.Eventer"
+                                }
+                            ],
                             "weight": 3,
-                            }]}))
+                        }
+                    ]
+                }
+            )
+        )
 
         sim = LoadSimulator.fromCommandLine(
             ['--config', config.path, '--clients', config.path]
         )
         expectedParameters = PopulationParameters()
         expectedParameters.addClient(
-            3, ClientType(OS_X_10_6, {"foo": "bar"}, [ProfileType(Eventer, {
+            3,
+            ClientType(
+                OS_X_10_6,
+                {"foo": "bar"},
+                [
+                    ProfileType(
+                        Eventer, {
                             "interval": 25,
-                            "eventStartDistribution": NormalDistribution(123, 456)})]))
+                            "eventStartDistribution": NormalDistribution(123, 456)
+                        }
+                    )
+                ]
+            )
+        )
         self.assertEquals(sim.parameters, expectedParameters)
 
 
@@ -512,9 +546,18 @@
         configuration file are added to the logging system.
         """
         config = FilePath(self.mktemp())
-        config.setContent(writePlistToString({
-            "observers": [{"type":"contrib.performance.loadtest.population.SimpleStatistics", "params":{}, }, ]
-        }))
+        config.setContent(
+            writePlistToString(
+                {
+                    "observers": [
+                        {
+                            "type": "contrib.performance.loadtest.population.SimpleStatistics",
+                            "params": {},
+                        },
+                    ]
+                }
+            )
+        )
         sim = LoadSimulator.fromCommandLine(['--config', config.path])
         self.assertEquals(len(sim.observers), 1)
         self.assertIsInstance(sim.observers[0], SimpleStatistics)
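
The test changes above drop the old twistedcaldav DirectoryRecord in favour of the load simulator's lightweight _DirectoryRecord, constructed as _DirectoryRecord(name, password, name, email) in the _user() helper. For readers without the sim module to hand, a stand-in with field names inferred from that call site (the real class may name them differently) looks roughly like:

    # Stand-in sketch; the real _DirectoryRecord lives in
    # contrib.performance.loadtest.sim and is not reproduced here.
    from collections import namedtuple

    _StubDirectoryRecord = namedtuple(
        "_StubDirectoryRecord", ["uid", "password", "commonName", "email"]
    )

    def makeUser(name):
        return _StubDirectoryRecord(
            uid=name,
            password="password-" + name,
            commonName=name,
            email=name + "@example.com",
        )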

Modified: CalendarServer/branches/users/sagen/move2who-5/requirements/py_develop.txt
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/requirements/py_develop.txt	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/requirements/py_develop.txt	2014-04-04 17:20:27 UTC (rev 13158)
@@ -4,7 +4,9 @@
 
 pyflakes
 docutils>=0.11
+mockldap>=0.1.4
+q
 
 -e svn+http://svn.calendarserver.org/repository/calendarserver/CalDAVClientLibrary/trunk#egg=CalDAVClientLibrary
 
--e svn+http://svn.calendarserver.org/repository/calendarserver/CalDAVTester/trunk#egg=CalDAVTester
+-e svn+http://svn.calendarserver.org/repository/calendarserver/CalendarServer/branches/users/sagen/move2who-cdt#egg=CalDAVTester

Modified: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/cache.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/cache.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/cache.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -69,6 +69,7 @@
 
 """
 
+
 class DisabledCacheNotifier(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -164,6 +165,7 @@
             raise URINotFoundException(uri)
 
 
+    @inlineCallbacks
     def _canonicalizeURIForRequest(self, uri, request):
         """
         Always use canonicalized forms of the URIs for caching (i.e. __uids__ paths).
@@ -174,21 +176,24 @@
         uribits = uri.split("/")
         if len(uribits) > 1 and uribits[1] in ("principals", "calendars", "addressbooks"):
             if uribits[2] == "__uids__":
-                return succeed(uri)
+                returnValue(uri)
             else:
                 recordType = uribits[2]
                 recordName = uribits[3]
                 directory = request.site.resource.getDirectory()
-                record = directory.recordWithShortName(recordType, recordName)
+                record = yield directory.recordWithShortName(
+                    directory.oldNameToRecordType(recordType),
+                    recordName
+                )
                 if record is not None:
                     uribits[2] = "__uids__"
-                    uribits[3] = record.uid
-                    return succeed("/".join(uribits))
+                    uribits[3] = record.uid.encode("utf-8")
+                    returnValue("/".join(uribits))
 
         # Fall back to the locateResource approach
         try:
-            return request.locateResource(uri).addCallback(
-                lambda resrc: resrc.url()).addErrback(self._uriNotFound, uri)
+            resrc = yield request.locateResource(uri)
+            returnValue(resrc.url())
         except AssertionError:
             raise URINotFoundException(uri)
 
@@ -252,7 +257,8 @@
         """
         Get the current token for a particular URI.
         """
-
+        if isinstance(uri, unicode):
+            uri = uri.encode("utf-8")
         if cachePoolHandle:
             result = (yield defaultCachePool(cachePoolHandle).get('cacheToken:%s' % (uri,)))
         else:
@@ -436,8 +442,9 @@
                     cTokens,
                 )
             )
-            yield self.getCachePool().set(key, cacheEntry,
-                expireTime=config.ResponseCacheTimeout * 60)
+            yield self.getCachePool().set(
+                key, cacheEntry, expireTime=config.ResponseCacheTimeout * 60
+            )
 
         except URINotFoundException, e:
             self.log.debug("Could not locate URI: {e!r}", e=e)
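
The cache changes above rewrite _canonicalizeURIForRequest from explicit Deferred callback chaining to Twisted's inlineCallbacks/returnValue style, route short-name URIs through the new oldNameToRecordType() lookup, and encode unicode cache keys to UTF-8 before they reach the cache pool. The general shape of that callback-to-inlineCallbacks conversion, shown on a toy lookup function rather than CalendarServer code:

    from twisted.internet.defer import inlineCallbacks, returnValue, succeed

    class DictBackend(object):
        """Trivial in-memory stand-in for an asynchronous cache backend."""
        def __init__(self, data):
            self.data = data

        def get(self, key):
            return succeed(self.data.get(key))

    def lookupOld(key, backend):
        # Old style: chain a callback onto the Deferred from the backend.
        return backend.get(key).addCallback(lambda value: value.upper())

    @inlineCallbacks
    def lookupNew(key, backend):
        # New style: yield each Deferred and hand the result back with
        # returnValue(); unicode keys are encoded before hitting the cache.
        if isinstance(key, unicode):
            key = key.encode("utf-8")
        value = yield backend.get(key)
        returnValue(value.upper())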

Modified: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/customxml.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/customxml.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/customxml.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1456,6 +1456,8 @@
 
 ResourceType.calendarproxyread = ResourceType(Principal(), Collection(), CalendarProxyRead())
 ResourceType.calendarproxywrite = ResourceType(Principal(), Collection(), CalendarProxyWrite())
+ResourceType.calendarproxyreadfor = ResourceType(Principal(), Collection(), CalendarProxyReadFor())
+ResourceType.calendarproxywritefor = ResourceType(Principal(), Collection(), CalendarProxyWriteFor())
 
 ResourceType.timezones = ResourceType(Timezones())
 

Modified: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/addressbook.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/addressbook.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/addressbook.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -34,7 +34,6 @@
 from twisted.internet.defer import inlineCallbacks, returnValue, succeed
 
 from twistedcaldav.config import config
-from twistedcaldav.directory.idirectory import IDirectoryService
 
 from twistedcaldav.directory.common import CommonUIDProvisioningResource,\
     uidsResourceName, CommonHomeTypeProvisioningResource
@@ -58,14 +57,14 @@
 
 
 
-class DirectoryAddressBookProvisioningResource (
+class DirectoryAddressBookProvisioningResource(
     ReadOnlyResourceMixIn,
     CalDAVComplianceMixIn,
     DAVResourceWithChildrenMixin,
     DAVResource,
 ):
     def defaultAccessControlList(self):
-        return config.ProvisioningResourceACL
+        return succeed(config.ProvisioningResourceACL)
 
 
     def etag(self):
@@ -77,9 +76,9 @@
 
 
 
-class DirectoryAddressBookHomeProvisioningResource (
-        DirectoryAddressBookProvisioningResource
-    ):
+class DirectoryAddressBookHomeProvisioningResource(
+    DirectoryAddressBookProvisioningResource
+):
     """
     Resource which provisions address book home collections as needed.
     """
@@ -93,19 +92,39 @@
 
         super(DirectoryAddressBookHomeProvisioningResource, self).__init__()
 
-        self.directory = IDirectoryService(directory)
+        # MOVE2WHO
+        self.directory = directory  # IDirectoryService(directory)
         self._url = url
         self._newStore = store
 
         # FIXME: Smells like a hack
         directory.addressBookHomesCollection = self
 
+
         #
         # Create children
         #
-        for recordType in self.directory.recordTypes():
-            self.putChild(recordType, DirectoryAddressBookHomeTypeProvisioningResource(self, recordType))
+        # ...just users, locations, and resources though.  If we iterate all of
+        # the directory's recordTypes, we also get the proxy sub principal types
+        # and other things which don't have addressbooks.
 
+        self.supportedChildTypes = (
+            self.directory.recordType.user,
+            self.directory.recordType.location,
+            self.directory.recordType.resource,
+        )
+
+        for recordType, recordTypeName in [
+            (r, self.directory.recordTypeToOldName(r)) for r in
+            self.supportedChildTypes
+        ]:
+            self.putChild(
+                recordTypeName,
+                DirectoryAddressBookHomeTypeProvisioningResource(
+                    self, recordTypeName, recordType
+                )
+            )
+
         self.putChild(uidsResourceName, DirectoryAddressBookHomeUIDProvisioningResource(self))
 
 
@@ -114,7 +133,10 @@
 
 
     def listChildren(self):
-        return self.directory.recordTypes()
+        return [
+            self.directory.recordTypeToOldName(r) for r in
+            self.supportedChildTypes
+        ]
 
 
     def principalCollections(self):
@@ -129,12 +151,13 @@
         return self.directory.principalCollection.principalForRecord(record)
 
 
+    @inlineCallbacks
     def homeForDirectoryRecord(self, record, request):
-        uidResource = self.getChild(uidsResourceName)
+        uidResource = yield self.getChild(uidsResourceName)
         if uidResource is None:
-            return None
+            returnValue(None)
         else:
-            return uidResource.homeResourceForRecord(record, request)
+            returnValue((yield uidResource.homeResourceForRecord(record, request)))
 
 
     ##
@@ -151,42 +174,46 @@
 
 
 class DirectoryAddressBookHomeTypeProvisioningResource (
-        CommonHomeTypeProvisioningResource,
-        DirectoryAddressBookProvisioningResource
-    ):
+    CommonHomeTypeProvisioningResource,
+    DirectoryAddressBookProvisioningResource
+):
     """
     Resource which provisions address book home collections of a specific
     record type as needed.
     """
-    def __init__(self, parent, recordType):
+    def __init__(self, parent, name, recordType):
         """
         @param parent: the parent of this resource
         @param recordType: the directory record type to provision.
         """
         assert parent is not None
+        assert name is not None
         assert recordType is not None
 
         super(DirectoryAddressBookHomeTypeProvisioningResource, self).__init__()
 
         self.directory = parent.directory
+        self.name = name
         self.recordType = recordType
         self._parent = parent
 
 
     def url(self):
-        return joinURL(self._parent.url(), self.recordType)
+        return joinURL(self._parent.url(), self.name)
 
 
+    @inlineCallbacks
     def listChildren(self):
         if config.EnablePrincipalListings:
+            children = []
+            for record in (
+                yield self.directory.recordsWithRecordType(self.recordType)
+            ):
+                if getattr(record, "hasContacts", False):
+                    for shortName in record.shortNames:
+                        children.append(shortName)
 
-            def _recordShortnameExpand():
-                for record in self.directory.listRecords(self.recordType):
-                    if record.enabledForAddressBooks:
-                        for shortName in record.shortNames:
-                            yield shortName
-
-            return _recordShortnameExpand()
+            returnValue(children)
         else:
             # Not a listable collection
             raise HTTPError(responsecode.FORBIDDEN)
@@ -205,7 +232,7 @@
 
 
     def displayName(self):
-        return self.recordType
+        return self.directory.recordTypeToOldName(self.recordType)
 
     ##
     # ACL
@@ -222,13 +249,13 @@
 
 
 class DirectoryAddressBookHomeUIDProvisioningResource (
-        CommonUIDProvisioningResource,
-        DirectoryAddressBookProvisioningResource
-    ):
+    CommonUIDProvisioningResource,
+    DirectoryAddressBookProvisioningResource
+):
 
     homeResourceTypeName = 'addressbooks'
 
-    enabledAttribute = 'enabledForAddressBooks'
+    enabledAttribute = 'hasContacts'
 
 
     def homeResourceCreator(self, record, transaction):
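
The address book provisioning changes above stop treating the directory's raw record types as URL path segments: only user, location, and resource records are provisioned, and each new-style record type is translated back to the legacy short name via recordTypeToOldName() (with oldNameToRecordType() doing the reverse in cache.py). The real mapping belongs to the directory service; as a rough illustration only, with names assumed from the legacy URL segments:

    # Illustration of the old-name bridge; the literal names below are an
    # assumption based on legacy paths such as /principals/users/, not taken
    # from the directory service itself.
    OLD_NAMES = {
        "user": "users",
        "location": "locations",
        "resource": "resources",
    }

    def recordTypeToOldName(recordType):
        return OLD_NAMES[recordType]

    def oldNameToRecordType(oldName):
        for recordType, name in OLD_NAMES.items():
            if name == oldName:
                return recordType
        return None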

Deleted: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/aggregate.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/aggregate.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/aggregate.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,385 +0,0 @@
-##
-# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-"""
-Directory service implementation which aggregates multiple directory
-services.
-"""
-
-__all__ = [
-    "AggregateDirectoryService",
-    "DuplicateRecordTypeError",
-]
-
-import itertools
-from twisted.cred.error import UnauthorizedLogin
-
-from twistedcaldav.directory.idirectory import IDirectoryService
-from twistedcaldav.directory.directory import DirectoryService, DirectoryError
-from twistedcaldav.directory.directory import UnknownRecordTypeError
-from twisted.internet.defer import inlineCallbacks, returnValue
-
-class AggregateDirectoryService(DirectoryService):
-    """
-    L{IDirectoryService} implementation which aggregates multiple directory
-    services.
-
-    @ivar _recordTypes: A map of record types to L{IDirectoryService}s.
-    @type _recordTypes: L{dict} mapping L{bytes} to L{IDirectoryService}
-        provider.
-    """
-    baseGUID = "06FB225F-39E7-4D34-B1D1-29925F5E619B"
-
-    def __init__(self, services, groupMembershipCache):
-        super(AggregateDirectoryService, self).__init__()
-
-        realmName = None
-        recordTypes = {}
-        self.groupMembershipCache = groupMembershipCache
-
-        for service in services:
-            service = IDirectoryService(service)
-
-            if service.realmName != realmName:
-                assert realmName is None, (
-                    "Aggregated directory services must have the same realm name: %r != %r\nServices: %r"
-                    % (service.realmName, realmName, services)
-                )
-                realmName = service.realmName
-
-            if not hasattr(service, "recordTypePrefix"):
-                service.recordTypePrefix = ""
-            prefix = service.recordTypePrefix
-
-            for recordType in (prefix + r for r in service.recordTypes()):
-                if recordType in recordTypes:
-                    raise DuplicateRecordTypeError(
-                        "%r is in multiple services: %s, %s"
-                        % (recordType, recordTypes[recordType], service)
-                    )
-                recordTypes[recordType] = service
-
-            service.aggregateService = self
-
-        self.realmName = realmName
-        self._recordTypes = recordTypes
-
-        # FIXME: This is a temporary workaround until new data store is in
-        # place.  During the purging of deprovisioned users' data, we need
-        # to be able to look up records by uid and shortName.  The purge
-        # tool sticks temporary fake records in here.
-        self._tmpRecords = {
-            "uids" : { },
-            "shortNames" : { },
-        }
-
-
-    def __repr__(self):
-        return "<%s (%s): %r>" % (self.__class__.__name__, self.realmName, self._recordTypes)
-
-
-    #
-    # Define calendarHomesCollection as a property so we can set it on contained services
-    #
-    def _getCalendarHomesCollection(self):
-        return self._calendarHomesCollection
-
-
-    def _setCalendarHomesCollection(self, value):
-        for service in self._recordTypes.values():
-            service.calendarHomesCollection = value
-        self._calendarHomesCollection = value
-
-    calendarHomesCollection = property(_getCalendarHomesCollection, _setCalendarHomesCollection)
-
-    #
-    # Define addressBookHomesCollection as a property so we can set it on contained services
-    #
-    def _getAddressBookHomesCollection(self):
-        return self._addressBookHomesCollection
-
-
-    def _setAddressBookHomesCollection(self, value):
-        for service in self._recordTypes.values():
-            service.addressBookHomesCollection = value
-        self._addressBookHomesCollection = value
-
-    addressBookHomesCollection = property(_getAddressBookHomesCollection, _setAddressBookHomesCollection)
-
-
-    def addService(self, service):
-        """
-        Add another service to this aggregate.
-
-        @param service: the service to add
-        @type service: L{IDirectoryService}
-        """
-        service = IDirectoryService(service)
-
-        if service.realmName != self.realmName:
-            assert self.realmName is None, (
-                "Aggregated directory services must have the same realm name: %r != %r\nServices: %r"
-                % (service.realmName, self.realmName, service)
-            )
-
-        if not hasattr(service, "recordTypePrefix"):
-            service.recordTypePrefix = ""
-        prefix = service.recordTypePrefix
-
-        for recordType in (prefix + r for r in service.recordTypes()):
-            if recordType in self._recordTypes:
-                raise DuplicateRecordTypeError(
-                    "%r is in multiple services: %s, %s"
-                    % (recordType, self.recordTypes[recordType], service)
-                )
-            self._recordTypes[recordType] = service
-
-        service.aggregateService = self
-
-
-    def recordTypes(self):
-        return set(self._recordTypes)
-
-
-    def listRecords(self, recordType):
-        records = self._query("listRecords", recordType)
-        if records is None:
-            return ()
-        else:
-            return records
-
-
-    def recordWithShortName(self, recordType, shortName):
-
-        # FIXME: These temporary records shouldn't be needed when we move
-        # to the new data store API.  They're currently needed when purging
-        # deprovisioned users' data.
-        record = self._tmpRecords["shortNames"].get(shortName, None)
-        if record:
-            return record
-
-        return self._query("recordWithShortName", recordType, shortName)
-
-
-    def recordWithUID(self, uid):
-
-        # FIXME: These temporary records shouldn't be needed when we move
-        # to the new data store API.  They're currently needed when purging
-        # deprovisioned users' data.
-        record = self._tmpRecords["uids"].get(uid, None)
-        if record:
-            return record
-
-        return self._queryAll("recordWithUID", uid)
-
-    recordWithGUID = recordWithUID
-
-    def recordWithAuthID(self, authID):
-        return self._queryAll("recordWithAuthID", authID)
-
-
-    def recordWithCalendarUserAddress(self, address):
-        return self._queryAll("recordWithCalendarUserAddress", address)
-
-
-    def recordWithCachedGroupsAlias(self, recordType, alias):
-        """
-        @param recordType: the type of the record to look up.
-        @param alias: the cached-groups alias of the record to look up.
-        @type alias: C{str}
-
-        @return: a deferred L{IDirectoryRecord} with the given cached-groups
-            alias, or C{None} if no such record is found.
-        """
-        service = self.serviceForRecordType(recordType)
-        return service.recordWithCachedGroupsAlias(recordType, alias)
-
-
-    @inlineCallbacks
-    def recordsMatchingFields(self, fields, operand="or", recordType=None):
-
-        if recordType:
-            services = (self.serviceForRecordType(recordType),)
-        else:
-            services = set(self._recordTypes.values())
-
-        generators = []
-        for service in services:
-            generator = (yield service.recordsMatchingFields(fields,
-                operand=operand, recordType=recordType))
-            generators.append(generator)
-
-        returnValue(itertools.chain(*generators))
-
-
-    @inlineCallbacks
-    def recordsMatchingTokens(self, tokens, context=None):
-        """
-        Combine the results from the sub-services.
-
-        Each token is searched for within each record's full name and email
-        address; if each token is found within a record that record is returned
-        in the results.
-
-        If context is None, all record types are considered.  If context is
-        "location", only locations are considered.  If context is "attendee",
-        only users, groups, and resources are considered.
-
-        @param tokens: The tokens to search on
-        @type tokens: C{list} of C{str} (utf-8 bytes)
-
-        @param context: An indication of what the end user is searching for;
-            "attendee", "location", or None
-        @type context: C{str}
-
-        @return: a deferred sequence of L{IDirectoryRecord}s which match the
-            given tokens and optional context.
-        """
-
-        services = set(self._recordTypes.values())
-
-        generators = []
-        for service in services:
-            generator = (yield service.recordsMatchingTokens(tokens,
-                context=context))
-            generators.append(generator)
-
-        returnValue(itertools.chain(*generators))
-
-
-    def getGroups(self, guids):
-        """
-        Returns a set of group records for the list of guids passed in.  For
-        any group that also contains subgroups, those subgroups' records are
-        also returned, and so on.
-        """
-        recordType = self.recordType_groups
-        service = self.serviceForRecordType(recordType)
-        return service.getGroups(guids)
-
-
-    def serviceForRecordType(self, recordType):
-        try:
-            return self._recordTypes[recordType]
-        except KeyError:
-            raise UnknownRecordTypeError(recordType)
-
-
-    def _query(self, query, recordType, *args):
-        try:
-            service = self.serviceForRecordType(recordType)
-        except UnknownRecordTypeError:
-            return None
-
-        return getattr(service, query)(
-            recordType[len(service.recordTypePrefix):],
-            *[a[len(service.recordTypePrefix):] for a in args]
-        )
-
-
-    def _queryAll(self, query, *args):
-        for service in self._recordTypes.values():
-            try:
-                record = getattr(service, query)(*args)
-            except UnknownRecordTypeError:
-                record = None
-            if record is not None:
-                return record
-        else:
-            return None
-
-
-    def flushCaches(self):
-        for service in self._recordTypes.values():
-            if hasattr(service, "_initCaches"):
-                service._initCaches()
-
-    userRecordTypes = [DirectoryService.recordType_users]
-
-    def requestAvatarId(self, credentials):
-
-        if credentials.authnPrincipal:
-            return credentials.authnPrincipal.record.service.requestAvatarId(credentials)
-
-        raise UnauthorizedLogin("No such user: %s" % (credentials.credentials.username,))
-
-
-    def getResourceInfo(self):
-        results = []
-        for service in self._recordTypes.values():
-            for result in service.getResourceInfo():
-                if result:
-                    results.append(result)
-        return results
-
-
-    def getExternalProxyAssignments(self):
-        service = self.serviceForRecordType(self.recordType_locations)
-        return service.getExternalProxyAssignments()
-
-
-    def createRecord(self, recordType, guid=None, shortNames=(), authIDs=set(),
-        fullName=None, firstName=None, lastName=None, emailAddresses=set(),
-        uid=None, password=None, **kwargs):
-        service = self.serviceForRecordType(recordType)
-        return service.createRecord(recordType, guid=guid,
-            shortNames=shortNames, authIDs=authIDs, fullName=fullName,
-            firstName=firstName, lastName=lastName,
-            emailAddresses=emailAddresses, uid=uid, password=password, **kwargs)
-
-
-    def updateRecord(self, recordType, guid=None, shortNames=(), authIDs=set(),
-        fullName=None, firstName=None, lastName=None, emailAddresses=set(),
-        uid=None, password=None, **kwargs):
-        service = self.serviceForRecordType(recordType)
-        return service.updateRecord(recordType, guid=guid,
-            shortNames=shortNames,
-            authIDs=authIDs, fullName=fullName, firstName=firstName,
-            lastName=lastName, emailAddresses=emailAddresses, uid=uid,
-            password=password, **kwargs)
-
-
-    def destroyRecord(self, recordType, guid=None):
-        service = self.serviceForRecordType(recordType)
-        return service.destroyRecord(recordType, guid=guid)
-
-
-    def setRealm(self, realmName):
-        """
-        Set a new realm name for this and nested services
-        """
-        self.realmName = realmName
-        for service in self._recordTypes.values():
-            service.setRealm(realmName)
-
-
-    def setPrincipalCollection(self, principalCollection):
-        """
-        Set the principal service that the directory relies on for doing proxy tests.
-
-        @param principalService: the principal service.
-        @type principalService: L{DirectoryProvisioningResource}
-        """
-        self.principalCollection = principalCollection
-        for service in self._recordTypes.values():
-            service.setPrincipalCollection(principalCollection)
-
-
-
-class DuplicateRecordTypeError(DirectoryError):
-    """
-    Duplicate record type.
-    """

Deleted: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/appleopendirectory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/appleopendirectory.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/appleopendirectory.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,1584 +0,0 @@
-# -*- test-case-name: twistedcaldav.directory.test.test_opendirectory -*-
-##
-# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-"""
-Apple OpenDirectory directory service implementation.
-"""
-
-__all__ = [
-    "OpenDirectoryService",
-    "OpenDirectoryInitError",
-]
-
-import sys
-import time
-from uuid import UUID
-
-from twisted.internet.defer import succeed, inlineCallbacks, returnValue
-from twisted.cred.credentials import UsernamePassword
-from txweb2.auth.digest import DigestedCredentials
-from twext.python.log import Logger
-
-from twistedcaldav.directory.cachingdirectory import CachingDirectoryService, \
-    CachingDirectoryRecord
-from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
-from twistedcaldav.directory.directory import DirectoryError, UnknownRecordTypeError
-from twistedcaldav.directory.util import splitIntoBatches
-from twistedcaldav.directory.principal import cuAddressConverter
-
-from calendarserver.platform.darwin.od import opendirectory, dsattributes, dsquery
-
-
-
-class OpenDirectoryService(CachingDirectoryService):
-    """
-    OpenDirectory implementation of L{IDirectoryService}.
-    """
-    log = Logger()
-
-    baseGUID = "891F8321-ED02-424C-BA72-89C32F215C1E"
-
-    def __repr__(self):
-        return "<%s %r: %r>" % (self.__class__.__name__, self.realmName, self.node)
-
-
-    def __init__(self, params, odModule=None):
-        """
-        @param params: a dictionary containing the following keys:
-
-            - node: an OpenDirectory node name to bind to.
-
-            - restrictEnabledRecords: C{True} if a group in the directory is to
-              be used to determine which calendar users are enabled.
-
-            - restrictToGroup: C{str} guid or name of group used to restrict
-              enabled users.
-
-            - cacheTimeout: C{int} number of minutes before cache is
-              invalidated.
-
-            - negativeCache: C{False} cache the fact that a record wasn't found
-        """
-        defaults = {
-            'node' : '/Search',
-            'restrictEnabledRecords' : False,
-            'restrictToGroup' : '',
-            'cacheTimeout' : 1, # Minutes
-            'batchSize' : 100, # for splitting up large queries
-            'negativeCaching' : False,
-            'recordTypes' : (
-                self.recordType_users,
-                self.recordType_groups,
-            ),
-            'augmentService' : None,
-            'groupMembershipCache' : None,
-        }
-        ignored = ('requireComputerRecord',)
-        params = self.getParams(params, defaults, ignored)
-
-        self._recordTypes = params['recordTypes']
-
-        super(OpenDirectoryService, self).__init__(params['cacheTimeout'],
-                                                   params['negativeCaching'])
-
-        if odModule is None:
-            odModule = opendirectory
-        self.odModule = odModule
-
-        try:
-            directory = self.odModule.odInit(params['node'])
-        except self.odModule.ODError, e:
-            self.log.error("OpenDirectory (node=%s) Initialization error: %s" % (params['node'], e))
-            raise
-
-        self.augmentService = params['augmentService']
-        self.groupMembershipCache = params['groupMembershipCache']
-        self.realmName = params['node']
-        self.directory = directory
-        self.node = params['node']
-        self.restrictEnabledRecords = params['restrictEnabledRecords']
-        self.restrictToGroup = params['restrictToGroup']
-        self.batchSize = params['batchSize']
-        try:
-            UUID(self.restrictToGroup)
-        except:
-            self.restrictToGUID = False
-        else:
-            self.restrictToGUID = True
-        self.restrictedTimestamp = 0
-
-        # Set up the /Local/Default node if it's in the search path so we can
-        # send custom queries to it
-        self.localNode = None
-        try:
-            if self.node == "/Search":
-                result = self.odModule.getNodeAttributes(self.directory, "/Search",
-                    (dsattributes.kDS1AttrSearchPath,))
-                if "/Local/Default" in result[dsattributes.kDS1AttrSearchPath]:
-                    try:
-                        self.localNode = self.odModule.odInit("/Local/Default")
-                    except self.odModule.ODError, e:
-                        self.log.error("Failed to open /Local/Default): %s" % (e,))
-        except AttributeError:
-            pass
-
-
-    @property
-    def restrictedGUIDs(self):
-        """
-        Look up (and cache) the set of guids that are members of the
-        restrictToGroup.  If restrictToGroup is not set, return None to
-        indicate there are no group restrictions.
-        """
-        if self.restrictEnabledRecords:
-            if time.time() - self.restrictedTimestamp > self.cacheTimeout:
-                attributeToMatch = dsattributes.kDS1AttrGeneratedUID if self.restrictToGUID else dsattributes.kDSNAttrRecordName
-                valueToMatch = self.restrictToGroup
-                self.log.debug("Doing restricted group membership check")
-                self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
-                    self.directory,
-                    attributeToMatch,
-                    valueToMatch,
-                    dsattributes.eDSExact,
-                    False,
-                    dsattributes.kDSStdRecordTypeGroups,
-                    [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups, ],
-                ))
-                results = self.odModule.queryRecordsWithAttribute_list(
-                    self.directory,
-                    attributeToMatch,
-                    valueToMatch,
-                    dsattributes.eDSExact,
-                    False,
-                    dsattributes.kDSStdRecordTypeGroups,
-                    [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups, ],
-                )
-
-                if len(results) == 1:
-                    members = results[0][1].get(dsattributes.kDSNAttrGroupMembers, [])
-                    nestedGroups = results[0][1].get(dsattributes.kDSNAttrNestedGroups, [])
-                else:
-                    members = []
-                    nestedGroups = []
-                self._cachedRestrictedGUIDs = set(self._expandGroupMembership(members, nestedGroups, returnGroups=True))
-                self.log.debug("Got %d restricted group members" % (len(self._cachedRestrictedGUIDs),))
-                self.restrictedTimestamp = time.time()
-            return self._cachedRestrictedGUIDs
-        else:
-            # No restrictions
-            return None
-
-
-    def __cmp__(self, other):
-        if not isinstance(other, OpenDirectoryService):
-            return NotImplemented
-
-        for attr in ("directory", "node"):
-            diff = cmp(getattr(self, attr), getattr(other, attr))
-            if diff != 0:
-                return diff
-        return 0
-
-
-    def __hash__(self):
-        h = hash(self.__class__.__name__)
-        for attr in ("node",):
-            h = (h + hash(getattr(self, attr))) & sys.maxint
-        return h
-
-
-    def _expandGroupMembership(self, members, nestedGroups, processedGUIDs=None, returnGroups=False):
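-        # Generator that yields every member GUID reachable from the given
-        # members and nested groups, recursing through nested groups.
-        # processedGUIDs guards against cycles in the group graph; when
-        # returnGroups is True the GUIDs of the nested groups themselves are
-        # yielded in addition to their members.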
-
-        if processedGUIDs is None:
-            processedGUIDs = set()
-
-        if isinstance(members, str):
-            members = [members]
-
-        if isinstance(nestedGroups, str):
-            nestedGroups = [nestedGroups]
-
-        for memberGUID in members:
-            if memberGUID not in processedGUIDs:
-                processedGUIDs.add(memberGUID)
-                yield memberGUID
-
-        for groupGUID in nestedGroups:
-            if groupGUID in processedGUIDs:
-                continue
-
-            self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
-                self.directory,
-                dsattributes.kDS1AttrGeneratedUID,
-                groupGUID,
-                dsattributes.eDSExact,
-                False,
-                dsattributes.kDSStdRecordTypeGroups,
-                [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups]
-            ))
-            result = self.odModule.queryRecordsWithAttribute_list(
-                self.directory,
-                dsattributes.kDS1AttrGeneratedUID,
-                groupGUID,
-                dsattributes.eDSExact,
-                False,
-                dsattributes.kDSStdRecordTypeGroups,
-                [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups]
-            )
-
-            if not result:
-                self.log.error("Couldn't find group %s when trying to expand nested groups."
-                             % (groupGUID,))
-                continue
-
-            group = result[0][1]
-
-            processedGUIDs.add(groupGUID)
-            if returnGroups:
-                yield groupGUID
-
-            for GUID in self._expandGroupMembership(
-                group.get(dsattributes.kDSNAttrGroupMembers, []),
-                group.get(dsattributes.kDSNAttrNestedGroups, []),
-                processedGUIDs,
-                returnGroups,
-            ):
-                yield GUID
-
-
-    def recordTypes(self):
-        return self._recordTypes
-
-
-    def listRecords(self, recordType):
-        """
-        Retrieve all the records of recordType from the directory, but for
-        expediency don't index them or cache them locally, nor in memcached.
-        """
-
-        records = []
-
-        attrs = [
-            dsattributes.kDS1AttrGeneratedUID,
-            dsattributes.kDSNAttrRecordName,
-            dsattributes.kDS1AttrDistinguishedName,
-        ]
-
-        if recordType == DirectoryService.recordType_users:
-            ODRecordType = self._toODRecordTypes[recordType]
-
-        elif recordType in (
-            DirectoryService.recordType_resources,
-            DirectoryService.recordType_locations,
-        ):
-            attrs.append(dsattributes.kDSNAttrResourceInfo)
-            ODRecordType = self._toODRecordTypes[recordType]
-
-        elif recordType == DirectoryService.recordType_groups:
-            attrs.append(dsattributes.kDSNAttrGroupMembers)
-            attrs.append(dsattributes.kDSNAttrNestedGroups)
-            ODRecordType = dsattributes.kDSStdRecordTypeGroups
-
-        self.log.debug("Querying OD for all %s records" % (recordType,))
-        results = self.odModule.listAllRecordsWithAttributes_list(
-            self.directory, ODRecordType, attrs)
-        self.log.debug("Retrieved %d %s records" % (len(results), recordType,))
-
-        for key, value in results:
-            recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
-            if not recordGUID:
-                self.log.warn("Ignoring record missing GUID: %s %s" %
-                    (key, value,))
-                continue
-
-            # Skip if group restriction is in place and guid is not
-            # a member (but don't skip any groups)
-            if (recordType != self.recordType_groups and
-                self.restrictedGUIDs is not None):
-                if str(recordGUID) not in self.restrictedGUIDs:
-                    continue
-
-            recordShortNames = self._uniqueTupleFromAttribute(
-                value.get(dsattributes.kDSNAttrRecordName))
-            recordFullName = value.get(
-                dsattributes.kDS1AttrDistinguishedName)
-
-            proxyGUIDs = ()
-            readOnlyProxyGUIDs = ()
-
-            if recordType in (
-                DirectoryService.recordType_resources,
-                DirectoryService.recordType_locations,
-            ):
-                resourceInfo = value.get(dsattributes.kDSNAttrResourceInfo)
-                if resourceInfo is not None:
-                    if type(resourceInfo) is not str:
-                        resourceInfo = resourceInfo[0]
-                    try:
-                        (
-                            _ignore_autoSchedule,
-                            proxy,
-                            readOnlyProxy
-                        ) = self.parseResourceInfo(
-                            resourceInfo,
-                            recordGUID,
-                            recordType,
-                            recordShortNames[0]
-                        )
-                    except ValueError:
-                        continue
-                    if proxy:
-                        proxyGUIDs = (proxy,)
-                    if readOnlyProxy:
-                        readOnlyProxyGUIDs = (readOnlyProxy,)
-
-            # Special case for groups, which have members.
-            if recordType == self.recordType_groups:
-                memberGUIDs = value.get(dsattributes.kDSNAttrGroupMembers)
-                if memberGUIDs is None:
-                    memberGUIDs = ()
-                elif type(memberGUIDs) is str:
-                    memberGUIDs = (memberGUIDs,)
-                nestedGUIDs = value.get(dsattributes.kDSNAttrNestedGroups)
-                if nestedGUIDs:
-                    if type(nestedGUIDs) is str:
-                        nestedGUIDs = (nestedGUIDs,)
-                    memberGUIDs += tuple(nestedGUIDs)
-                else:
-                    nestedGUIDs = ()
-            else:
-                memberGUIDs = ()
-                nestedGUIDs = ()
-
-            record = OpenDirectoryRecord(
-                service=self,
-                recordType=recordType,
-                guid=recordGUID,
-                nodeName="",
-                shortNames=recordShortNames,
-                authIDs=(),
-                fullName=recordFullName,
-                firstName="",
-                lastName="",
-                emailAddresses="",
-                memberGUIDs=memberGUIDs,
-                nestedGUIDs=nestedGUIDs,
-                extProxies=proxyGUIDs,
-                extReadOnlyProxies=readOnlyProxyGUIDs,
-            )
-
-            # (Copied from below)
-            # Look up augment information
-            # TODO: this needs to be deferred but for now we hard code
-            # the deferred result because we know it is completing
-            # immediately.
-            if self.augmentService is not None:
-                d = self.augmentService.getAugmentRecord(record.guid,
-                    recordType)
-                d.addCallback(lambda x: record.addAugmentInformation(x))
-            records.append(record)
-
-        self.log.debug("ListRecords returning %d %s records" % (len(records),
-            recordType))
-
-        return records
-
-
-    def groupsForGUID(self, guid):
-
-        attrs = [
-            dsattributes.kDS1AttrGeneratedUID,
-        ]
-
-        recordType = dsattributes.kDSStdRecordTypeGroups
-
-        guids = set()
-
-        self.log.debug("Looking up which groups %s is a member of" % (guid,))
-        try:
-            self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
-                self.directory,
-                dsattributes.kDSNAttrGroupMembers,
-                guid,
-                dsattributes.eDSExact,
-                False,
-                recordType,
-                attrs,
-            ))
-            results = self.odModule.queryRecordsWithAttribute_list(
-                self.directory,
-                dsattributes.kDSNAttrGroupMembers,
-                guid,
-                dsattributes.eDSExact,
-                False,
-                recordType,
-                attrs,
-            )
-        except self.odModule.ODError, ex:
-            self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
-            raise
-
-        for (_ignore_recordShortName, value) in results:
-
-            # Now get useful record info.
-            recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
-            if recordGUID:
-                guids.add(recordGUID)
-
-        try:
-            self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
-                self.directory,
-                dsattributes.kDSNAttrNestedGroups,
-                guid,
-                dsattributes.eDSExact,
-                False,
-                recordType,
-                attrs,
-            ))
-            results = self.odModule.queryRecordsWithAttribute_list(
-                self.directory,
-                dsattributes.kDSNAttrNestedGroups,
-                guid,
-                dsattributes.eDSExact,
-                False,
-                recordType,
-                attrs,
-            )
-        except self.odModule.ODError, ex:
-            self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
-            raise
-
-        for (_ignore_recordShortName, value) in results:
-
-            # Now get useful record info.
-            recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
-            if recordGUID:
-                guids.add(recordGUID)
-
-        self.log.debug("%s is a member of %d groups" % (guid, len(guids)))
-
-        return guids
-
-    _ODFields = {
-        'fullName' : {
-            'odField' : dsattributes.kDS1AttrDistinguishedName,
-            'appliesTo' : set([
-                dsattributes.kDSStdRecordTypeUsers,
-                dsattributes.kDSStdRecordTypeGroups,
-                dsattributes.kDSStdRecordTypeResources,
-                dsattributes.kDSStdRecordTypePlaces,
-            ]),
-        },
-        'firstName' : {
-            'odField' : dsattributes.kDS1AttrFirstName,
-            'appliesTo' : set([
-                dsattributes.kDSStdRecordTypeUsers,
-            ]),
-        },
-        'lastName' : {
-            'odField' : dsattributes.kDS1AttrLastName,
-            'appliesTo' : set([
-                dsattributes.kDSStdRecordTypeUsers,
-            ]),
-        },
-        'emailAddresses' : {
-            'odField' : dsattributes.kDSNAttrEMailAddress,
-            'appliesTo' : set([
-                dsattributes.kDSStdRecordTypeUsers,
-                dsattributes.kDSStdRecordTypeGroups,
-            ]),
-        },
-        'recordName' : {
-            'odField' : dsattributes.kDSNAttrRecordName,
-            'appliesTo' : set([
-                dsattributes.kDSStdRecordTypeUsers,
-                dsattributes.kDSStdRecordTypeGroups,
-                dsattributes.kDSStdRecordTypeResources,
-                dsattributes.kDSStdRecordTypePlaces,
-            ]),
-        },
-        'guid' : {
-            'odField' : dsattributes.kDS1AttrGeneratedUID,
-            'appliesTo' : set([
-                dsattributes.kDSStdRecordTypeUsers,
-                dsattributes.kDSStdRecordTypeGroups,
-                dsattributes.kDSStdRecordTypeResources,
-                dsattributes.kDSStdRecordTypePlaces,
-            ]),
-        },
-    }
-
-    _toODRecordTypes = {
-        DirectoryService.recordType_users :
-            dsattributes.kDSStdRecordTypeUsers,
-        DirectoryService.recordType_groups :
-            dsattributes.kDSStdRecordTypeGroups,
-        DirectoryService.recordType_resources :
-            dsattributes.kDSStdRecordTypeResources,
-        DirectoryService.recordType_locations :
-            dsattributes.kDSStdRecordTypePlaces,
-    }
-
-    _fromODRecordTypes = dict([(b, a) for a, b in _toODRecordTypes.iteritems()])
-
-    def _uniqueTupleFromAttribute(self, attribute):
-        if attribute:
-            if isinstance(attribute, str):
-                return (attribute,)
-            else:
-                s = set()
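-                # De-duplicate while preserving the original attribute order:
-                # s records the values already seen, and (s.add(x), x)[1]
-                # evaluates to x itself after marking it as seen.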
-                return tuple([(s.add(x), x)[1] for x in attribute if x not in s])
-        else:
-            return ()
-
-
-    def _setFromAttribute(self, attribute, lower=False):
-        if attribute:
-            if isinstance(attribute, str):
-                return set((attribute.lower() if lower else attribute,))
-            else:
-                return set([item.lower() if lower else item for item in attribute])
-        else:
-            return set()
-
-
-    def recordsMatchingTokens(self, tokens, context=None, lookupMethod=None):
-        """
-        @param tokens: The tokens to search on
-        @type tokens: C{list} of C{str} (utf-8 bytes)
-        @param context: An indication of what the end user is searching
-            for; "attendee", "location", or None
-        @type context: C{str}
-        @return: a deferred sequence of L{IDirectoryRecord}s which
-            match the given tokens and optional context.
-
-        Each token is searched for within each record's full name and
-        email address; if each token is found within a record that
-        record is returned in the results.
-
-        If context is None, all record types are considered.  If
-        context is "location", only locations are considered.  If
-        context is "attendee", only users, groups, and resources
-        are considered.
-        """
-
-        if lookupMethod is None:
-            lookupMethod = self.odModule.queryRecordsWithAttributes_list
-
-        def collectResults(results):
-            self.log.debug("Got back %d records from OD" % (len(results),))
-            for _ignore_key, value in results:
-                # self.log.debug("OD result: {key} {value}", key=key, value=value)
-                try:
-                    recordNodeName = value.get(
-                        dsattributes.kDSNAttrMetaNodeLocation)
-                    recordShortNames = self._uniqueTupleFromAttribute(
-                        value.get(dsattributes.kDSNAttrRecordName))
-
-                    recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
-
-                    recordType = value.get(dsattributes.kDSNAttrRecordType)
-                    if isinstance(recordType, list):
-                        recordType = recordType[0]
-                    if not recordType:
-                        continue
-                    recordType = self._fromODRecordTypes[recordType]
-
-                    # Skip if group restriction is in place and guid is not
-                    # a member (but don't skip any groups)
-                    if (recordType != self.recordType_groups and
-                        self.restrictedGUIDs is not None):
-                        if str(recordGUID) not in self.restrictedGUIDs:
-                            continue
-
-                    recordAuthIDs = self._setFromAttribute(
-                        value.get(dsattributes.kDSNAttrAltSecurityIdentities))
-                    recordFullName = value.get(
-                        dsattributes.kDS1AttrDistinguishedName)
-                    recordFirstName = value.get(dsattributes.kDS1AttrFirstName)
-                    recordLastName = value.get(dsattributes.kDS1AttrLastName)
-                    recordEmailAddresses = self._setFromAttribute(
-                        value.get(dsattributes.kDSNAttrEMailAddress),
-                        lower=True)
-
-                    # Special case for groups, which have members.
-                    if recordType == self.recordType_groups:
-                        memberGUIDs = value.get(dsattributes.kDSNAttrGroupMembers)
-                        if memberGUIDs is None:
-                            memberGUIDs = ()
-                        elif type(memberGUIDs) is str:
-                            memberGUIDs = (memberGUIDs,)
-                        nestedGUIDs = value.get(dsattributes.kDSNAttrNestedGroups)
-                        if nestedGUIDs:
-                            if type(nestedGUIDs) is str:
-                                nestedGUIDs = (nestedGUIDs,)
-                            memberGUIDs += tuple(nestedGUIDs)
-                        else:
-                            nestedGUIDs = ()
-                    else:
-                        nestedGUIDs = ()
-                        memberGUIDs = ()
-
-                    # Create records but don't store them in our index or
-                    # send them to memcached, because these are transient,
-                    # existing only so we can create principal resource
-                    # objects that are used to generate the REPORT result.
-
-                    record = OpenDirectoryRecord(
-                        service=self,
-                        recordType=recordType,
-                        guid=recordGUID,
-                        nodeName=recordNodeName,
-                        shortNames=recordShortNames,
-                        authIDs=recordAuthIDs,
-                        fullName=recordFullName,
-                        firstName=recordFirstName,
-                        lastName=recordLastName,
-                        emailAddresses=recordEmailAddresses,
-                        memberGUIDs=memberGUIDs,
-                        nestedGUIDs=nestedGUIDs,
-                        extProxies=(),
-                        extReadOnlyProxies=(),
-                    )
-
-                    # (Copied from below)
-                    # Look up augment information
-                    # TODO: this needs to be deferred but for now we hard code
-                    # the deferred result because we know it is completing
-                    # immediately.
-                    if self.augmentService is not None:
-                        d = self.augmentService.getAugmentRecord(record.guid,
-                            recordType)
-                        d.addCallback(lambda x: record.addAugmentInformation(x))
-
-                    yield record
-
-                except KeyError:
-                    pass
-
-
-        def multiQuery(directory, queries, recordTypes, attrs):
-            byGUID = {}
-            sets = []
-
-            caseInsensitive = True
-            for compound in queries:
-                compound = compound.generate()
-
-                try:
-                    startTime = time.time()
-                    queryResults = lookupMethod(
-                        directory,
-                        compound,
-                        caseInsensitive,
-                        recordTypes,
-                        attrs,
-                    )
-                    totalTime = time.time() - startTime
-
-                    newSet = set()
-                    for recordName, data in queryResults:
-                        guid = data.get(dsattributes.kDS1AttrGeneratedUID, None)
-                        if guid:
-                            byGUID[guid] = (recordName, data)
-                            newSet.add(guid)
-
-                    self.log.debug("Attendee OD query: Types %s, Query %s, %.2f sec, %d results" %
-                        (recordTypes, compound, totalTime, len(queryResults)))
-                    sets.append(newSet)
-
-                except self.odModule.ODError, e:
-                    self.log.error("Ignoring OD Error: %d %s" %
-                        (e.message[1], e.message[0]))
-                    continue
-
-            results = []
-            for guid in set.intersection(*sets):
-                recordName, data = byGUID.get(guid, None)
-                if data is not None:
-                    results.append((data[dsattributes.kDSNAttrRecordName], data))
-            return results
-
-        localQueries = buildLocalQueriesFromTokens(tokens, self._ODFields)
-        nestedQuery = buildNestedQueryFromTokens(tokens, self._ODFields)
-
-        # Starting with the record types corresponding to the context...
-        recordTypes = self.recordTypesForSearchContext(context)
-        # ...limit to the types this service supports...
-        recordTypes = [r for r in recordTypes if r in self.recordTypes()]
-        # ...and map those to OD representations...
-        recordTypes = [self._toODRecordTypes[r] for r in recordTypes]
-
-        if recordTypes:
-            # Perform the complex/nested query.  If there was more than one
-            # token, this won't match anything in /Local, therefore we run
-            # the un-nested queries below and AND the results ourselves in
-            # multiQuery.
-            results = multiQuery(
-                self.directory,
-                [nestedQuery],
-                recordTypes,
-                [
-                    dsattributes.kDS1AttrGeneratedUID,
-                    dsattributes.kDSNAttrRecordName,
-                    dsattributes.kDSNAttrAltSecurityIdentities,
-                    dsattributes.kDSNAttrRecordType,
-                    dsattributes.kDS1AttrDistinguishedName,
-                    dsattributes.kDS1AttrFirstName,
-                    dsattributes.kDS1AttrLastName,
-                    dsattributes.kDSNAttrEMailAddress,
-                    dsattributes.kDSNAttrMetaNodeLocation,
-                    dsattributes.kDSNAttrGroupMembers,
-                    dsattributes.kDSNAttrNestedGroups,
-                ]
-            )
-            if self.localNode is not None and len(tokens) > 1:
-                # /Local is in our search path and the complex query above
-                # would not have matched anything in /Local.  So now run
-                # the un-nested queries.
-                results.extend(
-                    multiQuery(
-                        self.localNode,
-                        localQueries,
-                        recordTypes,
-                        [
-                            dsattributes.kDS1AttrGeneratedUID,
-                            dsattributes.kDSNAttrRecordName,
-                            dsattributes.kDSNAttrAltSecurityIdentities,
-                            dsattributes.kDSNAttrRecordType,
-                            dsattributes.kDS1AttrDistinguishedName,
-                            dsattributes.kDS1AttrFirstName,
-                            dsattributes.kDS1AttrLastName,
-                            dsattributes.kDSNAttrEMailAddress,
-                            dsattributes.kDSNAttrMetaNodeLocation,
-                            dsattributes.kDSNAttrGroupMembers,
-                            dsattributes.kDSNAttrNestedGroups,
-                        ]
-                    )
-                )
-            return succeed(collectResults(results))
-        else:
-            return succeed([])
-
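-    # Illustrative sketch only (not part of the original module): a caller
-    # might drive the token search above roughly as
-    #
-    #     d = service.recordsMatchingTokens(["mor", "bet"], context="attendee")
-    #     d.addCallback(lambda records: [r.fullName for r in records])
-    #
-    # where every token must be found in each returned record's full name,
-    # email address, or record name.  The token values are hypothetical.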
-
-    def recordsMatchingFields(self, fields, operand="or", recordType=None,
-        lookupMethod=None):
-
-        if lookupMethod is None:
-            lookupMethod = self.odModule.queryRecordsWithAttribute_list
-
-        # Note that OD applies case-sensitivity globally across the entire
-        # query, not per expression, so the current code uses whatever is
-        # specified in the last field in the fields list
-
-        def collectResults(results):
-            self.log.debug("Got back %d records from OD" % (len(results),))
-            for _ignore_key, value in results:
-                # self.log.debug("OD result: {key} {value}", key=key, value=value)
-                try:
-                    recordNodeName = value.get(
-                        dsattributes.kDSNAttrMetaNodeLocation)
-                    recordShortNames = self._uniqueTupleFromAttribute(
-                        value.get(dsattributes.kDSNAttrRecordName))
-
-                    recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
-
-                    recordType = value.get(dsattributes.kDSNAttrRecordType)
-                    if isinstance(recordType, list):
-                        recordType = recordType[0]
-                    if not recordType:
-                        continue
-                    recordType = self._fromODRecordTypes[recordType]
-
-                    # Skip if group restriction is in place and guid is not
-                    # a member (but don't skip any groups)
-                    if (recordType != self.recordType_groups and
-                        self.restrictedGUIDs is not None):
-                        if str(recordGUID) not in self.restrictedGUIDs:
-                            continue
-
-                    recordAuthIDs = self._setFromAttribute(
-                        value.get(dsattributes.kDSNAttrAltSecurityIdentities))
-                    recordFullName = value.get(
-                        dsattributes.kDS1AttrDistinguishedName)
-                    recordFirstName = value.get(dsattributes.kDS1AttrFirstName)
-                    recordLastName = value.get(dsattributes.kDS1AttrLastName)
-                    recordEmailAddresses = self._setFromAttribute(
-                        value.get(dsattributes.kDSNAttrEMailAddress),
-                        lower=True)
-
-                    # Special case for groups, which have members.
-                    if recordType == self.recordType_groups:
-                        memberGUIDs = value.get(dsattributes.kDSNAttrGroupMembers)
-                        if memberGUIDs is None:
-                            memberGUIDs = ()
-                        elif type(memberGUIDs) is str:
-                            memberGUIDs = (memberGUIDs,)
-                        nestedGUIDs = value.get(dsattributes.kDSNAttrNestedGroups)
-                        if nestedGUIDs:
-                            if type(nestedGUIDs) is str:
-                                nestedGUIDs = (nestedGUIDs,)
-                            memberGUIDs += tuple(nestedGUIDs)
-                        else:
-                            nestedGUIDs = ()
-                    else:
-                        nestedGUIDs = ()
-                        memberGUIDs = ()
-
-                    # Create records but don't store them in our index or
-                    # send them to memcached, because these are transient,
-                    # existing only so we can create principal resource
-                    # objects that are used to generate the REPORT result.
-
-                    record = OpenDirectoryRecord(
-                        service=self,
-                        recordType=recordType,
-                        guid=recordGUID,
-                        nodeName=recordNodeName,
-                        shortNames=recordShortNames,
-                        authIDs=recordAuthIDs,
-                        fullName=recordFullName,
-                        firstName=recordFirstName,
-                        lastName=recordLastName,
-                        emailAddresses=recordEmailAddresses,
-                        memberGUIDs=memberGUIDs,
-                        nestedGUIDs=nestedGUIDs,
-                        extProxies=(),
-                        extReadOnlyProxies=(),
-                    )
-
-                    # (Copied from below)
-                    # Look up augment information
-                    # TODO: this needs to be deferred but for now we hard code
-                    # the deferred result because we know it is completing
-                    # immediately.
-                    if self.augmentService is not None:
-                        d = self.augmentService.getAugmentRecord(record.guid,
-                            recordType)
-                        d.addCallback(lambda x: record.addAugmentInformation(x))
-
-                    yield record
-
-                except KeyError:
-                    pass
-
-
-        def multiQuery(directory, queries, attrs, operand):
-            byGUID = {}
-            sets = []
-
-            for query, recordTypes in queries.iteritems():
-                ODField, value, caseless, matchType = query
-                if matchType == "starts-with":
-                    comparison = dsattributes.eDSStartsWith
-                elif matchType == "contains":
-                    comparison = dsattributes.eDSContains
-                else:
-                    comparison = dsattributes.eDSExact
-
-                self.log.debug("Calling OD: Types %s, Field %s, Value %s, Match %s, Caseless %s" %
-                    (recordTypes, ODField, value, matchType, caseless))
-
-                try:
-                    queryResults = lookupMethod(
-                        directory,
-                        ODField,
-                        value,
-                        comparison,
-                        caseless,
-                        recordTypes,
-                        attrs,
-                    )
-
-                    if operand == dsquery.expression.OR:
-                        for recordName, data in queryResults:
-                            guid = data.get(dsattributes.kDS1AttrGeneratedUID, None)
-                            if guid:
-                                byGUID[guid] = (recordName, data)
-                    else: # AND
-                        newSet = set()
-                        for recordName, data in queryResults:
-                            guid = data.get(dsattributes.kDS1AttrGeneratedUID, None)
-                            if guid:
-                                byGUID[guid] = (recordName, data)
-                                newSet.add(guid)
-
-                        sets.append(newSet)
-
-                except self.odModule.ODError, e:
-                    self.log.error("Ignoring OD Error: %d %s" %
-                        (e.message[1], e.message[0]))
-                    continue
-
-            if operand == dsquery.expression.OR:
-                return byGUID.values()
-
-            else:
-                results = []
-                for guid in set.intersection(*sets):
-                    recordName, data = byGUID.get(guid, None)
-                    if data is not None:
-                        results.append((data[dsattributes.kDSNAttrRecordName], data))
-                return results
-
-        operand = (dsquery.expression.OR if operand == "or"
-            else dsquery.expression.AND)
-
-        if recordType is None:
-            # The client is looking for records in any of the four types
-            recordTypes = set(self._toODRecordTypes.values())
-        else:
-            # The client is after only one recordType
-            recordTypes = [self._toODRecordTypes[recordType]]
-
-        queries = buildQueries(recordTypes, fields, self._ODFields)
-
-        results = multiQuery(
-            self.directory,
-            queries,
-            [
-                dsattributes.kDS1AttrGeneratedUID,
-                dsattributes.kDSNAttrRecordName,
-                dsattributes.kDSNAttrAltSecurityIdentities,
-                dsattributes.kDSNAttrRecordType,
-                dsattributes.kDS1AttrDistinguishedName,
-                dsattributes.kDS1AttrFirstName,
-                dsattributes.kDS1AttrLastName,
-                dsattributes.kDSNAttrEMailAddress,
-                dsattributes.kDSNAttrMetaNodeLocation,
-                dsattributes.kDSNAttrGroupMembers,
-                dsattributes.kDSNAttrNestedGroups,
-            ],
-            operand
-        )
-        return succeed(collectResults(results))
-
-
-    def queryDirectory(self, recordTypes, indexType, indexKey,
-        lookupMethod=None):
-
-        if lookupMethod is None:
-            lookupMethod = self.odModule.queryRecordsWithAttribute_list
-
-        origIndexKey = indexKey
-        if indexType == self.INDEX_TYPE_CUA:
-            # The directory doesn't contain CUAs, so we need to convert
-            # the CUA to the appropriate field name and value:
-            queryattr, indexKey = cuAddressConverter(indexKey)
-            # queryattr will be one of:
-            # guid, emailAddresses, or recordName
-            # ...which will need to be mapped to DS
-            queryattr = self._ODFields[queryattr]['odField']
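-            # For example, a "urn:uuid:" calendar user address would be
-            # expected to map to the guid field and a "mailto:" address to
-            # emailAddresses (an assumption based on the possible return
-            # values noted above).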
-
-        else:
-            queryattr = {
-                self.INDEX_TYPE_SHORTNAME : dsattributes.kDSNAttrRecordName,
-                self.INDEX_TYPE_GUID      : dsattributes.kDS1AttrGeneratedUID,
-                self.INDEX_TYPE_AUTHID    : dsattributes.kDSNAttrAltSecurityIdentities,
-            }.get(indexType)
-            assert queryattr is not None, "Invalid type for record faulting query"
-        # Make all OD queries case insensitive
-        caseInsensitive = True
-
-        results = []
-        for recordType in recordTypes:
-
-            attrs = [
-                dsattributes.kDS1AttrGeneratedUID,
-                dsattributes.kDSNAttrRecordName,
-                dsattributes.kDSNAttrAltSecurityIdentities,
-                dsattributes.kDSNAttrRecordType,
-                dsattributes.kDS1AttrDistinguishedName,
-                dsattributes.kDS1AttrFirstName,
-                dsattributes.kDS1AttrLastName,
-                dsattributes.kDSNAttrEMailAddress,
-                dsattributes.kDSNAttrMetaNodeLocation,
-            ]
-
-            if recordType == DirectoryService.recordType_users:
-                listRecordTypes = [self._toODRecordTypes[recordType]]
-
-            elif recordType in (
-                DirectoryService.recordType_resources,
-                DirectoryService.recordType_locations,
-            ):
-                if queryattr == dsattributes.kDSNAttrEMailAddress:
-                    continue
-
-                listRecordTypes = [self._toODRecordTypes[recordType]]
-
-            elif recordType == DirectoryService.recordType_groups:
-
-                if queryattr == dsattributes.kDSNAttrEMailAddress:
-                    continue
-
-                listRecordTypes = [dsattributes.kDSStdRecordTypeGroups]
-                attrs.append(dsattributes.kDSNAttrGroupMembers)
-                attrs.append(dsattributes.kDSNAttrNestedGroups)
-
-            else:
-                raise UnknownRecordTypeError("Unknown OpenDirectory record type: %s" % (recordType))
-
-            # Because we're getting transient OD error -14987, try 3 times:
-            for _ignore in xrange(3):
-                try:
-                    self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
-                        self.directory,
-                        queryattr,
-                        indexKey,
-                        dsattributes.eDSExact,
-                        caseInsensitive,
-                        listRecordTypes,
-                        attrs,
-                    ))
-                    lookedUp = lookupMethod(
-                            self.directory,
-                            queryattr,
-                            indexKey,
-                            dsattributes.eDSExact,
-                            caseInsensitive,
-                            listRecordTypes,
-                            attrs,
-                        )
-                    results.extend(lookedUp)
-
-                except self.odModule.ODError, ex:
-                    if ex.message[1] == -14987:
-                        # Fall through and retry
-                        self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
-                    elif ex.message[1] == -14140 or ex.message[1] == -14200:
-                        # Unsupported attribute on record - don't fail
-                        return
-                    else:
-                        self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
-                        raise
-                else:
-                    # Success, so break the retry loop
-                    break
-
-        self.log.debug("opendirectory.queryRecordsWithAttribute_list matched records: %s" % (len(results),))
-
-        enabledRecords = []
-        disabledRecords = []
-
-        for (recordShortName, value) in results:
-
-            # Now get useful record info.
-            recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
-            recordShortNames = self._uniqueTupleFromAttribute(value.get(dsattributes.kDSNAttrRecordName))
-            recordType = value.get(dsattributes.kDSNAttrRecordType)
-            if isinstance(recordType, list):
-                recordType = recordType[0]
-            recordAuthIDs = self._setFromAttribute(value.get(dsattributes.kDSNAttrAltSecurityIdentities))
-            recordFullName = value.get(dsattributes.kDS1AttrDistinguishedName)
-            recordFirstName = value.get(dsattributes.kDS1AttrFirstName)
-            recordLastName = value.get(dsattributes.kDS1AttrLastName)
-            recordEmailAddresses = self._setFromAttribute(value.get(dsattributes.kDSNAttrEMailAddress), lower=True)
-            recordNodeName = value.get(dsattributes.kDSNAttrMetaNodeLocation)
-
-            if not recordType:
-                self.log.debug("Record (unknown)%s in node %s has no recordType; ignoring."
-                               % (recordShortName, recordNodeName))
-                continue
-
-            recordType = self._fromODRecordTypes[recordType]
-
-            if not recordGUID:
-                self.log.debug("Record (%s)%s in node %s has no GUID; ignoring."
-                               % (recordType, recordShortName, recordNodeName))
-                continue
-
-            if recordGUID.lower().startswith("ffffeeee-dddd-cccc-bbbb-aaaa"):
-                self.log.debug("Ignoring system record (%s)%s in node %s."
-                               % (recordType, recordShortName, recordNodeName))
-                continue
-
-            # If restrictToGroup is in effect, all guids which are not a member
-            # of that group are disabled (overriding the augments db).
-            if (self.restrictedGUIDs is not None):
-                unrestricted = recordGUID in self.restrictedGUIDs
-            else:
-                unrestricted = True
-
-            # Special case for groups, which have members.
-            if recordType == self.recordType_groups:
-                memberGUIDs = value.get(dsattributes.kDSNAttrGroupMembers)
-                if memberGUIDs is None:
-                    memberGUIDs = ()
-                elif type(memberGUIDs) is str:
-                    memberGUIDs = (memberGUIDs,)
-                nestedGUIDs = value.get(dsattributes.kDSNAttrNestedGroups)
-                if nestedGUIDs:
-                    if type(nestedGUIDs) is str:
-                        nestedGUIDs = (nestedGUIDs,)
-                    memberGUIDs += tuple(nestedGUIDs)
-                else:
-                    nestedGUIDs = ()
-            else:
-                memberGUIDs = ()
-                nestedGUIDs = ()
-
-            # Special case for resources and locations
-            autoSchedule = False
-            proxyGUIDs = ()
-            readOnlyProxyGUIDs = ()
-            if recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
-                resourceInfo = value.get(dsattributes.kDSNAttrResourceInfo)
-                if resourceInfo is not None:
-                    if type(resourceInfo) is not str:
-                        resourceInfo = resourceInfo[0]
-                    try:
-                        autoSchedule, proxy, read_only_proxy = self.parseResourceInfo(resourceInfo, recordGUID, recordType, recordShortName)
-                    except ValueError:
-                        continue
-                    if proxy:
-                        proxyGUIDs = (proxy,)
-                    if read_only_proxy:
-                        readOnlyProxyGUIDs = (read_only_proxy,)
-
-            record = OpenDirectoryRecord(
-                service=self,
-                recordType=recordType,
-                guid=recordGUID,
-                nodeName=recordNodeName,
-                shortNames=recordShortNames,
-                authIDs=recordAuthIDs,
-                fullName=recordFullName,
-                firstName=recordFirstName,
-                lastName=recordLastName,
-                emailAddresses=recordEmailAddresses,
-                memberGUIDs=memberGUIDs,
-                nestedGUIDs=nestedGUIDs,
-                extProxies=proxyGUIDs,
-                extReadOnlyProxies=readOnlyProxyGUIDs,
-            )
-
-            # Look up augment information
-            # TODO: this needs to be deferred but for now we hard code the deferred result because
-            # we know it is completing immediately.
-            if self.augmentService is not None:
-                d = self.augmentService.getAugmentRecord(record.guid,
-                    recordType)
-                d.addCallback(lambda x: record.addAugmentInformation(x))
-
-            # Override based on ResourceInfo
-            if autoSchedule:
-                record.autoSchedule = True
-
-            if not unrestricted:
-                self.log.debug("%s is not enabled because it's not a member of group: %s" % (recordGUID, self.restrictToGroup))
-                record.enabledForCalendaring = False
-                record.enabledForAddressBooks = False
-
-            record.applySACLs()
-
-            if record.enabledForCalendaring:
-                enabledRecords.append(record)
-            else:
-                disabledRecords.append(record)
-
-        record = None
-        if len(enabledRecords) == 1:
-            record = enabledRecords[0]
-        elif len(enabledRecords) == 0 and len(disabledRecords) == 1:
-            record = disabledRecords[0]
-        elif indexType == self.INDEX_TYPE_GUID and len(enabledRecords) > 1:
-            self.log.error("Duplicate records found for GUID %s:" % (indexKey,))
-            for duplicateRecord in enabledRecords:
-                self.log.error("Duplicate: %s" % (", ".join(duplicateRecord.shortNames)))
-
-        if record:
-            if isinstance(origIndexKey, unicode):
-                origIndexKey = origIndexKey.encode("utf-8")
-            self.log.debug("Storing (%s %s) %s in internal cache" % (indexType, origIndexKey, record))
-
-            self.recordCacheForType(recordType).addRecord(record, indexType, origIndexKey)
-
-
-    def getResourceInfo(self):
-        """
-        Resource information including proxy assignments for resources and
-        locations, as well as auto-schedule settings, used to live in the
-        directory.  This method fetches old resource info for migration
-        purposes.
-        """
-        attrs = [
-            dsattributes.kDS1AttrGeneratedUID,
-            dsattributes.kDSNAttrResourceInfo,
-        ]
-
-        for recordType in (dsattributes.kDSStdRecordTypePlaces, dsattributes.kDSStdRecordTypeResources):
-            try:
-                self.log.debug("opendirectory.listAllRecordsWithAttributes_list(%r,%r,%r)" % (
-                    self.directory,
-                    recordType,
-                    attrs,
-                ))
-                results = self.odModule.listAllRecordsWithAttributes_list(
-                    self.directory,
-                    recordType,
-                    attrs,
-                )
-            except self.odModule.ODError, ex:
-                self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
-                raise
-
-            for (recordShortName, value) in results:
-                recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
-                resourceInfo = value.get(dsattributes.kDSNAttrResourceInfo)
-                if resourceInfo is not None:
-                    if type(resourceInfo) is not str:
-                        resourceInfo = resourceInfo[0]
-                    try:
-                        autoSchedule, proxy, readOnlyProxy = self.parseResourceInfo(resourceInfo,
-                            recordGUID, recordType, recordShortName)
-                    except ValueError:
-                        continue
-                    yield recordGUID, autoSchedule, proxy, readOnlyProxy
-
-
-    def isAvailable(self):
-        """
-        Returns True if all configured directory nodes are accessible, False otherwise
-        """
-
-        if self.node == "/Search":
-            result = self.odModule.getNodeAttributes(self.directory, "/Search",
-                (dsattributes.kDS1AttrSearchPath,))
-            nodes = result[dsattributes.kDS1AttrSearchPath]
-        else:
-            nodes = [self.node]
-
-        try:
-            for node in nodes:
-                self.odModule.getNodeAttributes(self.directory, node, [dsattributes.kDSNAttrNodePath])
-        except self.odModule.ODError:
-            self.log.warn("OpenDirectory Node %s not available" % (node,))
-            return False
-
-        return True
-
-
-    @inlineCallbacks
-    def getGroups(self, guids):
-        """
-        Returns a set of group records for the list of guids passed in.  For
-        any group that also contains subgroups, those subgroups' records are
-        also returned, and so on.
-        """
-
-        recordsByGUID = {}
-        valuesToFetch = guids
-
-        loop = 1
-        while valuesToFetch:
-            self.log.debug("getGroups loop %d" % (loop,))
-
-            results = []
-
-            for batch in splitIntoBatches(valuesToFetch, self.batchSize):
-                fields = []
-                for value in batch:
-                    fields.append(["guid", value, False, "equals"])
-                self.log.debug("getGroups fetching batch of %d" %
-                    (len(fields),))
-                result = list((yield self.recordsMatchingFields(fields,
-                    recordType=self.recordType_groups)))
-                results.extend(result)
-                self.log.debug("getGroups got back batch of %d for subtotal of %d" %
-                    (len(result), len(results)))
-
-            # Reset values for next iteration
-            valuesToFetch = set()
-
-            for record in results:
-                guid = record.guid
-                if guid not in recordsByGUID:
-                    recordsByGUID[guid] = record
-
-                # record.nestedGUIDs() contains the sub groups of this group
-                for memberGUID in record.nestedGUIDs():
-                    if memberGUID not in recordsByGUID:
-                        self.log.debug("getGroups group %s contains group %s" %
-                            (record.guid, memberGUID))
-                        valuesToFetch.add(memberGUID)
-
-            loop += 1
-
-        returnValue(recordsByGUID.values())
-
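-# Illustrative sketch only (not part of the original module): the worklist
-# pattern getGroups() uses above, reduced to plain Python.  fetchGroups is a
-# hypothetical stand-in for the batched recordsMatchingFields() lookup; each
-# returned object is assumed to expose .guid and .nestedGUIDs().
-def _expandGroupsSketch(guids, fetchGroups):
-    recordsByGUID = {}
-    valuesToFetch = set(guids)
-    while valuesToFetch:
-        results = fetchGroups(valuesToFetch)   # fetch this round's groups
-        valuesToFetch = set()
-        for record in results:
-            if record.guid not in recordsByGUID:
-                recordsByGUID[record.guid] = record
-            # queue any nested groups not yet seen for the next round
-            for memberGUID in record.nestedGUIDs():
-                if memberGUID not in recordsByGUID:
-                    valuesToFetch.add(memberGUID)
-    return recordsByGUID.values()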
-
-
-def buildQueries(recordTypes, fields, mapping):
-    """
-    Determine how many queries need to be performed in order to work around opendirectory
-    quirks, where searching on fields that don't apply to a given recordType returns incorrect
-    results (either none, or all records).
-    """
-
-    queries = {}
-    for recordType in recordTypes:
-        for field, value, caseless, matchType in fields:
-            if field in mapping:
-                if recordType in mapping[field]['appliesTo']:
-                    ODField = mapping[field]['odField']
-                    key = (ODField, value, caseless, matchType)
-                    queries.setdefault(key, []).append(recordType)
-
-    return queries
-
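-# Illustrative example only (not part of the original module): given
-#   recordTypes = [kDSStdRecordTypeUsers, kDSStdRecordTypePlaces]
-#   fields = [("firstName", "amy", True, "starts-with"),
-#             ("fullName", "amy", True, "starts-with")]
-# buildQueries() returns one entry per (odField, value, caseless, matchType)
-# key, listing only the record types that field applies to, e.g.
-#   {(kDS1AttrFirstName, "amy", True, "starts-with"):
-#        [kDSStdRecordTypeUsers],
-#    (kDS1AttrDistinguishedName, "amy", True, "starts-with"):
-#        [kDSStdRecordTypeUsers, kDSStdRecordTypePlaces]}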
-
-
-def buildLocalQueriesFromTokens(tokens, mapping):
-    """
-    OD /Local doesn't support nested complex queries, so create a list of
-    complex queries that will be ANDed together in recordsMatchingTokens()
-
-    @param tokens: The tokens to search on
-    @type tokens: C{list} of C{str}
-    @param mapping: The mapping of DirectoryRecord attributes to OD attributes
-    @type mapping: C{dict}
-    @return: A list of expression objects
-    @rtype: C{list}
-    """
-
-    if len(tokens) == 0:
-        return None
-
-    fields = [
-        ("fullName", dsattributes.eDSContains),
-        ("emailAddresses", dsattributes.eDSStartsWith),
-    ]
-
-    results = []
-    for token in tokens:
-        queries = []
-        for field, comparison in fields:
-            ODField = mapping[field]['odField']
-            query = dsquery.match(ODField, token, comparison)
-            queries.append(query)
-        results.append(dsquery.expression(dsquery.expression.OR, queries))
-    return results
-
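-# Illustrative example only (not part of the original module): for
-# tokens = ["amy", "eng"], buildLocalQueriesFromTokens() returns two
-# expressions, one per token, each an OR of
-#   fullName contains <token>  /  emailAddresses starts-with <token>
-# The caller (recordsMatchingTokens above) then ANDs the per-token result
-# sets itself, since /Local cannot evaluate the nested AND/OR query.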
-
-
-def buildNestedQueryFromTokens(tokens, mapping):
-    """
-    Build a DS query expression requiring every token to appear in at least
-    one of fullName (anywhere), emailAddresses (at the beginning), or
-    recordName (at the beginning).
-
-    @param tokens: The tokens to search on
-    @type tokens: C{list} of C{str}
-    @param mapping: The mapping of DirectoryRecord attributes to OD attributes
-    @type mapping: C{dict}
-    @return: The nested expression object
-    @rtype: dsquery.expression
-    """
-
-    if len(tokens) == 0:
-        return None
-
-    fields = [
-        ("fullName", dsattributes.eDSContains),
-        ("emailAddresses", dsattributes.eDSStartsWith),
-        ("recordName", dsattributes.eDSStartsWith),
-    ]
-
-    outer = []
-    for token in tokens:
-        inner = []
-        for field, comparison in fields:
-            ODField = mapping[field]['odField']
-            query = dsquery.match(ODField, token, comparison)
-            inner.append(query)
-        outer.append(dsquery.expression(dsquery.expression.OR, inner))
-    return dsquery.expression(dsquery.expression.AND, outer)
-
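-# Illustrative example only (not part of the original module): for
-# tokens = ["amy", "eng"], buildNestedQueryFromTokens() returns a single
-# expression equivalent to
-#   AND( OR(fullName contains "amy", emailAddresses starts-with "amy",
-#           recordName starts-with "amy"),
-#        OR(fullName contains "eng", emailAddresses starts-with "eng",
-#           recordName starts-with "eng") )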
-
-
-class OpenDirectoryRecord(CachingDirectoryRecord):
-    """
-    OpenDirectory implementation of L{IDirectoryRecord}.
-    """
-    def __init__(
-        self, service, recordType, guid, nodeName, shortNames, authIDs,
-        fullName, firstName, lastName, emailAddresses, memberGUIDs, nestedGUIDs,
-        extProxies, extReadOnlyProxies,
-    ):
-        super(OpenDirectoryRecord, self).__init__(
-            service=service,
-            recordType=recordType,
-            guid=guid,
-            shortNames=shortNames,
-            authIDs=authIDs,
-            fullName=fullName,
-            firstName=firstName,
-            lastName=lastName,
-            emailAddresses=emailAddresses,
-            extProxies=extProxies,
-            extReadOnlyProxies=extReadOnlyProxies,
-        )
-        self.nodeName = nodeName
-
-        self._memberGUIDs = tuple(memberGUIDs)
-        self._nestedGUIDs = tuple(nestedGUIDs)
-        self._groupMembershipGUIDs = None
-
-
-    def __repr__(self):
-        if self.service.realmName == self.nodeName:
-            location = self.nodeName
-        else:
-            location = "%s->%s" % (self.service.realmName, self.nodeName)
-
-        return "<%s[%s@%s(%s)] %s(%s) %r>" % (
-            self.__class__.__name__,
-            self.recordType,
-            self.service.guid,
-            location,
-            self.guid,
-            ",".join(self.shortNames),
-            self.fullName
-        )
-
-
-    def members(self):
-        if self.recordType != self.service.recordType_groups:
-            return
-
-        for guid in self._memberGUIDs:
-            userRecord = self.service.recordWithGUID(guid)
-            if userRecord is not None:
-                yield userRecord
-
-
-    def groups(self):
-        if self._groupMembershipGUIDs is None:
-            self._groupMembershipGUIDs = self.service.groupsForGUID(self.guid)
-
-        for guid in self._groupMembershipGUIDs:
-            record = self.service.recordWithGUID(guid)
-            if record:
-                yield record
-
-
-    def memberGUIDs(self):
-        return set(self._memberGUIDs)
-
-
-    def nestedGUIDs(self):
-        return set(self._nestedGUIDs)
-
-
-    def verifyCredentials(self, credentials):
-        if isinstance(credentials, UsernamePassword):
-            # Check cached password
-            try:
-                if credentials.password == self.password:
-                    return True
-            except AttributeError:
-                pass
-
-            # Check with directory services
-            try:
-                if self.service.odModule.authenticateUserBasic(self.service.directory, self.nodeName, self.shortNames[0], credentials.password):
-                    # Cache the password to avoid future DS queries
-                    self.password = credentials.password
-                    return True
-            except self.service.odModule.ODError, e:
-                self.log.error("OpenDirectory (node=%s) error while performing basic authentication for user %s: %s"
-                            % (self.service.realmName, self.shortNames[0], e))
-
-            return False
-
-        elif isinstance(credentials, DigestedCredentials):
-            #
-            # We need a special format for the "challenge" and "response" strings passed into OpenDirectory, as it is
-            # picky about exactly what it receives.
-            #
-            try:
-                if "algorithm" not in credentials.fields:
-                    credentials.fields["algorithm"] = "md5"
-                challenge = 'Digest realm="%(realm)s", nonce="%(nonce)s", algorithm=%(algorithm)s' % credentials.fields
-                response = (
-                    'Digest username="%(username)s", '
-                    'realm="%(realm)s", '
-                    'nonce="%(nonce)s", '
-                    'uri="%(uri)s", '
-                    'response="%(response)s",'
-                    'algorithm=%(algorithm)s'
-                ) % credentials.fields
-            except KeyError, e:
-                self.log.error(
-                    "OpenDirectory (node=%s) error while performing digest authentication for user %s: "
-                    "missing digest response field: %s in: %s"
-                    % (self.service.realmName, self.shortNames[0], e, credentials.fields)
-                )
-                return False
-
-            try:
-                if self.digestcache[credentials.fields["uri"]] == response:
-                    return True
-            except (AttributeError, KeyError):
-                pass
-
-            try:
-                if self.service.odModule.authenticateUserDigest(
-                    self.service.directory,
-                    self.nodeName,
-                    self.shortNames[0],
-                    challenge,
-                    response,
-                    credentials.method
-                ):
-                    try:
-                        cache = self.digestcache
-                    except AttributeError:
-                        cache = self.digestcache = {}
-
-                    cache[credentials.fields["uri"]] = response
-
-                    return True
-                else:
-                    self.log.debug(
-"""OpenDirectory digest authentication failed with:
-    Nodename:  %s
-    Username:  %s
-    Challenge: %s
-    Response:  %s
-    Method:    %s
-""" % (self.nodeName, self.shortNames[0], challenge, response,
-       credentials.method))
-
-            except self.service.odModule.ODError, e:
-                self.log.error(
-                    "OpenDirectory (node=%s) error while performing digest authentication for user %s: %s"
-                    % (self.service.realmName, self.shortNames[0], e)
-                )
-                return False
-
-            return False
-
-        return super(OpenDirectoryRecord, self).verifyCredentials(credentials)
-
-
-
-class OpenDirectoryInitError(DirectoryError):
-    """
-    OpenDirectory initialization error.
-    """

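For context on the deleted module above: buildNestedQueryFromTokens() produced an AND-of-ORs expression, i.e. every search token had to match at least one of fullName (contains), emailAddresses (starts-with) or recordName (starts-with). A minimal stand-alone sketch of that shape, using plain tuples in place of the dsquery/dsattributes expression objects (those are only available with the Open Directory bindings, so the tuples here are stand-ins, not the original API):

def buildNestedTokenQuery(tokens, fields):
    # Returns ("AND", [("OR", [(field, token, comparison), ...]), ...]),
    # or None when there are no tokens, mirroring the deleted helper.
    if not tokens:
        return None
    outer = []
    for token in tokens:
        inner = [(field, token, comparison) for (field, comparison) in fields]
        outer.append(("OR", inner))
    return ("AND", outer)

fields = [
    ("fullName", "contains"),
    ("emailAddresses", "starts-with"),
    ("recordName", "starts-with"),
]
query = buildNestedTokenQuery(["morgen", "sagen"], fields)
# -> ("AND", [OR-group for "morgen", OR-group for "sagen"])
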
Modified: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/augment.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/augment.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/augment.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -46,6 +46,7 @@
     "automatic",
 ))
 
+
 class AugmentRecord(object):
     """
     Augmented directory record information
@@ -59,7 +60,7 @@
         enabledForCalendaring=False,
         autoSchedule=False,
         autoScheduleMode="default",
-        autoAcceptGroup="",
+        autoAcceptGroup=None,
         enabledForAddressBooks=False,
         enabledForLogin=True,
     ):
@@ -75,13 +76,15 @@
         self.clonedFromDefault = False
 
 recordTypesMap = {
-    "users" : "User",
-    "groups" : "Group",
-    "locations" : "Location",
-    "resources" : "Resource",
-    "addresses" : "Address",
+    "users": "User",
+    "groups": "Group",
+    "locations": "Location",
+    "resources": "Resource",
+    "addresses": "Address",
+    "wikis": "Wiki",
 }
 
+
 class AugmentDB(object):
     """
     Abstract base class for an augment record database.
@@ -128,7 +131,6 @@
 
         @return: L{Deferred}
         """
-
         recordType = recordTypesMap[recordType]
 
         result = (yield self._lookupAugmentRecord(uid))
@@ -266,9 +268,9 @@
         self.xmlFiles = [fullServerPath(config.DataRoot, path) for path in xmlFiles]
         self.xmlFileStats = {}
         for path in self.xmlFiles:
-            self.xmlFileStats[path] = (0, 0) # mtime, size
+            self.xmlFileStats[path] = (0, 0)  # mtime, size
 
-        self.statSeconds = statSeconds # Don't stat more often than this value
+        self.statSeconds = statSeconds  # Don't stat more often than this value
         self.lastCached = 0
         self.db = {}
 

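The recordTypesMap hunk above gains a "wikis" entry; AugmentDB.getAugmentRecord() translates the old-style plural record type into the singular name stored in augment records before doing the lookup. A minimal illustration (the mapping is copied from the hunk; the helper name is ours):

recordTypesMap = {
    "users": "User",
    "groups": "Group",
    "locations": "Location",
    "resources": "Resource",
    "addresses": "Address",
    "wikis": "Wiki",
}

def augmentRecordTypeFor(oldStyleRecordType):
    # e.g. "wikis" -> "Wiki"; raises KeyError for unknown types, just as
    # the direct dict lookup in getAugmentRecord() would.
    return recordTypesMap[oldStyleRecordType]
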
Deleted: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/cachingdirectory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/cachingdirectory.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/cachingdirectory.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,473 +0,0 @@
-# -*- test-case-name: twistedcaldav.directory.test.test_cachedirectory -*-
-##
-# Copyright (c) 2009-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-"""
-Caching directory service implementation.
-"""
-
-__all__ = [
-    "CachingDirectoryService",
-    "CachingDirectoryRecord",
-    "DictRecordTypeCache",
-]
-
-
-import time
-
-import base64
-
-from twext.python.log import Logger
-
-from twistedcaldav.config import config
-from twistedcaldav.memcacheclient import ClientFactory, MemcacheError
-from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord, DirectoryError, UnknownRecordTypeError
-from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
-from twistedcaldav.directory.util import normalizeUUID
-
-
-class RecordTypeCache(object):
-    """
-    Abstract class for a record type cache. We will likely have dict and memcache implementations of this.
-    """
-
-    def __init__(self, directoryService, recordType):
-
-        self.directoryService = directoryService
-        self.recordType = recordType
-
-
-    def addRecord(self, record, indexType, indexKey, useMemcache=True,
-        neverExpire=False):
-        raise NotImplementedError()
-
-
-    def removeRecord(self, record):
-        raise NotImplementedError()
-
-
-    def findRecord(self, indexType, indexKey):
-        raise NotImplementedError()
-
-
-
-class DictRecordTypeCache(RecordTypeCache):
-    """
-    Cache implementation using a dict, with memcached used to share records
-    with other instances.
-    """
-    log = Logger()
-
-    def __init__(self, directoryService, recordType):
-
-        super(DictRecordTypeCache, self).__init__(directoryService, recordType)
-        self.records = set()
-        self.recordsIndexedBy = {
-            CachingDirectoryService.INDEX_TYPE_GUID     : {},
-            CachingDirectoryService.INDEX_TYPE_SHORTNAME: {},
-            CachingDirectoryService.INDEX_TYPE_CUA      : {},
-            CachingDirectoryService.INDEX_TYPE_AUTHID   : {},
-        }
-        self.directoryService = directoryService
-        self.lastPurgedTime = time.time()
-
-
-    def addRecord(self, record, indexType, indexKey, useMemcache=True,
-        neverExpire=False):
-
-        useMemcache = useMemcache and config.Memcached.Pools.Default.ClientEnabled
-        if neverExpire:
-            record.neverExpire()
-
-        self.records.add(record)
-
-        # Also index/cache on guid
-        indexTypes = [(indexType, indexKey)]
-        if indexType != CachingDirectoryService.INDEX_TYPE_GUID:
-            indexTypes.append((CachingDirectoryService.INDEX_TYPE_GUID,
-                record.guid))
-
-        for indexType, indexKey in indexTypes:
-            self.recordsIndexedBy[indexType][indexKey] = record
-            if useMemcache:
-                key = self.directoryService.generateMemcacheKey(indexType, indexKey,
-                    record.recordType)
-                self.log.debug("Memcache: storing %s" % (key,))
-                try:
-                    self.directoryService.memcacheSet(key, record)
-                except DirectoryMemcacheError:
-                    self.log.error("Memcache: failed to store %s" % (key,))
-                    pass
-
-
-    def removeRecord(self, record):
-        if record in self.records:
-            self.records.remove(record)
-            self.log.debug("Removed record %s" % (record.guid,))
-            for indexType in self.directoryService.indexTypes():
-                try:
-                    indexData = getattr(record, CachingDirectoryService.indexTypeToRecordAttribute[indexType])
-                except AttributeError:
-                    continue
-                if isinstance(indexData, basestring):
-                    indexData = [indexData]
-                for item in indexData:
-                    try:
-                        del self.recordsIndexedBy[indexType][item]
-                    except KeyError:
-                        pass
-
-
-    def findRecord(self, indexType, indexKey):
-        self.purgeExpiredRecords()
-        return self.recordsIndexedBy[indexType].get(indexKey)
-
-
-    def purgeExpiredRecords(self):
-        """
-        Scan the cached records and remove any that have expired.
-        Does nothing if we've scanned within the past cacheTimeout seconds.
-        """
-        if time.time() - self.lastPurgedTime > self.directoryService.cacheTimeout:
-            for record in list(self.records):
-                if record.isExpired():
-                    self.removeRecord(record)
-            self.lastPurgedTime = time.time()
-
-
-
-class CachingDirectoryService(DirectoryService):
-    """
-    Caching Directory implementation of L{IDirectoryService}.
-
-    This class must be overridden to provide a concrete implementation.
-    """
-    log = Logger()
-
-    INDEX_TYPE_GUID = "guid"
-    INDEX_TYPE_SHORTNAME = "shortname"
-    INDEX_TYPE_CUA = "cua"
-    INDEX_TYPE_AUTHID = "authid"
-
-    indexTypeToRecordAttribute = {
-        "guid"     : "guid",
-        "shortname": "shortNames",
-        "cua"      : "calendarUserAddresses",
-        "authid"   : "authIDs",
-    }
-
-    def __init__(
-        self,
-        cacheTimeout=1,
-        negativeCaching=False,
-        cacheClass=DictRecordTypeCache,
-    ):
-        """
-        @param cacheTimeout: C{int} number of minutes before cache is invalidated.
-        """
-
-        self.cacheTimeout = cacheTimeout * 60
-        self.negativeCaching = negativeCaching
-
-        self.cacheClass = cacheClass
-        self._initCaches()
-
-        super(CachingDirectoryService, self).__init__()
-
-
-    def _getMemcacheClient(self, refresh=False):
-        if refresh or not hasattr(self, "memcacheClient"):
-            self.memcacheClient = ClientFactory.getClient(['%s:%s' %
-                (config.Memcached.Pools.Default.BindAddress, config.Memcached.Pools.Default.Port)],
-                debug=0, pickleProtocol=2)
-        return self.memcacheClient
-
-
-    def memcacheSet(self, key, record):
-
-        hideService = isinstance(record, DirectoryRecord)
-
-        try:
-            if hideService:
-                record.service = None # so we don't pickle service
-
-            key = base64.b64encode(key)
-            if not self._getMemcacheClient().set(key, record, time=self.cacheTimeout):
-                self.log.error("Could not write to memcache, retrying")
-                if not self._getMemcacheClient(refresh=True).set(
-                    key, record,
-                    time=self.cacheTimeout
-                ):
-                    self.log.error("Could not write to memcache again, giving up")
-                    del self.memcacheClient
-                    raise DirectoryMemcacheError("Failed to write to memcache")
-        finally:
-            if hideService:
-                record.service = self
-
-
-    def memcacheGet(self, key):
-
-        key = base64.b64encode(key)
-        try:
-            record = self._getMemcacheClient().get(key)
-            if record is not None and isinstance(record, DirectoryRecord):
-                record.service = self
-        except MemcacheError:
-            self.log.error("Could not read from memcache, retrying")
-            try:
-                record = self._getMemcacheClient(refresh=True).get(key)
-                if record is not None and isinstance(record, DirectoryRecord):
-                    record.service = self
-            except MemcacheError:
-                self.log.error("Could not read from memcache again, giving up")
-                del self.memcacheClient
-                raise DirectoryMemcacheError("Failed to read from memcache")
-        return record
-
-
-    def generateMemcacheKey(self, indexType, indexKey, recordType):
-        """
-        Return a key that can be used to store/retrieve a record in memcache.
-    If short-name is the indexType, the recordType will be encoded into the key.
-
-    @param indexType: one of the indexTypes() values
-        @type indexType: C{str}
-        @param indexKey: the value being indexed
-        @type indexKey: C{str}
-        @param recordType: the type of record being cached
-        @type recordType: C{str}
-        @return: a memcache key comprised of the passed-in values and the directory
-            service's baseGUID
-        @rtype: C{str}
-        """
-        keyVersion = 2
-        if indexType == CachingDirectoryService.INDEX_TYPE_SHORTNAME:
-            return "dir|v%d|%s|%s|%s|%s" % (keyVersion, self.baseGUID, recordType,
-                indexType, indexKey)
-        else:
-            return "dir|v%d|%s|%s|%s" % (keyVersion, self.baseGUID, indexType,
-                indexKey)
-
-
-    def _initCaches(self):
-        self._recordCaches = dict([
-            (recordType, self.cacheClass(self, recordType))
-            for recordType in self.recordTypes()
-        ])
-
-        self._disabledKeys = dict([(indexType, dict()) for indexType in self.indexTypes()])
-
-
-    def indexTypes(self):
-
-        return (
-            CachingDirectoryService.INDEX_TYPE_GUID,
-            CachingDirectoryService.INDEX_TYPE_SHORTNAME,
-            CachingDirectoryService.INDEX_TYPE_CUA,
-            CachingDirectoryService.INDEX_TYPE_AUTHID,
-        )
-
-
-    def recordCacheForType(self, recordType):
-        try:
-            return self._recordCaches[recordType]
-        except KeyError:
-            raise UnknownRecordTypeError(recordType)
-
-
-    def listRecords(self, recordType):
-        return self.recordCacheForType(recordType).records
-
-
-    def recordWithShortName(self, recordType, shortName):
-        return self._lookupRecord((recordType,), CachingDirectoryService.INDEX_TYPE_SHORTNAME, shortName)
-
-
-    def recordWithCalendarUserAddress(self, address):
-        address = normalizeCUAddr(address)
-        record = None
-        if address.startswith("mailto:"):
-            record = self._lookupRecord(None, CachingDirectoryService.INDEX_TYPE_CUA, address)
-            return record if record and record.enabledForCalendaring else None
-        else:
-            return DirectoryService.recordWithCalendarUserAddress(self, address)
-
-
-    def recordWithAuthID(self, authID):
-        return self._lookupRecord(None, CachingDirectoryService.INDEX_TYPE_AUTHID, authID)
-
-
-    def recordWithGUID(self, guid):
-        guid = normalizeUUID(guid)
-        return self._lookupRecord(None, CachingDirectoryService.INDEX_TYPE_GUID, guid)
-
-    recordWithUID = recordWithGUID
-
-    def _lookupRecord(self, recordTypes, indexType, indexKey):
-
-        if recordTypes is None:
-            recordTypes = self.recordTypes()
-        else:
-            # Only use recordTypes this service supports:
-            supportedRecordTypes = self.recordTypes()
-            recordTypes = [t for t in recordTypes if t in supportedRecordTypes]
-            if not recordTypes:
-                return None
-
-        def lookup():
-            for recordType in recordTypes:
-                record = self.recordCacheForType(recordType).findRecord(indexType, indexKey)
-
-                if record:
-                    if record.isExpired():
-                        self.recordCacheForType(recordType).removeRecord(record)
-                        return None
-                    else:
-                        return record
-            else:
-                return None
-
-        record = lookup()
-        if record:
-            return record
-
-        if self.negativeCaching:
-
-            # Check negative cache (take cache entry timeout into account)
-            try:
-                disabledTime = self._disabledKeys[indexType][indexKey]
-                if time.time() - disabledTime < self.cacheTimeout:
-                    return None
-            except KeyError:
-                pass
-
-        # Check memcache
-        if config.Memcached.Pools.Default.ClientEnabled:
-
-            # The only time the recordType arg matters is when indexType is
-            # short-name, and in that case recordTypes will contain exactly
-            # one recordType, so using recordTypes[0] here is always safe:
-            key = self.generateMemcacheKey(indexType, indexKey, recordTypes[0])
-
-            self.log.debug("Memcache: checking %s" % (key,))
-
-            try:
-                record = self.memcacheGet(key)
-            except DirectoryMemcacheError:
-                self.log.error("Memcache: failed to get %s" % (key,))
-                record = None
-
-            if record is None:
-                self.log.debug("Memcache: miss %s" % (key,))
-            else:
-                self.log.debug("Memcache: hit %s" % (key,))
-                self.recordCacheForType(record.recordType).addRecord(record, indexType, indexKey, useMemcache=False)
-                return record
-
-            if self.negativeCaching:
-
-                # Check negative memcache
-                try:
-                    val = self.memcacheGet("-%s" % (key,))
-                except DirectoryMemcacheError:
-                    self.log.error("Memcache: failed to get -%s" % (key,))
-                    val = None
-                if val == 1:
-                    self.log.debug("Memcache: negative %s" % (key,))
-                    self._disabledKeys[indexType][indexKey] = time.time()
-                    return None
-
-        # Try query
-        self.log.debug("Faulting record for attribute '%s' with value '%s'" % (indexType, indexKey,))
-        self.queryDirectory(recordTypes, indexType, indexKey)
-
-        # Now try again from cache
-        record = lookup()
-        if record:
-            self.log.debug("Found record for attribute '%s' with value '%s'" % (indexType, indexKey,))
-            return record
-
-        if self.negativeCaching:
-
-            # Add to negative cache with timestamp
-            self.log.debug("Failed to fault record for attribute '%s' with value '%s'" % (indexType, indexKey,))
-            self._disabledKeys[indexType][indexKey] = time.time()
-
-            if config.Memcached.Pools.Default.ClientEnabled:
-                self.log.debug("Memcache: storing (negative) %s" % (key,))
-                try:
-                    self.memcacheSet("-%s" % (key,), 1)
-                except DirectoryMemcacheError:
-                    self.log.error("Memcache: failed to set -%s" % (key,))
-                    pass
-
-        return None
-
-
-    def queryDirectory(self, recordTypes, indexType, indexKey):
-        raise NotImplementedError()
-
-
-
-class CachingDirectoryRecord(DirectoryRecord):
-
-    def __init__(
-        self, service, recordType, guid,
-        shortNames=(), authIDs=set(),
-        fullName=None, firstName=None, lastName=None, emailAddresses=set(),
-        uid=None, **kwargs
-    ):
-        super(CachingDirectoryRecord, self).__init__(
-            service,
-            recordType,
-            guid,
-            shortNames=shortNames,
-            authIDs=authIDs,
-            fullName=fullName,
-            firstName=firstName,
-            lastName=lastName,
-            emailAddresses=emailAddresses,
-            uid=uid,
-            **kwargs
-        )
-
-        self.cachedTime = time.time()
-
-
-    def neverExpire(self):
-        self.cachedTime = 0
-
-
-    def isExpired(self):
-        """
-        Returns True if this record was created more than cacheTimeout
-        seconds ago
-        """
-        if (
-            self.cachedTime != 0 and
-            time.time() - self.cachedTime > self.service.cacheTimeout
-        ):
-            return True
-        else:
-            return False
-
-
-
-class DirectoryMemcacheError(DirectoryError):
-    """
-    Error communicating with memcached.
-    """

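The cachingdirectory.py module deleted above implemented a layered lookup: per-record-type in-memory cache, optional negative cache, shared memcached cache, and finally a directory query whose result (or miss) is cached again. A condensed, self-contained sketch of that cascade, with a plain dict standing in for memcached and queryDirectory() left as a stub (all names here are illustrative, not the original API):

import time

class MiniRecordCache(object):
    def __init__(self, cacheTimeout=60, negativeCaching=True):
        self.cacheTimeout = cacheTimeout   # seconds (the real service took minutes)
        self.negativeCaching = negativeCaching
        self.records = {}                  # indexKey -> (record, cachedTime)
        self.disabledKeys = {}             # indexKey -> time of last negative result
        self.memcache = {}                 # dict standing in for the memcached pool

    def queryDirectory(self, indexKey):
        return None                        # the real service overrides this

    def lookup(self, indexKey):
        # 1. In-memory cache, honouring per-record expiry.
        hit = self.records.get(indexKey)
        if hit is not None:
            record, cachedTime = hit
            if time.time() - cachedTime <= self.cacheTimeout:
                return record
            del self.records[indexKey]
        # 2. Negative cache of recent misses.
        if self.negativeCaching:
            disabledTime = self.disabledKeys.get(indexKey)
            if disabledTime is not None and time.time() - disabledTime < self.cacheTimeout:
                return None
        # 3. Shared cache (memcached in the real implementation).
        record = self.memcache.get(indexKey)
        if record is not None:
            self.records[indexKey] = (record, time.time())
            return record
        # 4. Fault the record in from the directory itself.
        record = self.queryDirectory(indexKey)
        if record is not None:
            self.records[indexKey] = (record, time.time())
            self.memcache[indexKey] = record
            return record
        # 5. Remember the miss so the directory is not queried again right away.
        if self.negativeCaching:
            self.disabledKeys[indexKey] = time.time()
        return None

The deleted implementation additionally stored negative entries in memcached and indexed records by GUID, short name, calendar user address and authentication ID, but the control flow is the same.
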
Modified: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendar.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendar.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendar.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -35,11 +35,10 @@
 from twisted.internet.defer import succeed, inlineCallbacks, returnValue
 
 from twistedcaldav.config import config
-from twistedcaldav.directory.idirectory import IDirectoryService
 from twistedcaldav.directory.common import uidsResourceName, \
     CommonUIDProvisioningResource, CommonHomeTypeProvisioningResource
 
-from twistedcaldav.directory.wiki import getWikiACL
+from txdav.who.wiki import getWikiACL
 from twistedcaldav.extensions import ReadOnlyResourceMixIn, DAVResource, \
     DAVResourceWithChildrenMixin
 from twistedcaldav.resource import CalendarHomeResource
@@ -48,7 +47,10 @@
 
 log = Logger()
 
+
 # FIXME: copied from resource.py to avoid circular dependency
+
+
 class CalDAVComplianceMixIn(object):
     def davComplianceClasses(self):
         return (
@@ -65,7 +67,7 @@
     DAVResource,
 ):
     def defaultAccessControlList(self):
-        return config.ProvisioningResourceACL
+        return succeed(config.ProvisioningResourceACL)
 
 
     def etag(self):
@@ -91,7 +93,8 @@
 
         super(DirectoryCalendarHomeProvisioningResource, self).__init__()
 
-        self.directory = IDirectoryService(directory)
+        # MOVE2WHO
+        self.directory = directory  # IDirectoryService(directory)
         self._url = url
         self._newStore = store
 
@@ -101,9 +104,27 @@
         #
         # Create children
         #
-        for recordType in self.directory.recordTypes():
-            self.putChild(recordType, DirectoryCalendarHomeTypeProvisioningResource(self, recordType))
+        # ...just users, locations, and resources though.  If we iterate all of
+        # the directory's recordTypes, we also get the proxy sub principal types
+        # and other things which don't have calendars.
 
+        self.supportedChildTypes = (
+            self.directory.recordType.user,
+            self.directory.recordType.location,
+            self.directory.recordType.resource,
+        )
+
+        for recordType, recordTypeName in [
+            (r, self.directory.recordTypeToOldName(r)) for r in
+            self.supportedChildTypes
+        ]:
+            self.putChild(
+                recordTypeName,
+                DirectoryCalendarHomeTypeProvisioningResource(
+                    self, recordTypeName, recordType
+                )
+            )
+
         self.putChild(uidsResourceName, DirectoryCalendarHomeUIDProvisioningResource(self))
 
 
@@ -112,7 +133,10 @@
 
 
     def listChildren(self):
-        return self.directory.recordTypes()
+        return [
+            self.directory.recordTypeToOldName(r) for r in
+            self.supportedChildTypes
+        ]
 
 
     def principalCollections(self):
@@ -127,12 +151,13 @@
         return self.directory.principalCollection.principalForRecord(record)
 
 
+    @inlineCallbacks
     def homeForDirectoryRecord(self, record, request):
-        uidResource = self.getChild(uidsResourceName)
+        uidResource = yield self.getChild(uidsResourceName)
         if uidResource is None:
-            return None
+            returnValue(None)
         else:
-            return uidResource.homeResourceForRecord(record, request)
+            returnValue((yield uidResource.homeResourceForRecord(record, request)))
 
 
     ##
@@ -149,42 +174,43 @@
 
 
 class DirectoryCalendarHomeTypeProvisioningResource(
-        CommonHomeTypeProvisioningResource,
-        DirectoryCalendarProvisioningResource
-    ):
+    CommonHomeTypeProvisioningResource,
+    DirectoryCalendarProvisioningResource
+):
     """
     Resource which provisions calendar home collections of a specific
     record type as needed.
     """
-    def __init__(self, parent, recordType):
+    def __init__(self, parent, name, recordType):
         """
         @param parent: the parent of this resource
         @param recordType: the directory record type to provision.
         """
         assert parent is not None
+        assert name is not None
         assert recordType is not None
 
         super(DirectoryCalendarHomeTypeProvisioningResource, self).__init__()
 
         self.directory = parent.directory
+        self.name = name
         self.recordType = recordType
         self._parent = parent
 
 
     def url(self):
-        return joinURL(self._parent.url(), self.recordType)
+        return joinURL(self._parent.url(), self.name)
 
 
+    @inlineCallbacks
     def listChildren(self):
         if config.EnablePrincipalListings:
-
-            def _recordShortnameExpand():
-                for record in self.directory.listRecords(self.recordType):
-                    if record.enabledForCalendaring:
-                        for shortName in record.shortNames:
-                            yield shortName
-
-            return _recordShortnameExpand()
+            children = []
+            for record in (yield self.directory.recordsWithRecordType(self.recordType)):
+                if record.hasCalendars:
+                    for shortName in record.shortNames:
+                        children.append(shortName)
+            returnValue(children)
         else:
             # Not a listable collection
             raise HTTPError(responsecode.FORBIDDEN)
@@ -203,7 +229,7 @@
 
 
     def displayName(self):
-        return self.recordType
+        return self.name
 
 
     ##
@@ -220,13 +246,13 @@
 
 
 class DirectoryCalendarHomeUIDProvisioningResource (
-        CommonUIDProvisioningResource,
-        DirectoryCalendarProvisioningResource
-    ):
+    CommonUIDProvisioningResource,
+    DirectoryCalendarProvisioningResource
+):
 
     homeResourceTypeName = 'calendars'
 
-    enabledAttribute = 'enabledForCalendaring'
+    enabledAttribute = 'hasCalendars'
 
     def homeResourceCreator(self, record, transaction):
         return DirectoryCalendarHomeResource.createHomeResource(
@@ -258,7 +284,7 @@
             else:
                 # ...otherwise permissions are fixed, and are not subject to
                 # inheritance rules, etc.
-                return succeed(self.defaultAccessControlList())
+                return self.defaultAccessControlList()
 
         d = getWikiACL(self, request)
         d.addCallback(gotACL)

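Several of the provisioning methods touched above (listChildren(), homeForDirectoryRecord(), and the getChild() call it makes) now return Deferreds rather than plain values, so callers have to yield them from an inlineCallbacks generator. A minimal sketch of the new calling convention (provisioningResource is a hypothetical DirectoryCalendarHomeTypeProvisioningResource instance, not something defined here):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def shortNamesWithCalendars(provisioningResource):
    # listChildren() is now asynchronous and yields only the short names
    # of records that actually have calendar homes (record.hasCalendars).
    children = yield provisioningResource.listChildren()
    returnValue(sorted(children))
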
Modified: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendaruserproxy.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendaruserproxy.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendaruserproxy.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -29,37 +29,41 @@
 
 import itertools
 import time
+import uuid
 
+from twext.python.log import Logger
+from twext.who.idirectory import RecordType as BaseRecordType
 from twisted.internet.defer import succeed, inlineCallbacks, returnValue
-from txweb2 import responsecode
-from txweb2.http import HTTPError, StatusResponse
-from txdav.xml import element as davxml
-from txdav.xml.base import dav_namespace
-from txweb2.dav.util import joinURL
-from txweb2.dav.noneprops import NonePropertyStore
+from twisted.python.modules import getModule
+from twisted.web.template import XMLFile, Element, renderer
+from twistedcaldav.config import config, fullServerPath
+from twistedcaldav.database import (
+    AbstractADBAPIDatabase, ADBAPISqliteMixin, ADBAPIPostgreSQLMixin
+)
+from twistedcaldav.directory.util import normalizeUUID
+from twistedcaldav.directory.util import (
+    formatLink, formatLinks, formatPrincipals
+)
 
-from twext.python.log import Logger
-
-from twisted.web.template import XMLFile, Element, renderer
-from twisted.python.modules import getModule
+from twistedcaldav.extensions import (
+    DAVPrincipalResource, DAVResourceWithChildrenMixin
+)
 from twistedcaldav.extensions import DirectoryElement
-from twistedcaldav.directory.principal import formatLink
-from twistedcaldav.directory.principal import formatLinks
-from twistedcaldav.directory.principal import formatPrincipals
-
-from twistedcaldav.directory.util import normalizeUUID
-from twistedcaldav.config import config, fullServerPath
-from twistedcaldav.database import AbstractADBAPIDatabase, ADBAPISqliteMixin, \
-    ADBAPIPostgreSQLMixin
-from twistedcaldav.extensions import DAVPrincipalResource, \
-    DAVResourceWithChildrenMixin
 from twistedcaldav.extensions import ReadOnlyWritePropertiesResourceMixIn
 from twistedcaldav.memcacher import Memcacher
 from twistedcaldav.resource import CalDAVComplianceMixIn
+from txdav.who.delegates import RecordType as DelegateRecordType
+from txdav.xml import element as davxml
+from txdav.xml.base import dav_namespace
+from txweb2 import responsecode
+from txweb2.dav.noneprops import NonePropertyStore
+from txweb2.dav.util import joinURL
+from txweb2.http import HTTPError, StatusResponse
 
 thisModule = getModule(__name__)
 log = Logger()
 
+
 class PermissionsMixIn (ReadOnlyWritePropertiesResourceMixIn):
     def defaultAccessControlList(self):
         aces = (
@@ -86,13 +90,13 @@
             for principal in config.AdminPrincipals
         ))
 
-        return davxml.ACL(*aces)
+        return succeed(davxml.ACL(*aces))
 
 
     def accessControlList(self, request, inheritance=True, expanding=False,
                           inherited_aces=None):
         # Permissions here are fixed, and are not subject to inheritance rules, etc.
-        return succeed(self.defaultAccessControlList())
+        return self.defaultAccessControlList()
 
 
 
@@ -119,13 +123,20 @@
         record = self.resource.parent.record
         resource = self.resource
         parent = self.resource.parent
+        try:
+            if isinstance(record.guid, uuid.UUID):
+                guid = str(record.guid).upper()
+            else:
+                guid = record.guid
+        except AttributeError:
+            guid = ""
         return tag.fillSlots(
             directoryGUID=record.service.guid,
             realm=record.service.realmName,
-            guid=record.guid,
-            recordType=record.recordType,
+            guid=guid,
+            recordType=record.service.recordTypeToOldName(record.recordType),
             shortNames=record.shortNames,
-            fullName=record.fullName,
+            fullName=record.displayName,
             principalUID=parent.principalUID(),
             principalURL=formatLink(parent.principalURL()),
             proxyPrincipalUID=resource.principalUID(),
@@ -209,9 +220,13 @@
 
     def resourceType(self):
         if self.proxyType == "calendar-proxy-read":
-            return davxml.ResourceType.calendarproxyread #@UndefinedVariable
+            return davxml.ResourceType.calendarproxyread  # @UndefinedVariable
         elif self.proxyType == "calendar-proxy-write":
-            return davxml.ResourceType.calendarproxywrite #@UndefinedVariable
+            return davxml.ResourceType.calendarproxywrite  # @UndefinedVariable
+        elif self.proxyType == "calendar-proxy-read-for":
+            return davxml.ResourceType.calendarproxyreadfor  # @UndefinedVariable
+        elif self.proxyType == "calendar-proxy-write-for":
+            return davxml.ResourceType.calendarproxywritefor  # @UndefinedVariable
         else:
             return super(CalendarUserProxyPrincipalResource, self).resourceType()
 
@@ -270,7 +285,7 @@
         principals = []
         newUIDs = set()
         for uri in members:
-            principal = self.pcollection._principalForURI(uri)
+            principal = yield self.pcollection._principalForURI(uri)
             # Invalid principals MUST result in an error.
             if principal is None or principal.principalURL() != uri:
                 raise HTTPError(StatusResponse(
@@ -282,7 +297,9 @@
             newUIDs.add(principal.principalUID())
 
         # Get the old set of UIDs
-        oldUIDs = (yield self._index().getMembers(self.uid))
+        # oldUIDs = (yield self._index().getMembers(self.uid))
+        oldPrincipals = yield self.groupMembers()
+        oldUIDs = [p.principalUID() for p in oldPrincipals]
 
         # Change membership
         yield self.setGroupMemberSetPrincipals(principals)
@@ -293,19 +310,24 @@
 
         changedUIDs = newUIDs.symmetric_difference(oldUIDs)
         for uid in changedUIDs:
-            principal = self.pcollection.principalForUID(uid)
+            principal = yield self.pcollection.principalForUID(uid)
             if principal:
                 yield principal.cacheNotifier.changed()
 
         returnValue(True)
 
 
+    @inlineCallbacks
     def setGroupMemberSetPrincipals(self, principals):
-        # Map the principals to UIDs.
-        return self._index().setGroupMembers(
-            self.uid,
-            [p.principalUID() for p in principals],
+
+        # Find our pseudo-record
+        record = yield self.parent.record.service.recordWithShortName(
+            self._recordTypeFromProxyType(),
+            self.parent.principalUID()
         )
+        # Set the members
+        memberRecords = [p.record for p in principals]
+        yield record.setMembers(memberRecords)
 
 
     ##
@@ -349,7 +371,7 @@
 
 
     @inlineCallbacks
-    def _expandMemberUIDs(self, uid=None, relatives=None, uids=None, infinity=False):
+    def _expandMemberPrincipals(self, uid=None, relatives=None, uids=None, infinity=False):
         if uid is None:
             uid = self.principalUID()
         if relatives is None:
@@ -360,14 +382,14 @@
         if uid not in uids:
             from twistedcaldav.directory.principal import DirectoryPrincipalResource
             uids.add(uid)
-            principal = self.pcollection.principalForUID(uid)
+            principal = yield self.pcollection.principalForUID(uid)
             if isinstance(principal, CalendarUserProxyPrincipalResource):
                 members = yield self._directGroupMembers()
                 for member in members:
                     if member.principalUID() not in uids:
                         relatives.add(member)
                         if infinity:
-                            yield self._expandMemberUIDs(member.principalUID(), relatives, uids, infinity=infinity)
+                            yield self._expandMemberPrincipals(member.principalUID(), relatives, uids, infinity=infinity)
             elif isinstance(principal, DirectoryPrincipalResource):
                 if infinity:
                     members = yield principal.expandedGroupMembers()
@@ -378,30 +400,45 @@
         returnValue(relatives)
 
 
+    def _recordTypeFromProxyType(self):
+        return {
+            "calendar-proxy-read": DelegateRecordType.readDelegateGroup,
+            "calendar-proxy-write": DelegateRecordType.writeDelegateGroup,
+            "calendar-proxy-read-for": DelegateRecordType.readDelegatorGroup,
+            "calendar-proxy-write-for": DelegateRecordType.writeDelegatorGroup,
+        }.get(self.proxyType)
+
+
     @inlineCallbacks
     def _directGroupMembers(self):
-        # Get member UIDs from database and map to principal resources
-        members = yield self._index().getMembers(self.uid)
-        found = []
-        for uid in members:
-            p = self.pcollection.principalForUID(uid)
-            if p:
-                # Only principals enabledForLogin can be a delegate
-                # (and groups as well)
-                if (p.record.enabledForLogin or
-                    p.record.recordType == p.record.service.recordType_groups):
-                    found.append(p)
-                # Make sure any outstanding deletion timer entries for
-                # existing principals are removed
-                yield self._index().refreshPrincipal(uid)
-            else:
-                self.log.warn("Delegate is missing from directory: %s" % (uid,))
+        """
+        Fault in the record representing the sub principal for this proxy type
+        (either read-only or read-write), then fault in the direct members of
+        that record.
+        """
+        memberPrincipals = []
+        record = yield self.parent.record.service.recordWithShortName(
+            self._recordTypeFromProxyType(),
+            self.parent.principalUID()
+        )
+        if record is not None:
+            memberRecords = yield record.members()
+            for record in memberRecords:
+                if record is not None:
+                    principal = yield self.pcollection.principalForRecord(
+                        record
+                    )
+                    if principal is not None:
+                        if (
+                            principal.record.loginAllowed or
+                            principal.record.recordType is BaseRecordType.group
+                        ):
+                            memberPrincipals.append(principal)
+        returnValue(memberPrincipals)
 
-        returnValue(found)
 
-
     def groupMembers(self):
-        return self._expandMemberUIDs()
+        return self._expandMemberPrincipals()
 
 
     @inlineCallbacks
@@ -410,18 +447,12 @@
         Return the complete, flattened set of principals belonging to this
         group.
         """
-        returnValue((yield self._expandMemberUIDs(infinity=True)))
+        returnValue((yield self._expandMemberPrincipals(infinity=True)))
 
 
     def groupMemberships(self):
-        # Get membership UIDs and map to principal resources
-        d = self._index().getMemberships(self.uid)
-        d.addCallback(lambda memberships: [
-            p for p
-            in [self.pcollection.principalForUID(uid) for uid in memberships]
-            if p
-        ])
-        return d
+        # Unlikely to ever want to put a subprincipal into a group
+        return succeed([])
 
 
     @inlineCallbacks
@@ -437,7 +468,7 @@
         @return: True if principal is a proxy (of the correct type) of our parent
         @rtype: C{boolean}
         """
-        readWrite = self.isProxyType(True) # is read-write
+        readWrite = self.isProxyType(True)  # is read-write
         if principal and self.parent in (yield principal.proxyFor(readWrite)):
             returnValue(True)
         returnValue(False)
@@ -630,7 +661,7 @@
 
             overdue = yield self._memcacher.checkDeletionTimer(principalUID)
 
-            if overdue == False:
+            if overdue is False:
                 # Do nothing
                 returnValue(None)
 
@@ -855,9 +886,9 @@
         )
         if alreadyDone is None:
             for (groupname, member) in (
-                    (yield self._db_all_values_for_sql(
-                        "select GROUPNAME, MEMBER from GROUPS"))
-                ):
+                (yield self._db_all_values_for_sql(
+                    "select GROUPNAME, MEMBER from GROUPS"))
+            ):
                 grouplist = groupname.split("#")
                 grouplist[0] = normalizeUUID(grouplist[0])
                 newGroupName = "#".join(grouplist)

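With the changes above, the four proxy sub-principal collections are backed by delegate pseudo-group records from txdav.who.delegates rather than rows in the proxy database. Roughly, fetching the direct members of one of these collections now goes through recordWithShortName() and record.members(), as in this sketch (directoryService, principalUID and the function name are assumptions; only the record-type mapping is taken verbatim from the hunk above):

from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.who.delegates import RecordType as DelegateRecordType

proxyTypeToRecordType = {
    "calendar-proxy-read": DelegateRecordType.readDelegateGroup,
    "calendar-proxy-write": DelegateRecordType.writeDelegateGroup,
    "calendar-proxy-read-for": DelegateRecordType.readDelegatorGroup,
    "calendar-proxy-write-for": DelegateRecordType.writeDelegatorGroup,
}

@inlineCallbacks
def directProxyMembers(directoryService, principalUID, proxyType):
    # Fault in the pseudo-group record for this proxy collection,
    # then ask it for its direct members.
    record = yield directoryService.recordWithShortName(
        proxyTypeToRecordType[proxyType], principalUID
    )
    members = []
    if record is not None:
        members = yield record.members()
    returnValue(members)
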
Modified: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendaruserproxyloader.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendaruserproxyloader.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/calendaruserproxyloader.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -29,7 +29,6 @@
 from twext.python.log import Logger
 
 from twistedcaldav.config import config, fullServerPath
-from twistedcaldav.directory import calendaruserproxy
 from twistedcaldav.xmlutil import readXML
 
 log = Logger()
@@ -44,6 +43,7 @@
 
 ATTRIBUTE_REPEAT = "repeat"
 
+
 class XMLCalendarUserProxyLoader(object):
     """
     XML calendar user proxy configuration file parser and loader.
@@ -52,10 +52,11 @@
         return "<%s %r>" % (self.__class__.__name__, self.xmlFile)
 
 
-    def __init__(self, xmlFile):
+    def __init__(self, xmlFile, service):
 
         self.items = []
         self.xmlFile = fullServerPath(config.DataRoot, xmlFile)
+        self.service = service
 
         # Read in XML
         try:
@@ -131,7 +132,7 @@
     @inlineCallbacks
     def updateProxyDB(self):
 
-        db = calendaruserproxy.ProxyDBService
+        db = self.service
         for item in self.items:
             guid, write_proxies, read_proxies = item
             yield db.setGroupMembers("%s#%s" % (guid, "calendar-proxy-write"), write_proxies)

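XMLCalendarUserProxyLoader no longer reaches for the calendaruserproxy.ProxyDBService module global; the proxy database service is passed in explicitly and used directly by updateProxyDB(). A short sketch of the resulting calling convention (xmlFile and proxyDBService are assumed to come from the caller's configuration; the wrapper function is ours):

from twisted.internet.defer import inlineCallbacks

from twistedcaldav.directory.calendaruserproxyloader import XMLCalendarUserProxyLoader

@inlineCallbacks
def loadProxyAssignments(xmlFile, proxyDBService):
    # The proxy DB service is handed to the loader instead of being
    # looked up as a module-level global.
    loader = XMLCalendarUserProxyLoader(xmlFile, proxyDBService)
    yield loader.updateProxyDB()
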
Modified: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/common.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/common.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/common.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -37,6 +37,7 @@
 
 uidsResourceName = "__uids__"
 
+
 class CommonUIDProvisioningResource(object):
     """
     Common ancestor for addressbook/calendar UID provisioning resources.
@@ -68,10 +69,10 @@
         name = record.uid
 
         if record is None:
-            log.debug("No directory record with GUID %r" % (name,))
+            log.debug("No directory record with UID %r" % (name,))
             returnValue(None)
 
-        if not getattr(record, self.enabledAttribute):
+        if not getattr(record, self.enabledAttribute, False):
             log.debug("Directory record %r is not enabled for %s" % (
                 record, self.homeResourceTypeName))
             returnValue(None)
@@ -94,7 +95,7 @@
         if name == "":
             returnValue((self, ()))
 
-        record = self.directory.recordWithUID(name)
+        record = yield self.directory.recordWithUID(name)
         if record:
             child = yield self.homeResourceForRecord(record, request)
             returnValue((child, segments[1:]))
@@ -149,7 +150,7 @@
         if name == "":
             returnValue((self, segments[1:]))
 
-        record = self.directory.recordWithShortName(self.recordType, name)
+        record = yield self.directory.recordWithShortName(self.recordType, name)
         if record is None:
             returnValue(
                 (NotFoundResource(principalCollections=self._parent.principalCollections()), [])

Modified: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/directory-principal-resource.html
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/directory-principal-resource.html	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/directory-principal-resource.html	2014-04-04 17:20:27 UTC (rev 13158)
@@ -11,10 +11,7 @@
 GUID: <t:slot name="principalGUID"/>
 Record type: <t:slot name="recordType"/>
 Short names: <t:slot name="shortNames"/>
-Security Identities: <t:slot name="securityIDs"/>
 Full name: <t:slot name="fullName"/>
-First name: <t:slot name="firstName"/>
-Last name: <t:slot name="lastName"/>
 Email addresses:
 <t:slot name="emailAddresses" />Principal UID: <t:slot name="principalUID"/>
 Principal URL: <t:slot name="principalURL"/>

Deleted: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/directory.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/directory.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,1509 +0,0 @@
-# -*- test-case-name: twistedcaldav.directory.test -*-
-##
-# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-
-"""
-Generic directory service classes.
-"""
-
-__all__ = [
-    "DirectoryService",
-    "DirectoryRecord",
-    "DirectoryError",
-    "DirectoryConfigurationError",
-    "UnknownRecordTypeError",
-    "GroupMembershipCacheUpdater",
-]
-
-from plistlib import readPlistFromString
-
-from twext.python.log import Logger
-from txweb2.dav.auth import IPrincipalCredentials
-from txweb2.dav.util import joinURL
-
-from twisted.cred.checkers import ICredentialsChecker
-from twisted.cred.error import UnauthorizedLogin
-from twisted.internet.defer import succeed, inlineCallbacks, returnValue
-from twisted.python.filepath import FilePath
-
-from twistedcaldav.config import config
-from twistedcaldav.directory.idirectory import IDirectoryService, IDirectoryRecord
-from twistedcaldav.directory.util import uuidFromName, normalizeUUID
-from twistedcaldav.memcacher import Memcacher
-from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
-from txdav.caldav.datastore.scheduling.ischedule.localservers import Servers
-
-from txdav.caldav.icalendardirectoryservice import ICalendarStoreDirectoryService, \
-    ICalendarStoreDirectoryRecord
-
-from xml.parsers.expat import ExpatError
-
-from zope.interface import implements
-
-import cPickle as pickle
-import datetime
-import grp
-import itertools
-import os
-import pwd
-import sys
-import types
-from urllib import unquote
-
-log = Logger()
-
-
-class DirectoryService(object):
-    implements(IDirectoryService, ICalendarStoreDirectoryService, ICredentialsChecker)
-
-    log = Logger()
-
-    ##
-    # IDirectoryService
-    ##
-
-    realmName = None
-
-    recordType_users = "users"
-    recordType_people = "people"
-    recordType_groups = "groups"
-    recordType_locations = "locations"
-    recordType_resources = "resources"
-    recordType_addresses = "addresses"
-
-    searchContext_location = "location"
-    searchContext_resource = "resource"
-    searchContext_user = "user"
-    searchContext_group = "group"
-    searchContext_attendee = "attendee"
-
-    aggregateService = None
-
-    def _generatedGUID(self):
-        if not hasattr(self, "_guid"):
-            realmName = self.realmName
-
-            assert self.baseGUID, "Class %s must provide a baseGUID attribute" % (self.__class__.__name__,)
-
-            if realmName is None:
-                self.log.error("Directory service %s has no realm name or GUID; generated service GUID will not be unique." % (self,))
-                realmName = ""
-            else:
-                self.log.info("Directory service %s has no GUID; generating service GUID from realm name." % (self,))
-
-            self._guid = uuidFromName(self.baseGUID, realmName)
-
-        return self._guid
-
-    baseGUID = None
-    guid = property(_generatedGUID)
-
-    # Needed by twistedcaldav.directorybackedaddressbook
-    liveQuery = False
-
-    def setRealm(self, realmName):
-        self.realmName = realmName
-
-
-    def available(self):
-        """
-        By default, the directory is available.  This may return a boolean or a
-        Deferred which fires a boolean.
-
-        A return value of "False" means that the directory is currently
-        unavailable due to the service starting up.
-        """
-        return True
-    # end directorybackedaddressbook requirements
-
-    ##
-    # ICredentialsChecker
-    ##
-
-    # For ICredentialsChecker
-    credentialInterfaces = (IPrincipalCredentials,)
-
-    def requestAvatarId(self, credentials):
-        credentials = IPrincipalCredentials(credentials)
-
-        # FIXME: ?
-        # We were checking if principal is enabled; seems unnecessary in current
-        # implementation because you shouldn't have a principal object for a
-        # disabled directory principal.
-
-        if credentials.authnPrincipal is None:
-            raise UnauthorizedLogin("No such user: %s" % (credentials.credentials.username,))
-
-        # See if record is enabledForLogin
-        if not credentials.authnPrincipal.record.isLoginEnabled():
-            raise UnauthorizedLogin("User not allowed to log in: %s" %
-                (credentials.credentials.username,))
-
-        # Handle Kerberos as a separate behavior
-        try:
-            from twistedcaldav.authkerb import NegotiateCredentials
-        except ImportError:
-            NegotiateCredentials = None
-
-        if NegotiateCredentials and isinstance(credentials.credentials,
-                                               NegotiateCredentials):
-            # If we get here with Kerberos, then authentication has already succeeded
-            return (
-                credentials.authnPrincipal.principalURL(),
-                credentials.authzPrincipal.principalURL(),
-                credentials.authnPrincipal,
-                credentials.authzPrincipal,
-            )
-        else:
-            if credentials.authnPrincipal.record.verifyCredentials(credentials.credentials):
-                return (
-                    credentials.authnPrincipal.principalURL(),
-                    credentials.authzPrincipal.principalURL(),
-                    credentials.authnPrincipal,
-                    credentials.authzPrincipal,
-                )
-            else:
-                raise UnauthorizedLogin("Incorrect credentials for %s" % (credentials.credentials.username,))
-
-
-    def recordTypes(self):
-        raise NotImplementedError("Subclass must implement recordTypes()")
-
-
-    def listRecords(self, recordType):
-        raise NotImplementedError("Subclass must implement listRecords()")
-
-
-    def recordWithShortName(self, recordType, shortName):
-        for record in self.listRecords(recordType):
-            if shortName in record.shortNames:
-                return record
-        return None
-
-
-    def recordWithUID(self, uid):
-        uid = normalizeUUID(uid)
-        for record in self.allRecords():
-            if record.uid == uid:
-                return record
-        return None
-
-
-    def recordWithGUID(self, guid):
-        guid = normalizeUUID(guid)
-        for record in self.allRecords():
-            if record.guid == guid:
-                return record
-        return None
-
-
-    def recordWithAuthID(self, authID):
-        for record in self.allRecords():
-            if authID in record.authIDs:
-                return record
-        return None
-
-
-    def recordWithCalendarUserAddress(self, address):
-        address = normalizeCUAddr(address)
-        record = None
-        if address.startswith("urn:uuid:"):
-            guid = address[9:]
-            record = self.recordWithGUID(guid)
-        elif address.startswith("mailto:"):
-            for record in self.allRecords():
-                if address[7:] in record.emailAddresses:
-                    break
-            else:
-                return None
-        elif address.startswith("/principals/"):
-            parts = map(unquote, address.split("/"))
-            if len(parts) == 4:
-                if parts[2] == "__uids__":
-                    guid = parts[3]
-                    record = self.recordWithGUID(guid)
-                else:
-                    record = self.recordWithShortName(parts[2], parts[3])
-
-        return record if record and record.enabledForCalendaring else None
-
-
-    def recordWithCachedGroupsAlias(self, recordType, alias):
-        """
-        @param recordType: the type of the record to look up.
-        @param alias: the cached-groups alias of the record to look up.
-        @type alias: C{str}
-
-        @return: a deferred L{IDirectoryRecord} with the given cached-groups
-            alias, or C{None} if no such record is found.
-        """
-        # The default implementation uses guid
-        return succeed(self.recordWithGUID(alias))
-
-
-    def allRecords(self):
-        for recordType in self.recordTypes():
-            for record in self.listRecords(recordType):
-                yield record
-
-
-    def recordsMatchingFieldsWithCUType(self, fields, operand="or",
-        cuType=None):
-        if cuType:
-            recordType = DirectoryRecord.fromCUType(cuType)
-        else:
-            recordType = None
-
-        return self.recordsMatchingFields(fields, operand=operand,
-            recordType=recordType)
-
-
-    def recordTypesForSearchContext(self, context):
-        """
-        Map calendarserver-principal-search REPORT context value to applicable record types
-
-        @param context: The context value to map
-        @type context: C{str}
-        @returns: The list of record types the context maps to
-        @rtype: C{list} of C{str}
-        """
-        if context == self.searchContext_location:
-            recordTypes = [self.recordType_locations]
-        elif context == self.searchContext_resource:
-            recordTypes = [self.recordType_resources]
-        elif context == self.searchContext_user:
-            recordTypes = [self.recordType_users]
-        elif context == self.searchContext_group:
-            recordTypes = [self.recordType_groups]
-        elif context == self.searchContext_attendee:
-            recordTypes = [self.recordType_users, self.recordType_groups,
-                self.recordType_resources]
-        else:
-            recordTypes = list(self.recordTypes())
-        return recordTypes
-
-
-    def recordsMatchingTokens(self, tokens, context=None):
-        """
-        @param tokens: The tokens to search on
-        @type tokens: C{list} of C{str} (utf-8 bytes)
-        @param context: An indication of what the end user is searching
-            for; "attendee", "location", or None
-        @type context: C{str}
-        @return: a deferred sequence of L{IDirectoryRecord}s which
-            match the given tokens and optional context.
-
-        Each token is searched for within each record's full name and
-        email address; if each token is found within a record that
-        record is returned in the results.
-
-        If context is None, all record types are considered.  If
-        context is "location", only locations are considered.  If
-        context is "attendee", only users, groups, and resources
-        are considered.
-        """
-
-        # Default, bruteforce method; override with one optimized for each
-        # service
-
-        def fieldMatches(fieldValue, value):
-            if fieldValue is None:
-                return False
-            elif type(fieldValue) in types.StringTypes:
-                fieldValue = (fieldValue,)
-
-            for testValue in fieldValue:
-                testValue = testValue.lower()
-                value = value.lower()
-
-                try:
-                    testValue.index(value)
-                    return True
-                except ValueError:
-                    pass
-
-            return False
-
-        def recordMatches(record):
-            for token in tokens:
-                for fieldName in ["fullName", "emailAddresses"]:
-                    try:
-                        fieldValue = getattr(record, fieldName)
-                        if fieldMatches(fieldValue, token):
-                            break
-                    except AttributeError:
-                        # No value
-                        pass
-                else:
-                    return False
-            return True
-
-
-        def yieldMatches(recordTypes):
-            try:
-                for recordType in [r for r in recordTypes if r in self.recordTypes()]:
-                    for record in self.listRecords(recordType):
-                        if recordMatches(record):
-                            yield record
-
-            except UnknownRecordTypeError:
-                # Skip this service since it doesn't understand this record type
-                pass
-
-        recordTypes = self.recordTypesForSearchContext(context)
-        return succeed(yieldMatches(recordTypes))
-
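A minimal usage sketch of the brute-force token search above, assuming a hypothetical directory service instance and made-up search tokens:

    d = directory.recordsMatchingTokens(["sales", "example.com"], context="attendee")
    # The deferred fires with a generator of records whose fullName or
    # emailAddresses contain every token (case-insensitive substring match),
    # restricted to users, groups and resources by the "attendee" context.
    d.addCallback(lambda matches: [record.shortNames[0] for record in matches])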
-
-    def recordsMatchingFields(self, fields, operand="or", recordType=None):
-        # Default, bruteforce method; override with one optimized for each
-        # service
-
-        def fieldMatches(fieldValue, value, caseless, matchType):
-            if fieldValue is None:
-                return False
-            elif type(fieldValue) in types.StringTypes:
-                fieldValue = (fieldValue,)
-
-            for testValue in fieldValue:
-                if caseless:
-                    testValue = testValue.lower()
-                    value = value.lower()
-
-                if matchType == 'starts-with':
-                    if testValue.startswith(value):
-                        return True
-                elif matchType == 'contains':
-                    try:
-                        testValue.index(value)
-                        return True
-                    except ValueError:
-                        pass
-                else: # exact
-                    if testValue == value:
-                        return True
-
-            return False
-
-        def recordMatches(record):
-            if operand == "and":
-                for fieldName, value, caseless, matchType in fields:
-                    try:
-                        fieldValue = getattr(record, fieldName)
-                        if not fieldMatches(fieldValue, value, caseless,
-                            matchType):
-                            return False
-                    except AttributeError:
-                        # No property => no match
-                        return False
-                # we hit on every property
-                return True
-            else: # "or"
-                for fieldName, value, caseless, matchType in fields:
-                    try:
-                        fieldValue = getattr(record, fieldName)
-                        if fieldMatches(fieldValue, value, caseless,
-                            matchType):
-                            return True
-                    except AttributeError:
-                        # No value
-                        pass
-                # we didn't hit any
-                return False
-
-        def yieldMatches(recordType):
-            try:
-                if recordType is None:
-                    recordTypes = list(self.recordTypes())
-                else:
-                    recordTypes = (recordType,)
-
-                for recordType in recordTypes:
-                    for record in self.listRecords(recordType):
-                        if recordMatches(record):
-                            yield record
-
-            except UnknownRecordTypeError:
-                # Skip this service since it doesn't understand this record type
-                pass
-
-        return succeed(yieldMatches(recordType))
-
-
-    def getGroups(self, guids):
-        """
-        This implementation returns all groups, not just the ones specified
-        by guids
-        """
-        return succeed(self.listRecords(self.recordType_groups))
-
-
-    def getResourceInfo(self):
-        return ()
-
-
-    def isAvailable(self):
-        return True
-
-
-    def getParams(self, params, defaults, ignore=None):
-        """ Checks configuration parameters for unexpected/ignored keys, and
-            applies default values. """
-
-        keys = set(params.keys())
-
-        result = {}
-        for key in defaults.iterkeys():
-            if key in params:
-                result[key] = params[key]
-                keys.remove(key)
-            else:
-                result[key] = defaults[key]
-
-        if ignore:
-            for key in ignore:
-                if key in params:
-                    self.log.warn("Ignoring obsolete directory service parameter: %s" % (key,))
-                    keys.remove(key)
-
-        if keys:
-            raise DirectoryConfigurationError("Invalid directory service parameter(s): %s" % (", ".join(list(keys)),))
-        return result
-
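A small illustration of how getParams() applies defaults and rejects unknown keys; the service instance, keys and values here are hypothetical:

    defaults = {"cacheTimeout": 30, "negativeCaching": False}
    params = {"cacheTimeout": 10, "xmlFile": "/etc/users.xml"}
    result = service.getParams(params, defaults, ignore=("xmlFile",))
    # result == {"cacheTimeout": 10, "negativeCaching": False}; a warning is
    # logged for the obsolete "xmlFile" key.  A key that is neither a default
    # nor listed in ignore raises DirectoryConfigurationError.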
-
-    def parseResourceInfo(self, plist, guid, recordType, shortname):
-        """
-        Parse ResourceInfo plist and extract information that the server needs.
-
-        @param plist: the plist that is the attribute value.
-        @type plist: str
-        @param guid: the directory GUID of the record being parsed.
-        @type guid: str
-        @param recordType: the type of the record being parsed.
-        @type recordType: str
-        @param shortname: the record shortname of the record being parsed.
-        @type shortname: str
-        @return: a C{tuple} of C{bool} for auto-accept, C{str} for proxy GUID,
-            C{str} for read-only proxy GUID, and C{str} for the auto-accept group GUID.
-        """
-        try:
-            plist = readPlistFromString(plist)
-            wpframework = plist.get("com.apple.WhitePagesFramework", {})
-            autoaccept = wpframework.get("AutoAcceptsInvitation", False)
-            proxy = wpframework.get("CalendaringDelegate", None)
-            read_only_proxy = wpframework.get("ReadOnlyCalendaringDelegate", None)
-            autoAcceptGroup = wpframework.get("AutoAcceptGroup", "")
-        except (ExpatError, AttributeError), e:
-            self.log.error(
-                "Failed to parse ResourceInfo attribute of record (%s)%s (guid=%s): %s\n%s" %
-                (recordType, shortname, guid, e, plist,)
-            )
-            raise ValueError("Invalid ResourceInfo")
-
-        return (autoaccept, proxy, read_only_proxy, autoAcceptGroup)
-
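For reference, a rough sketch of the ResourceInfo plist shape parsed above; the delegate GUID is made up:

    resourceInfo = """<?xml version="1.0" encoding="UTF-8"?>
    <plist version="1.0">
    <dict>
        <key>com.apple.WhitePagesFramework</key>
        <dict>
            <key>AutoAcceptsInvitation</key><true/>
            <key>CalendaringDelegate</key>
            <string>6C6CD280-E8E5-4F44-AF39-7D90B1C5B4F1</string>
        </dict>
    </dict>
    </plist>"""
    # parseResourceInfo(resourceInfo, guid, recordType, shortname) would return
    # (True, "6C6CD280-E8E5-4F44-AF39-7D90B1C5B4F1", None, "")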
-
-    def getExternalProxyAssignments(self):
-        """
-        Retrieve proxy assignments for locations and resources from the
-        directory and return a list of (principalUID, [memberUIDs]) tuples,
-        suitable for passing to proxyDB.setGroupMembers( )
-
-        This generic implementation fetches all locations and resources.
-        More specialized implementations can perform whatever operation is
-        most efficient for their particular directory service.
-        """
-        assignments = []
-
-        resources = itertools.chain(
-            self.listRecords(self.recordType_locations),
-            self.listRecords(self.recordType_resources)
-        )
-        for record in resources:
-            guid = record.guid
-            if record.enabledForCalendaring:
-                assignments.append(("%s#calendar-proxy-write" % (guid,),
-                                   record.externalProxies()))
-                assignments.append(("%s#calendar-proxy-read" % (guid,),
-                                   record.externalReadOnlyProxies()))
-
-        return assignments
-
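The structure returned above looks roughly like this for a single calendar-enabled location (identifiers hypothetical):

    # [("LOCATION-GUID#calendar-proxy-write", set(["DELEGATE-GUID"])),
    #  ("LOCATION-GUID#calendar-proxy-read", set(["READONLY-DELEGATE-GUID"]))]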
-
-    def createRecord(self, recordType, guid=None, shortNames=(), authIDs=set(),
-        fullName=None, firstName=None, lastName=None, emailAddresses=set(),
-        uid=None, password=None, **kwargs):
-        """
-        Create/persist a directory record based on the given values
-        """
-        raise NotImplementedError("Subclass must implement createRecord")
-
-
-    def updateRecord(self, recordType, guid=None, shortNames=(), authIDs=set(),
-        fullName=None, firstName=None, lastName=None, emailAddresses=set(),
-        uid=None, password=None, **kwargs):
-        """
-        Update/persist a directory record based on the given values
-        """
-        raise NotImplementedError("Subclass must implement updateRecord")
-
-
-    def destroyRecord(self, recordType, guid=None):
-        """
-        Remove a directory record from the directory
-        """
-        raise NotImplementedError("Subclass must implement destroyRecord")
-
-
-    def createRecords(self, data):
-        """
-        Create directory records in bulk
-        """
-        raise NotImplementedError("Subclass must implement createRecords")
-
-
-    def setPrincipalCollection(self, principalCollection):
-        """
-        Set the principal service that the directory relies on for doing proxy tests.
-
-        @param principalCollection: the principal collection resource.
-        @type principalCollection: L{DirectoryProvisioningResource}
-        """
-        self.principalCollection = principalCollection
-
-
-    def isProxyFor(self, test, other):
-        """
-        Test whether one record is a calendar user proxy for the specified record.
-
-        @param test: record to test
-        @type test: L{DirectoryRecord}
-        @param other: record to check against
-        @type other: L{DirectoryRecord}
-
-        @return: C{True} if test is a proxy of other.
-        @rtype: C{bool}
-        """
-        return self.principalCollection.isProxyFor(test, other)
-
-
-
-class GroupMembershipCache(Memcacher):
-    """
-    Caches group membership information
-
-    This cache is periodically updated by a sidecar so that worker processes
-    never have to ask the directory service directly for group membership
-    information.
-
-    Keys in this cache are:
-
-    "groups-for:<GUID>" : comma-separated list of groups that GUID is a member
-    of.  Note that when using LDAP, the key for this is an LDAP DN.
-
-    "group-cacher-populated" : contains a datestamp indicating the most recent
-    population.
-    """
-    log = Logger()
-
-    def __init__(self, namespace, pickle=True, no_invalidation=False,
-        key_normalization=True, expireSeconds=0, lockSeconds=60):
-
-        super(GroupMembershipCache, self).__init__(namespace, pickle=pickle,
-            no_invalidation=no_invalidation,
-            key_normalization=key_normalization)
-
-        self.expireSeconds = expireSeconds
-        self.lockSeconds = lockSeconds
-
-
-    def setGroupsFor(self, guid, memberships):
-        self.log.debug("set groups-for %s : %s" % (guid, memberships))
-        return self.set("groups-for:%s" %
-            (str(guid)), memberships,
-            expireTime=self.expireSeconds)
-
-
-    def getGroupsFor(self, guid):
-        self.log.debug("get groups-for %s" % (guid,))
-        def _value(value):
-            if value:
-                return value
-            else:
-                return set()
-        d = self.get("groups-for:%s" % (str(guid),))
-        d.addCallback(_value)
-        return d
-
-
-    def deleteGroupsFor(self, guid):
-        self.log.debug("delete groups-for %s" % (guid,))
-        return self.delete("groups-for:%s" % (str(guid),))
-
-
-    def setPopulatedMarker(self):
-        self.log.debug("set group-cacher-populated")
-        return self.set("group-cacher-populated", str(datetime.datetime.now()))
-
-
-    @inlineCallbacks
-    def isPopulated(self):
-        self.log.debug("is group-cacher-populated")
-        value = (yield self.get("group-cacher-populated"))
-        returnValue(value is not None)
-
-
-    def acquireLock(self):
-        """
-        Acquire a memcached lock named group-cacher-lock
-
-        @return: Deferred firing True if successful, False if someone already has
-            the lock
-        """
-        self.log.debug("add group-cacher-lock")
-        return self.add("group-cacher-lock", "1", expireTime=self.lockSeconds)
-
-
-    def extendLock(self):
-        """
-        Update the expiration time of the memcached lock.
-
-        @return: Deferred firing True if successful, False otherwise
-        """
-        self.log.debug("extend group-cacher-lock")
-        return self.set("group-cacher-lock", "1", expireTime=self.lockSeconds)
-
-
-    def releaseLock(self):
-        """
-        Release the memcached lock.
-
-        @return: Deferred firing True if successful, False otherwise
-        """
-        self.log.debug("delete group-cacher-lock")
-        return self.delete("group-cacher-lock")
-
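A minimal usage sketch, assuming a configured memcached pool and hypothetical namespace and GUIDs:

    cache = GroupMembershipCache("GroupMembership", expireSeconds=7200, lockSeconds=60)
    d = cache.setGroupsFor("MEMBER-GUID", set(["GROUP-GUID-1", "GROUP-GUID-2"]))
    # Later, typically from a worker process:
    d = cache.getGroupsFor("MEMBER-GUID")
    d.addCallback(lambda groups: sorted(groups))  # empty set if nothing is cached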
-
-
-class GroupMembershipCacheUpdater(object):
-    """
-    Responsible for updating memcached with group memberships.  This will run
-    in a sidecar.  There are two sources of proxy data to pull from: the local
-    proxy database, and the location/resource info in the directory system.
-    """
-    log = Logger()
-
-    def __init__(self, proxyDB, directory, updateSeconds, expireSeconds,
-        lockSeconds, cache=None, namespace=None, useExternalProxies=False,
-        externalProxiesSource=None):
-        self.proxyDB = proxyDB
-        self.directory = directory
-        self.updateSeconds = updateSeconds
-        self.useExternalProxies = useExternalProxies
-        if useExternalProxies and externalProxiesSource is None:
-            externalProxiesSource = self.directory.getExternalProxyAssignments
-        self.externalProxiesSource = externalProxiesSource
-
-        if cache is None:
-            assert namespace is not None, "namespace must be specified if GroupMembershipCache is not provided"
-            cache = GroupMembershipCache(namespace, expireSeconds=expireSeconds,
-                lockSeconds=lockSeconds)
-        self.cache = cache
-
-
-    @inlineCallbacks
-    def getGroups(self, guids=None):
-        """
-        Retrieve all groups and their member info (but don't actually fault in
-        the records of the members), and return two dictionaries.  The first
-        maps each group to the set of its members; its keys and values are the
-        identifiers used by the directory service to specify members.  In
-        OpenDirectory these would be guids, but in LDAP these could be DNs, or
-        some other attribute.  This attribute can be retrieved from a record
-        using record.cachedGroupsAlias().
-        The second dictionary maps each group's guid to its member-attribute
-        alias.  These dictionaries are used to reverse-index the
-        groups that users are in by expandedMembers().
-
-        @param guids: if provided, retrieve only the groups corresponding to
-            these guids (including their sub groups)
-        @type guids: list of guid strings
-        """
-        groups = {}
-        aliases = {}
-
-        if guids is None: # get all group guids
-            records = self.directory.listRecords(self.directory.recordType_groups)
-        else: # get only the ones we know have been delegated to
-            records = (yield self.directory.getGroups(guids))
-
-        for record in records:
-            alias = record.cachedGroupsAlias()
-            groups[alias] = record.memberGUIDs()
-            aliases[record.guid] = alias
-
-        returnValue((groups, aliases))
-
-
-    def expandedMembers(self, groups, guid, members=None, seen=None):
-        """
-        Return the complete, flattened set of members of a group, including
-        all sub-groups, based on the group hierarchy described in the
-        groups dictionary.
-        """
-        if members is None:
-            members = set()
-        if seen is None:
-            seen = set()
-
-        if guid not in seen:
-            seen.add(guid)
-            for member in groups[guid]:
-                members.add(member)
-                if member in groups: # it's a group then
-                    self.expandedMembers(groups, member, members=members,
-                                         seen=seen)
-        return members
-
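A toy illustration of the flattening above, assuming an updater instance; the identifiers are made-up cachedGroupsAlias() values:

    groups = {
        "TOP-GROUP": set(["USER-A", "SUB-GROUP"]),
        "SUB-GROUP": set(["USER-B", "TOP-GROUP"]),  # cycle, guarded by `seen`
    }
    # updater.expandedMembers(groups, "TOP-GROUP") returns
    # set(["USER-A", "SUB-GROUP", "USER-B", "TOP-GROUP"])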
-
-    @inlineCallbacks
-    def updateCache(self, fast=False):
-        """
-        Iterate the proxy database to retrieve all the principals who have been
-        delegated to.  Fault these principals in.  For any of these principals
-        that are groups, expand the members of that group and store those in
-        the cache
-
-        If fast=True, we're in quick-start mode, used only by the master process
-        to start servicing requests as soon as possible.  In this mode we look
-        for DataRoot/memberships_cache which is a pickle of a dictionary whose
-        keys are guids (except when using LDAP where the keys will be DNs), and
-        the values are lists of group guids.  If the cache file does not exist
-        we switch to fast=False.
-
-        The return value is mainly used for unit tests; it's a tuple containing
-        the (possibly modified) value for fast, and the number of members loaded
-        into the cache (which can be zero if fast=True and isPopulated(), or
-        fast=False and the cache is locked by someone else).
-
-        The pickled snapshot file is a dict whose keys represent a record and
-        the values are the guids of the groups that record is a member of.  The
-        keys are normally guids except in the case of a directory system like LDAP
-        where there can be a different attribute used for referring to members,
-        such as a DN.
-        """
-
-        # TODO: add memcached eviction protection
-
-        useLock = True
-
-        # See if anyone has completely populated the group membership cache
-        isPopulated = (yield self.cache.isPopulated())
-
-        if fast:
-            # We're in quick-start mode.  Check first to see if someone has
-            # populated the membership cache, and if so, return immediately
-            if isPopulated:
-                self.log.info("Group membership cache is already populated")
-                returnValue((fast, 0, 0))
-
-            # We don't care what others are doing right now, we need to update
-            useLock = False
-
-        self.log.info("Updating group membership cache")
-
-        dataRoot = FilePath(config.DataRoot)
-        membershipsCacheFile = dataRoot.child("memberships_cache")
-        extProxyCacheFile = dataRoot.child("external_proxy_cache")
-
-        if not membershipsCacheFile.exists():
-            self.log.info("Group membership snapshot file does not yet exist")
-            fast = False
-            previousMembers = {}
-            callGroupsChanged = False
-        else:
-            self.log.info("Group membership snapshot file exists: %s" %
-                (membershipsCacheFile.path,))
-            callGroupsChanged = True
-            try:
-                previousMembers = pickle.loads(membershipsCacheFile.getContent())
-            except:
-                self.log.warn("Could not parse snapshot; will regenerate cache")
-                fast = False
-                previousMembers = {}
-                callGroupsChanged = False
-
-        if useLock:
-            self.log.info("Attempting to acquire group membership cache lock")
-            acquiredLock = (yield self.cache.acquireLock())
-            if not acquiredLock:
-                self.log.info("Group membership cache lock held by another process")
-                returnValue((fast, 0, 0))
-            self.log.info("Acquired lock")
-
-        if not fast and self.useExternalProxies:
-
-            # Load in cached copy of external proxies so we can diff against them
-            previousAssignments = []
-            if extProxyCacheFile.exists():
-                self.log.info("External proxies snapshot file exists: %s" %
-                    (extProxyCacheFile.path,))
-                try:
-                    previousAssignments = pickle.loads(extProxyCacheFile.getContent())
-                except:
-                    self.log.warn("Could not parse external proxies snapshot")
-                    previousAssignments = []
-
-            if useLock:
-                yield self.cache.extendLock()
-
-            self.log.info("Retrieving proxy assignments from directory")
-            assignments = self.externalProxiesSource()
-            self.log.info("%d proxy assignments retrieved from directory" %
-                (len(assignments),))
-
-            if useLock:
-                yield self.cache.extendLock()
-
-            changed, removed = diffAssignments(previousAssignments, assignments)
-            # changed is the list of proxy assignments (either new or updates).
-            # removed is the list of principals who used to have an external
-            #   delegate but don't anymore.
-
-            # populate proxy DB from external resource info
-            if changed:
-                self.log.info("Updating proxy assignments")
-                assignmentCount = 0
-                totalNumAssignments = len(changed)
-                currentAssignmentNum = 0
-                for principalUID, members in changed:
-                    currentAssignmentNum += 1
-                    if currentAssignmentNum % 1000 == 0:
-                        self.log.info("...proxy assignment %d of %d" % (currentAssignmentNum,
-                            totalNumAssignments))
-                    try:
-                        current = (yield self.proxyDB.getMembers(principalUID))
-                        if members != current:
-                            assignmentCount += 1
-                            yield self.proxyDB.setGroupMembers(principalUID, members)
-                    except Exception, e:
-                        self.log.error("Unable to update proxy assignment: principal=%s, members=%s, error=%s" % (principalUID, members, e))
-                self.log.info("Updated %d assignment%s in proxy database" %
-                    (assignmentCount, "" if assignmentCount == 1 else "s"))
-
-            if removed:
-                self.log.info("Deleting proxy assignments")
-                assignmentCount = 0
-                totalNumAssignments = len(removed)
-                currentAssignmentNum = 0
-                for principalUID in removed:
-                    currentAssignmentNum += 1
-                    if currentAssignmentNum % 1000 == 0:
-                        self.log.info("...proxy assignment %d of %d" % (currentAssignmentNum,
-                            totalNumAssignments))
-                    try:
-                        assignmentCount += 1
-                        yield self.proxyDB.setGroupMembers(principalUID, [])
-                    except Exception, e:
-                        self.log.error("Unable to remove proxy assignment: principal=%s, members=%s, error=%s" % (principalUID, members, e))
-                self.log.info("Removed %d assignment%s from proxy database" %
-                    (assignmentCount, "" if assignmentCount == 1 else "s"))
-
-            # Store external proxy snapshot
-            self.log.info("Taking snapshot of external proxies to %s" %
-                (extProxyCacheFile.path,))
-            extProxyCacheFile.setContent(pickle.dumps(assignments))
-
-        if fast:
-            # If there is an on-disk snapshot of the membership information,
-            # load that and put into memcached, bypassing the faulting in of
-            # any records, so that the server can start up quickly.
-
-            self.log.info("Loading group memberships from snapshot")
-            members = pickle.loads(membershipsCacheFile.getContent())
-
-        else:
-            # Fetch the group hierarchy from the directory, fetch the list
-            # of delegated-to guids, intersect those and build a dictionary
-            # containing which delegated-to groups a user is a member of
-
-            self.log.info("Retrieving list of all proxies")
-            # This is always a set of guids:
-            delegatedGUIDs = set((yield self.proxyDB.getAllMembers()))
-            self.log.info("There are %d proxies" % (len(delegatedGUIDs),))
-            self.log.info("Retrieving group hierarchy from directory")
-
-            # "groups" maps a group to its members; the keys and values consist
-            # of whatever directory attribute is used to refer to members.  The
-            # attribute value comes from record.cachedGroupsAlias().
-            # "aliases" maps the record.cachedGroupsAlias() value for a group
-            # back to the group's guid.
-            groups, aliases = (yield self.getGroups(guids=delegatedGUIDs))
-            groupGUIDs = set(aliases.keys())
-            self.log.info("%d groups retrieved from the directory" %
-                (len(groupGUIDs),))
-
-            delegatedGUIDs = delegatedGUIDs.intersection(groupGUIDs)
-            self.log.info("%d groups are proxies" % (len(delegatedGUIDs),))
-
-            # Reverse index the group membership from cache
-            members = {}
-            for groupGUID in delegatedGUIDs:
-                groupMembers = self.expandedMembers(groups, aliases[groupGUID])
-                # groupMembers is in cachedGroupsAlias() format
-                for member in groupMembers:
-                    memberships = members.setdefault(member, set())
-                    memberships.add(groupGUID)
-
-            self.log.info("There are %d users delegated-to via groups" %
-                (len(members),))
-
-            # Store snapshot
-            self.log.info("Taking snapshot of group memberships to %s" %
-                (membershipsCacheFile.path,))
-            membershipsCacheFile.setContent(pickle.dumps(members))
-
-            # Update ownership
-            uid = gid = -1
-            if config.UserName:
-                uid = pwd.getpwnam(config.UserName).pw_uid
-            if config.GroupName:
-                gid = grp.getgrnam(config.GroupName).gr_gid
-            os.chown(membershipsCacheFile.path, uid, gid)
-            if extProxyCacheFile.exists():
-                os.chown(extProxyCacheFile.path, uid, gid)
-
-        self.log.info("Storing %d group memberships in memcached" %
-                       (len(members),))
-        changedMembers = set()
-        totalNumMembers = len(members)
-        currentMemberNum = 0
-        for member, groups in members.iteritems():
-            currentMemberNum += 1
-            if currentMemberNum % 1000 == 0:
-                self.log.info("...membership %d of %d" % (currentMemberNum,
-                    totalNumMembers))
-            # self.log.debug("%s is in %s" % (member, groups))
-            yield self.cache.setGroupsFor(member, groups)
-            if groups != previousMembers.get(member, None):
-                # This principal has had a change in group membership
-                # so invalidate the PROPFIND response cache
-                changedMembers.add(member)
-            try:
-                # Remove from previousMembers; anything still left in
-                # previousMembers when this loop is done will be
-                # deleted from cache (since only members that were
-                # previously in delegated-to groups but are no longer
-                # would still be in previousMembers)
-                del previousMembers[member]
-            except KeyError:
-                pass
-
-        # Remove entries for principals that no longer are in delegated-to
-        # groups
-        for member, groups in previousMembers.iteritems():
-            yield self.cache.deleteGroupsFor(member)
-            changedMembers.add(member)
-
-        # For principals whose group membership has changed, call groupsChanged()
-        if callGroupsChanged and not fast and hasattr(self.directory, "principalCollection"):
-            for member in changedMembers:
-                record = yield self.directory.recordWithCachedGroupsAlias(
-                    self.directory.recordType_users, member)
-                if record is not None:
-                    principal = self.directory.principalCollection.principalForRecord(record)
-                    if principal is not None:
-                        self.log.debug("Group membership changed for %s (%s)" %
-                            (record.shortNames[0], record.guid,))
-                        if hasattr(principal, "groupsChanged"):
-                            yield principal.groupsChanged()
-
-        yield self.cache.setPopulatedMarker()
-
-        if useLock:
-            self.log.info("Releasing lock")
-            yield self.cache.releaseLock()
-
-        self.log.info("Group memberships cache updated")
-
-        returnValue((fast, len(members), len(changedMembers)))
-
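A rough sketch of how a sidecar might drive this method, plus the snapshot format described in the docstring; the intervals, namespace and GUIDs are hypothetical:

    updater = GroupMembershipCacheUpdater(
        proxyDB, directory,
        30 * 60,   # updateSeconds
        60 * 60,   # expireSeconds
        60,        # lockSeconds
        namespace="GroupMembership",
    )
    d = updater.updateCache(fast=True)
    # The deferred fires with (fast, numMembersLoaded, numMembersChanged).

    # DataRoot/memberships_cache is simply a pickled dict of this shape:
    # {"USER-GUID-1": set(["GROUP-GUID-A"]),
    #  "USER-GUID-2": set(["GROUP-GUID-A", "GROUP-GUID-B"])}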
-
-
-def diffAssignments(old, new):
-    """
-    Compare two proxy assignment lists and return their differences in the form of
-    two lists -- one for added/updated assignments, and one for removed assignments.
-    @param old: list of (group, set(members)) tuples
-    @type old: C{list}
-    @param new: list of (group, set(members)) tuples
-    @type new: C{list}
-    @return: Tuple of two lists; the first list contains tuples of (proxy-principal,
-        set(members)), and represents all the new or updated assignments.  The
-        second list contains all the proxy-principals which used to have a delegate
-        but don't anymore.
-    """
-    old = dict(old)
-    new = dict(new)
-    changed = []
-    removed = []
-    for key in old.iterkeys():
-        if key not in new:
-            removed.append(key)
-        else:
-            if old[key] != new[key]:
-                changed.append((key, new[key]))
-    for key in new.iterkeys():
-        if key not in old:
-            changed.append((key, new[key]))
-    return changed, removed
-
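A worked example (principal identifiers made up; the ordering of `changed` depends on dict iteration order):

    old = [("LOC-1#calendar-proxy-write", set(["GUID-A"])),
           ("LOC-2#calendar-proxy-write", set(["GUID-B"]))]
    new = [("LOC-1#calendar-proxy-write", set(["GUID-A", "GUID-C"])),
           ("LOC-3#calendar-proxy-write", set(["GUID-D"]))]
    changed, removed = diffAssignments(old, new)
    # changed contains ("LOC-1#calendar-proxy-write", set(["GUID-A", "GUID-C"]))
    # and ("LOC-3#calendar-proxy-write", set(["GUID-D"]))
    # removed == ["LOC-2#calendar-proxy-write"]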
-
-
-class DirectoryRecord(object):
-    log = Logger()
-
-    implements(IDirectoryRecord, ICalendarStoreDirectoryRecord)
-
-    def __repr__(self):
-        return "<%s[%s@%s(%s)] %s(%s) %r @ %s>" % (
-            self.__class__.__name__,
-            self.recordType,
-            self.service.guid,
-            self.service.realmName,
-            self.guid,
-            ",".join(self.shortNames),
-            self.fullName,
-            self.serverURI(),
-        )
-
-
-    def __init__(
-        self, service, recordType, guid=None,
-        shortNames=(), authIDs=set(), fullName=None,
-        firstName=None, lastName=None, emailAddresses=set(),
-        calendarUserAddresses=set(),
-        autoSchedule=False, autoScheduleMode=None,
-        autoAcceptGroup="",
-        enabledForCalendaring=None,
-        enabledForAddressBooks=None,
-        uid=None,
-        enabledForLogin=True,
-        extProxies=(), extReadOnlyProxies=(),
-        **kwargs
-    ):
-        assert service.realmName is not None
-        assert recordType
-        assert shortNames and isinstance(shortNames, tuple)
-
-        guid = normalizeUUID(guid)
-
-        if uid is None:
-            uid = guid
-
-        if fullName is None:
-            fullName = ""
-
-        self.service = service
-        self.recordType = recordType
-        self.guid = guid
-        self.uid = uid
-        self.enabled = False
-        self.serverID = ""
-        self.shortNames = shortNames
-        self.authIDs = authIDs
-        self.fullName = fullName
-        self.firstName = firstName
-        self.lastName = lastName
-        self.emailAddresses = emailAddresses
-        self.enabledForCalendaring = enabledForCalendaring
-        self.autoSchedule = autoSchedule
-        self.autoScheduleMode = autoScheduleMode
-        self.autoAcceptGroup = autoAcceptGroup
-        self.enabledForAddressBooks = enabledForAddressBooks
-        self.enabledForLogin = enabledForLogin
-        self.extProxies = extProxies
-        self.extReadOnlyProxies = extReadOnlyProxies
-        self.extras = kwargs
-
-
-    def get_calendarUserAddresses(self):
-        """
-        Dynamically construct a calendarUserAddresses attribute which describes
-        this L{DirectoryRecord}.
-
-        @see: L{IDirectoryRecord.calendarUserAddresses}.
-        """
-        if not self.enabledForCalendaring:
-            return frozenset()
-        cuas = set(
-            ["mailto:%s" % (emailAddress,)
-             for emailAddress in self.emailAddresses]
-        )
-        if self.guid:
-            cuas.add("urn:uuid:%s" % (self.guid,))
-            cuas.add(joinURL("/principals", "__uids__", self.guid) + "/")
-        for shortName in self.shortNames:
-            cuas.add(joinURL("/principals", self.recordType, shortName,) + "/")
-
-        return frozenset(cuas)
-
-    calendarUserAddresses = property(get_calendarUserAddresses)
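For a hypothetical calendar-enabled user record with short name "user01", email user01@example.com and guid 8B4288F6-CC82-491D-8EF9-642EF4F3E7D0 (all made up), the computed set contains:

    # mailto:user01@example.com
    # urn:uuid:8B4288F6-CC82-491D-8EF9-642EF4F3E7D0
    # /principals/__uids__/8B4288F6-CC82-491D-8EF9-642EF4F3E7D0/
    # /principals/users/user01/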
-
-    def __cmp__(self, other):
-        if not isinstance(other, DirectoryRecord):
-            return NotImplemented
-
-        for attr in ("service", "recordType", "shortNames", "guid"):
-            diff = cmp(getattr(self, attr), getattr(other, attr))
-            if diff != 0:
-                return diff
-        return 0
-
-
-    def __hash__(self):
-        h = hash(self.__class__.__name__)
-        for attr in ("service", "recordType", "shortNames", "guid",
-                     "enabled", "enabledForCalendaring"):
-            h = (h + hash(getattr(self, attr))) & sys.maxint
-
-        return h
-
-
-    def cacheToken(self):
-        """
-        Generate a token that can be uniquely used to identify the state of this record for use
-        in a cache.
-        """
-        return hash((
-            self.__class__.__name__,
-            self.service.realmName,
-            self.recordType,
-            self.shortNames,
-            self.guid,
-            self.enabled,
-            self.enabledForCalendaring,
-        ))
-
-
-    def addAugmentInformation(self, augment):
-
-        if augment:
-            self.enabled = augment.enabled
-            self.serverID = augment.serverID
-            self.enabledForCalendaring = augment.enabledForCalendaring
-            self.enabledForAddressBooks = augment.enabledForAddressBooks
-            self.autoSchedule = augment.autoSchedule
-            self.autoScheduleMode = augment.autoScheduleMode
-            self.autoAcceptGroup = augment.autoAcceptGroup
-            self.enabledForLogin = augment.enabledForLogin
-
-            if (self.enabledForCalendaring or self.enabledForAddressBooks) and self.recordType == self.service.recordType_groups:
-                self.enabledForCalendaring = False
-                self.enabledForAddressBooks = False
-
-                # For augment records cloned from the Default augment record,
-                # don't emit this message:
-                if not augment.clonedFromDefault:
-                    self.log.error("Group '%s(%s)' cannot be enabled for calendaring or address books" % (self.guid, self.shortNames[0],))
-
-        else:
-            # Groups are by default always enabled
-            self.enabled = (self.recordType == self.service.recordType_groups)
-            self.serverID = ""
-            self.enabledForCalendaring = False
-            self.enabledForAddressBooks = False
-            self.enabledForLogin = False
-
-
-    def applySACLs(self):
-        """
-        Disable calendaring and addressbooks as dictated by SACLs
-        """
-
-        if config.EnableSACLs and self.CheckSACL:
-            username = self.shortNames[0]
-            if self.CheckSACL(username, "calendar") != 0:
-                self.log.debug("%s is not enabled for calendaring due to SACL"
-                               % (username,))
-                self.enabledForCalendaring = False
-            if self.CheckSACL(username, "addressbook") != 0:
-                self.log.debug("%s is not enabled for addressbooks due to SACL"
-                               % (username,))
-                self.enabledForAddressBooks = False
-
-
-    def displayName(self):
-        return self.fullName if self.fullName else self.shortNames[0]
-
-
-    def isLoginEnabled(self):
-        """
-        Returns True if the user should be allowed to log in, based on the
-        enabledForLogin attribute, which is currently controlled by the
-        DirectoryService implementation.
-        """
-        return self.enabledForLogin
-
-
-    def members(self):
-        return ()
-
-
-    def expandedMembers(self, members=None, seen=None):
-        """
-        Return the complete, flattened set of members of a group, including
-        all sub-groups.
-        """
-        if members is None:
-            members = set()
-        if seen is None:
-            seen = set()
-
-        if self not in seen:
-            seen.add(self)
-            for member in self.members():
-                members.add(member)
-                if member.recordType == self.service.recordType_groups:
-                    member.expandedMembers(members=members, seen=seen)
-
-        return members
-
-
-    def groups(self):
-        return ()
-
-
-    def cachedGroups(self):
-        """
-        Return the set of groups (guids) this record is a member of, based on
-        the data cached by cacheGroupMembership( )
-        """
-        return self.service.groupMembershipCache.getGroupsFor(self.cachedGroupsAlias())
-
-
-    def cachedGroupsAlias(self):
-        """
-        The GroupMembershipCache uses keys based on this value.  Normally it's
-        a record's guid but in a directory system like LDAP which can use a
-        different attribute to refer to group members, we need to be able to
-        look up an entry in the GroupMembershipCache by that attribute.
-        Subclasses which don't use record.guid to look up group membership
-        should override this method.
-        """
-        return self.guid
-
-
-    def externalProxies(self):
-        """
-        Return the set of proxies defined in the directory service, as opposed
-        to assignments in the proxy DB itself.
-        """
-        return set(self.extProxies)
-
-
-    def externalReadOnlyProxies(self):
-        """
-        Return the set of read-only proxies defined in the directory service,
-        as opposed to assignments in the proxy DB itself.
-        """
-        return set(self.extReadOnlyProxies)
-
-
-    def memberGUIDs(self):
-        """
-        Return the set of GUIDs that are members of this group
-        """
-        return set()
-
-
-    def verifyCredentials(self, credentials):
-        return False
-
-
-    def calendarsEnabled(self):
-        return config.EnableCalDAV and self.enabledForCalendaring
-
-
-    def canonicalCalendarUserAddress(self):
-        """
-            Return a CUA for this principal, preferring in this order:
-            urn:uuid: form
-            mailto: form
-            first in calendarUserAddresses list
-        """
-
-        cua = ""
-        for candidate in self.calendarUserAddresses:
-            # Pick the first one, but urn:uuid: and mailto: can override
-            if not cua:
-                cua = candidate
-            # But always immediately choose the urn:uuid: form
-            if candidate.startswith("urn:uuid:"):
-                cua = candidate
-                break
-            # Prefer mailto: if no urn:uuid:
-            elif candidate.startswith("mailto:"):
-                cua = candidate
-        return cua
-
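Given the hypothetical addresses listed after calendarUserAddresses above:

    # record.canonicalCalendarUserAddress() returns
    # "urn:uuid:8B4288F6-CC82-491D-8EF9-642EF4F3E7D0"; without a urn:uuid: form
    # it falls back to the mailto: form, then to whichever address comes first.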
-
-    def enabledAsOrganizer(self):
-        if self.recordType == DirectoryService.recordType_users:
-            return True
-        elif self.recordType == DirectoryService.recordType_groups:
-            return config.Scheduling.Options.AllowGroupAsOrganizer
-        elif self.recordType == DirectoryService.recordType_locations:
-            return config.Scheduling.Options.AllowLocationAsOrganizer
-        elif self.recordType == DirectoryService.recordType_resources:
-            return config.Scheduling.Options.AllowResourceAsOrganizer
-        else:
-            return False
-
-    # Mapping from directory record.recordType to RFC2445 CUTYPE values
-    _cuTypes = {
-        'users' : 'INDIVIDUAL',
-        'groups' : 'GROUP',
-        'resources' : 'RESOURCE',
-        'locations' : 'ROOM',
-    }
-
-    def getCUType(self):
-        return self._cuTypes.get(self.recordType, "UNKNOWN")
-
-
-    @classmethod
-    def fromCUType(cls, cuType):
-        for key, val in cls._cuTypes.iteritems():
-            if val == cuType:
-                return key
-        return None
-
-
-    def canAutoSchedule(self, organizer):
-        if config.Scheduling.Options.AutoSchedule.Enabled:
-            if (config.Scheduling.Options.AutoSchedule.Always or
-                self.autoSchedule or
-                self.autoAcceptFromOrganizer(organizer)):
-                if (self.getCUType() != "INDIVIDUAL" or
-                    config.Scheduling.Options.AutoSchedule.AllowUsers):
-                    return True
-        return False
-
-
-    def getAutoScheduleMode(self, organizer):
-        autoScheduleMode = self.autoScheduleMode
-        if self.autoAcceptFromOrganizer(organizer):
-            autoScheduleMode = "automatic"
-        return autoScheduleMode
-
-
-    def autoAcceptFromOrganizer(self, organizer):
-        if organizer is not None and self.autoAcceptGroup is not None:
-            service = self.service.aggregateService or self.service
-            organizerRecord = service.recordWithCalendarUserAddress(organizer)
-            if organizerRecord is not None:
-                if organizerRecord.guid in self.autoAcceptMembers():
-                    return True
-        return False
-
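A hypothetical walk-through of the auto-schedule decision for a resource record (config flags and GUIDs made up):

    # config.Scheduling.Options.AutoSchedule.Enabled = True
    # resource.autoSchedule = False
    # resource.autoAcceptGroup = "STAFF-GROUP-GUID"  # organizer's guid is a member
    #
    # resource.canAutoSchedule("mailto:organizer@example.com")      -> True
    #   (autoAcceptFromOrganizer() finds the organizer in autoAcceptMembers())
    # resource.getAutoScheduleMode("mailto:organizer@example.com")  -> "automatic"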
-
-    def serverURI(self):
-        """
-        URL of the server hosting this record. Return None if hosted on this server.
-        """
-        if config.Servers.Enabled and self.serverID:
-            return Servers.getServerURIById(self.serverID)
-        else:
-            return None
-
-
-    def server(self):
-        """
-        Server hosting this record. Return None if hosted on this server.
-        """
-        if config.Servers.Enabled and self.serverID:
-            return Servers.getServerById(self.serverID)
-        else:
-            return None
-
-
-    def thisServer(self):
-        s = self.server()
-        return s.thisServer if s is not None else True
-
-
-    def autoAcceptMembers(self):
-        """
-        Return the list of GUIDs from which this record will automatically accept
-        invites (assuming no conflicts).  This list is based on the group
-        assigned to record.autoAcceptGroup.  Cache the expanded group membership
-        within the record.
-
-        @return: the list of members of the autoAcceptGroup, or an empty list if
-            not assigned
-        @rtype: C{list} of GUID C{str}
-        """
-        if not hasattr(self, "_cachedAutoAcceptMembers"):
-            self._cachedAutoAcceptMembers = []
-            if self.autoAcceptGroup:
-                service = self.service.aggregateService or self.service
-                groupRecord = service.recordWithGUID(self.autoAcceptGroup)
-                if groupRecord is not None:
-                    self._cachedAutoAcceptMembers = [m.guid for m in groupRecord.expandedMembers()]
-
-        return self._cachedAutoAcceptMembers
-
-
-    def isProxyFor(self, other):
-        """
-        Test whether the record is a calendar user proxy for the specified record.
-
-        @param other: record to test
-        @type other: L{DirectoryRecord}
-
-        @return: C{True} if it is a proxy.
-        @rtype: C{bool}
-        """
-        return self.service.isProxyFor(self, other)
-
-
-
-class DirectoryError(RuntimeError):
-    """
-    Generic directory error.
-    """
-
-
-
-class DirectoryConfigurationError(DirectoryError):
-    """
-    Invalid directory configuration.
-    """
-
-
-
-class UnknownRecordTypeError(DirectoryError):
-    """
-    Unknown directory record type.
-    """
-    def __init__(self, recordType):
-        DirectoryError.__init__(self, "Invalid record type: %s" % (recordType,))
-        self.recordType = recordType
-
-
-# So CheckSACL will be parameterized
-# We do this after DirectoryRecord is defined
-try:
-    from calendarserver.platform.darwin._sacl import CheckSACL
-    DirectoryRecord.CheckSACL = CheckSACL
-except ImportError:
-    DirectoryRecord.CheckSACL = None

Deleted: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/idirectory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/idirectory.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/idirectory.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,180 +0,0 @@
-##
-# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-"""
-Directory service interfaces.
-"""
-
-__all__ = [
-    "IDirectoryService",
-    "IDirectoryRecord",
-]
-
-from zope.interface import Attribute, Interface
-
-class IDirectoryService(Interface):
-    """
-    Directory Service
-    """
-    realmName = Attribute("The name of the authentication realm this service represents.")
-    guid = Attribute("A GUID for this service.")
-
-    def recordTypes(): #@NoSelf
-        """
-        @return: a sequence of strings denoting the record types that
-            are kept in the directory.  For example: C{["users",
-            "groups", "resources"]}.
-        """
-
-    def listRecords(recordType): #@NoSelf
-        """
-        @param recordType: the type of records to retrieve.
-        @return: an iterable of records of the given type.
-        """
-
-    def recordWithShortName(recordType, shortName): #@NoSelf
-        """
-        @param recordType: the type of the record to look up.
-        @param shortName: the short name of the record to look up.
-        @return: an L{IDirectoryRecord} with the given short name, or
-            C{None} if no such record exists.
-        """
-
-    def recordWithUID(uid): #@NoSelf
-        """
-        @param uid: the UID of the record to look up.
-        @return: an L{IDirectoryRecord} with the given UID, or C{None}
-            if no such record exists.
-        """
-
-    def recordWithGUID(guid): #@NoSelf
-        """
-        @param guid: the GUID of the record to look up.
-        @return: an L{IDirectoryRecord} with the given GUID, or
-            C{None} if no such record exists.
-        """
-
-    def recordWithCalendarUserAddress(address): #@NoSelf
-        """
-        @param address: the calendar user address of the record to look up.
-        @type address: C{str}
-
-        @return: an L{IDirectoryRecord} with the given calendar user
-            address, or C{None} if no such record is found.  Note that
-            some directory services may not be able to locate records
-            by calendar user address, or may return partial results.
-            Note also that the calendar server may add to the list of
-            valid calendar user addresses for a user, and the
-            directory service may not be aware of these addresses.
-        """
-
-    def recordWithCachedGroupsAlias(recordType, alias): #@NoSelf
-        """
-        @param recordType: the type of the record to look up.
-        @param alias: the cached-groups alias of the record to look up.
-        @type alias: C{str}
-
-        @return: a deferred L{IDirectoryRecord} with the given cached-groups
-            alias, or C{None} if no such record is found.
-        """
-
-    def recordsMatchingFields(fields): #@NoSelf
-        """
-        @return: a deferred sequence of L{IDirectoryRecord}s which
-            match the given fields.
-        """
-
-    def recordsMatchingTokens(tokens, context=None): #@NoSelf
-        """
-        @param tokens: The tokens to search on
-        @type tokens: C{list} of C{str} (utf-8 bytes)
-
-        @param context: An indication of what the end user is searching for;
-            "attendee", "location", or None
-        @type context: C{str}
-
-        @return: a deferred sequence of L{IDirectoryRecord}s which match the
-            given tokens and optional context.
-
-            Each token is searched for within each record's full name and email
-            address; if each token is found within a record that record is
-            returned in the results.
-
-            If context is None, all record types are considered.  If context is
-            "location", only locations are considered.  If context is
-            "attendee", only users, groups, and resources are considered.
-        """
-
-    def setRealm(realmName): #@NoSelf
-        """
-        Set a new realm name for this (and nested services if any)
-
-        @param realmName: the realm name this service should use.
-        """
-
-
-
-class IDirectoryRecord(Interface):
-    """
-    Directory Record
-    """
-    service = Attribute("The L{IDirectoryService} this record exists in.")
-    recordType = Attribute("The type of this record.")
-    guid = Attribute("The GUID of this record.")
-    uid = Attribute("The UID of this record.")
-    enabled = Attribute("Determines whether this record should allow a principal to be created.")
-    serverID = Attribute("Identifies the server that actually hosts data for the record.")
-    shortNames = Attribute("The names for this record.")
-    authIDs = Attribute("Alternative security identities for this record.")
-    fullName = Attribute("The full name of this record.")
-    firstName = Attribute("The first name of this record.")
-    lastName = Attribute("The last name of this record.")
-    emailAddresses = Attribute("The email addresses of this record.")
-    enabledForCalendaring = Attribute("Determines whether this record creates a principal with a calendar home.")
-    enabledForAddressBooks = Attribute("Determines whether this record creates a principal with an address book home.")
-    calendarUserAddresses = Attribute(
-        """
-        An iterable of C{str}s representing calendar user addresses for this
-        L{IDirectoryRecord}.
-
-        A "calendar user address", as defined by U{RFC 2445 section
-        4.3.3<http://xml.resource.org/public/rfc/html/rfc2445.html#anchor50>},
-        is simply an URI which identifies this user.  Some of these URIs are
-        relative references to URLs from the root of the calendar server's HTTP
-        hierarchy.
-        """
-    )
-
-    def members(): #@NoSelf
-        """
-        @return: an iterable of L{IDirectoryRecord}s for the members of this
-            (group) record.
-        """
-
-    def groups(): #@NoSelf
-        """
-        @return: an iterable of L{IDirectoryRecord}s for the groups this
-            record is a member of.
-        """
-
-    def verifyCredentials(credentials): #@NoSelf
-        """
-        Verify that the given credentials can authenticate the principal
-        represented by this record.
-        @param credentials: the credentials to authenticate with.
-        @return: C{True} if the given credentials match this record,
-            C{False} otherwise.
-        """

Deleted: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/ldapdirectory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/ldapdirectory.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/ldapdirectory.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,2034 +0,0 @@
-##
-# Copyright (c) 2008-2009 Aymeric Augustin. All rights reserved.
-# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-"""
-LDAP directory service implementation.  Supports principal-property-search
-and restrictToGroup features.
-
-The following attributes from standard schemas are used:
-* Core (RFC 4519):
-    . cn | commonName
-    . givenName
-    . member (if not using NIS groups)
-    . ou
-    . sn | surname
-    . uid | userid (if using NIS groups)
-* COSINE (RFC 4524):
-    . mail
-* InetOrgPerson (RFC 2798):
-    . displayName (if cn is unavailable)
-* NIS (RFC):
-    . gecos (if cn is unavailable)
-    . memberUid (if using NIS groups)
-"""
-
-__all__ = [
-    "LdapDirectoryService",
-]
-
-import ldap.async
-from ldap.filter import escape_filter_chars as ldapEsc
-
-try:
-    # Note: PAM support is currently untested
-    import PAM
-    pamAvailable = True
-except ImportError:
-    pamAvailable = False
-
-import time
-from twisted.cred.credentials import UsernamePassword
-from twistedcaldav.directory.cachingdirectory import (
-    CachingDirectoryService, CachingDirectoryRecord
-)
-from twistedcaldav.directory.directory import DirectoryConfigurationError
-from twistedcaldav.directory.augment import AugmentRecord
-from twistedcaldav.directory.util import splitIntoBatches, normalizeUUID
-from twisted.internet.defer import succeed, inlineCallbacks, returnValue
-from twisted.internet.threads import deferToThread
-from twext.python.log import Logger
-from txweb2.http import HTTPError, StatusResponse
-from txweb2 import responsecode
-
-
-
-class LdapDirectoryService(CachingDirectoryService):
-    """
-    LDAP based implementation of L{IDirectoryService}.
-    """
-    log = Logger()
-
-    baseGUID = "5A871574-0C86-44EE-B11B-B9440C3DC4DD"
-
-    def __repr__(self):
-        return "<%s %r: %r>" % (
-            self.__class__.__name__, self.realmName, self.uri
-        )
-
-
-    def __init__(self, params):
-        """
-        @param params: a dictionary containing the following keys:
-            cacheTimeout, realmName, uri, tls, tlsCACertFile, tlsCACertDir,
-            tlsRequireCert, credentials, rdnSchema, groupSchema, resourceSchema,
-            poddingSchema
-        """
-
-        defaults = {
-            "augmentService": None,
-            "groupMembershipCache": None,
-            "cacheTimeout": 1,  # Minutes
-            "negativeCaching": False,
-            "warningThresholdSeconds": 3,
-            "batchSize": 500,  # for splitting up large queries
-            "requestTimeoutSeconds": 10,
-            "requestResultsLimit": 200,
-            "optimizeMultiName": False,
-            "queryLocationsImplicitly": True,
-            "restrictEnabledRecords": False,
-            "restrictToGroup": "",
-            "recordTypes": ("users", "groups"),
-            "uri": "ldap://localhost/",
-            "tls": False,
-            "tlsCACertFile": None,
-            "tlsCACertDir": None,
-            "tlsRequireCert": None,  # never, allow, try, demand, hard
-            "credentials": {
-                "dn": None,
-                "password": None,
-            },
-            "authMethod": "LDAP",
-            "rdnSchema": {
-                "base": "dc=example,dc=com",
-                "guidAttr": "entryUUID",
-                "users": {
-                    "rdn": "ou=People",
-                    "filter": None,  # additional filter for this type
-                    "loginEnabledAttr": "",  # attribute controlling login
-                    "loginEnabledValue": "yes",  # "True" value of above attribute
-                    "calendarEnabledAttr": "",  # attribute controlling enabledForCalendaring
-                    "calendarEnabledValue": "yes",  # "True" value of above attribute
-                    "mapping": {  # maps internal record names to LDAP
-                        "recordName": "uid",
-                        "fullName": "cn",
-                        "emailAddresses": ["mail"],  # multiple LDAP fields supported
-                        "firstName": "givenName",
-                        "lastName": "sn",
-                    },
-                },
-                "groups": {
-                    "rdn": "ou=Group",
-                    "filter": None,  # additional filter for this type
-                    "mapping": {  # maps internal record names to LDAP
-                        "recordName": "cn",
-                        "fullName": "cn",
-                        "emailAddresses": ["mail"],  # multiple LDAP fields supported
-                        "firstName": "givenName",
-                        "lastName": "sn",
-                    },
-                },
-                "locations": {
-                    "rdn": "ou=Places",
-                    "filter": None,  # additional filter for this type
-                    "calendarEnabledAttr": "",  # attribute controlling enabledForCalendaring
-                    "calendarEnabledValue": "yes",  # "True" value of above attribute
-                    "associatedAddressAttr": "",
-                    "mapping": {  # maps internal record names to LDAP
-                        "recordName": "cn",
-                        "fullName": "cn",
-                        "emailAddresses": ["mail"],  # multiple LDAP fields supported
-                    },
-                },
-                "resources": {
-                    "rdn": "ou=Resources",
-                    "filter": None,  # additional filter for this type
-                    "calendarEnabledAttr": "",  # attribute controlling enabledForCalendaring
-                    "calendarEnabledValue": "yes",  # "True" value of above attribute
-                    "mapping": {  # maps internal record names to LDAP
-                        "recordName": "cn",
-                        "fullName": "cn",
-                        "emailAddresses": ["mail"],  # multiple LDAP fields supported
-                    },
-                },
-                "addresses": {
-                    "rdn": "ou=Buildings",
-                    "filter": None,  # additional filter for this type
-                    "streetAddressAttr": "",
-                    "geoAttr": "",
-                    "mapping": {  # maps internal record names to LDAP
-                        "recordName": "cn",
-                        "fullName": "cn",
-                    },
-                },
-            },
-            "groupSchema": {
-                "membersAttr": "member",  # how members are specified
-                "nestedGroupsAttr": None,  # how nested groups are specified
-                "memberIdAttr": None,  # which attribute the above refer to (None means use DN)
-            },
-            "resourceSchema": {
-                # Either set this attribute to retrieve the plist version
-                # of resource-info, as in a Leopard OD server, or...
-                "resourceInfoAttr": None,
-                # ...set the above to None and instead specify these
-                # individually:
-                "autoScheduleAttr": None,
-                "autoScheduleEnabledValue": "yes",
-                "proxyAttr": None,  # list of GUIDs
-                "readOnlyProxyAttr": None,  # list of GUIDs
-                "autoAcceptGroupAttr": None,  # single group GUID
-            },
-            "poddingSchema": {
-                "serverIdAttr": None,  # maps to augments server-id
-            },
-        }
-        ignored = None
-        params = self.getParams(params, defaults, ignored)
-
-        self._recordTypes = params["recordTypes"]
-
-        super(LdapDirectoryService, self).__init__(params["cacheTimeout"],
-                                                   params["negativeCaching"])
-
-        self.warningThresholdSeconds = params["warningThresholdSeconds"]
-        self.batchSize = params["batchSize"]
-        self.requestTimeoutSeconds = params["requestTimeoutSeconds"]
-        self.requestResultsLimit = params["requestResultsLimit"]
-        self.optimizeMultiName = params["optimizeMultiName"]
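-        # batchSize controls how many values go into a single query (see
-        # getGroups), while requestResultsLimit caps how many results one
-        # search may return, so a batch larger than the result limit could
-        # never be satisfied in full; clamp it.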
-        if self.batchSize > self.requestResultsLimit:
-            self.batchSize = self.requestResultsLimit
-        self.queryLocationsImplicitly = params["queryLocationsImplicitly"]
-        self.augmentService = params["augmentService"]
-        self.groupMembershipCache = params["groupMembershipCache"]
-        self.realmName = params["uri"]
-        self.uri = params["uri"]
-        self.tls = params["tls"]
-        self.tlsCACertFile = params["tlsCACertFile"]
-        self.tlsCACertDir = params["tlsCACertDir"]
-        self.tlsRequireCert = params["tlsRequireCert"]
-        self.credentials = params["credentials"]
-        self.authMethod = params["authMethod"]
-        self.rdnSchema = params["rdnSchema"]
-        self.groupSchema = params["groupSchema"]
-        self.resourceSchema = params["resourceSchema"]
-        self.poddingSchema = params["poddingSchema"]
-
-        self.base = ldap.dn.str2dn(self.rdnSchema["base"])
-
-        # Certain attributes (such as entryUUID) may be hidden and not
-        # returned by default when queried for all attributes. Therefore it is
-        # necessary to explicitly pass all the possible attributes list
-        # for ldap searches.  Dynamically build the attribute list based on
-        # config.
-        attrSet = set()
-
-        if self.rdnSchema["guidAttr"]:
-            attrSet.add(self.rdnSchema["guidAttr"])
-        for recordType in self.recordTypes():
-            if self.rdnSchema[recordType]["attr"]:
-                attrSet.add(self.rdnSchema[recordType]["attr"])
-            for n in ("calendarEnabledAttr", "associatedAddressAttr",
-                      "streetAddressAttr", "geoAttr"):
-                if self.rdnSchema[recordType].get(n, False):
-                    attrSet.add(self.rdnSchema[recordType][n])
-            for attrList in self.rdnSchema[recordType]["mapping"].values():
-                if attrList:
-                    # Since emailAddresses can map to multiple LDAP fields,
-                    # support either string or list
-                    if isinstance(attrList, str):
-                        attrList = [attrList]
-                    for attr in attrList:
-                        attrSet.add(attr)
-            # Also put the guidAttr attribute into the mappings for each type
-            # so recordsMatchingFields can query on guid
-            self.rdnSchema[recordType]["mapping"]["guid"] = self.rdnSchema["guidAttr"]
-            # Also put the memberIdAttr attribute into the mappings for each type
-            # so recordsMatchingFields can query on memberIdAttr
-            self.rdnSchema[recordType]["mapping"]["memberIdAttr"] = self.groupSchema["memberIdAttr"]
-        if self.groupSchema["membersAttr"]:
-            attrSet.add(self.groupSchema["membersAttr"])
-        if self.groupSchema["nestedGroupsAttr"]:
-            attrSet.add(self.groupSchema["nestedGroupsAttr"])
-        if self.groupSchema["memberIdAttr"]:
-            attrSet.add(self.groupSchema["memberIdAttr"])
-        if self.rdnSchema["users"]["loginEnabledAttr"]:
-            attrSet.add(self.rdnSchema["users"]["loginEnabledAttr"])
-        if self.resourceSchema["resourceInfoAttr"]:
-            attrSet.add(self.resourceSchema["resourceInfoAttr"])
-        if self.resourceSchema["autoScheduleAttr"]:
-            attrSet.add(self.resourceSchema["autoScheduleAttr"])
-        if self.resourceSchema["autoAcceptGroupAttr"]:
-            attrSet.add(self.resourceSchema["autoAcceptGroupAttr"])
-        if self.resourceSchema["proxyAttr"]:
-            attrSet.add(self.resourceSchema["proxyAttr"])
-        if self.resourceSchema["readOnlyProxyAttr"]:
-            attrSet.add(self.resourceSchema["readOnlyProxyAttr"])
-        if self.poddingSchema["serverIdAttr"]:
-            attrSet.add(self.poddingSchema["serverIdAttr"])
-        self.attrlist = list(attrSet)
-
-        self.typeDNs = {}
-        for recordType in self.recordTypes():
-            self.typeDNs[recordType] = ldap.dn.str2dn(
-                self.rdnSchema[recordType]["rdn"].lower()
-            ) + self.base
-
-        self.ldap = None
-
-        # Separate LDAP connection used solely for authenticating clients
-        self.authLDAP = None
-
-        # Restricting access by directory group
-        self.restrictEnabledRecords = params['restrictEnabledRecords']
-        self.restrictToGroup = params['restrictToGroup']
-        self.restrictedTimestamp = 0
-
-
-    def recordTypes(self):
-        return self._recordTypes
-
-
-    def listRecords(self, recordType):
-
-        # Build base for this record Type
-        base = self.typeDNs[recordType]
-
-        # Build filter
-        filterstr = "(!(objectClass=organizationalUnit))"
-        typeFilter = self.rdnSchema[recordType].get("filter", "")
-        if typeFilter:
-            filterstr = "(&%s%s)" % (filterstr, typeFilter)
-
-        # Query the LDAP server
-        self.log.debug(
-            "Querying ldap for records matching base {base} and "
-            "filter {filter} for attributes {attrs}.",
-            base=ldap.dn.dn2str(base), filter=filterstr,
-            attrs=self.attrlist
-        )
-
-        # This takes a while, so if you don't want to have a "long request"
-        # warning logged, use this instead of timedSearch:
-        # results = self.ldap.search_s(ldap.dn.dn2str(base),
-        #     ldap.SCOPE_SUBTREE, filterstr=filterstr, attrlist=self.attrlist)
-        results = self.timedSearch(
-            ldap.dn.dn2str(base), ldap.SCOPE_SUBTREE,
-            filterstr=filterstr, attrlist=self.attrlist
-        )
-
-        records = []
-        numMissingGuids = 0
-        guidAttr = self.rdnSchema["guidAttr"]
-        for dn, attrs in results:
-            dn = normalizeDNstr(dn)
-
-            unrestricted = self.isAllowedByRestrictToGroup(dn, attrs)
-
-            try:
-                record = self._ldapResultToRecord(dn, attrs, recordType)
-                # self.log.debug("Got LDAP record {record}", record=record)
-            except MissingGuidException:
-                numMissingGuids += 1
-                continue
-
-            if not unrestricted:
-                self.log.debug(
-                    "{dn} is not enabled because it's not a member of group: "
-                    "{group}", dn=dn, group=self.restrictToGroup
-                )
-                record.enabledForCalendaring = False
-                record.enabledForAddressBooks = False
-
-            records.append(record)
-
-        if numMissingGuids:
-            self.log.info(
-                "{num} {recordType} records are missing {attr}",
-                num=numMissingGuids, recordType=recordType, attr=guidAttr
-            )
-
-        return records
-
-
-    @inlineCallbacks
-    def recordWithCachedGroupsAlias(self, recordType, alias):
-        """
-        @param recordType: the type of the record to look up.
-        @param alias: the cached-groups alias of the record to look up.
-        @type alias: C{str}
-
-        @return: a deferred L{IDirectoryRecord} with the given cached-groups
-            alias, or C{None} if no such record is found.
-        """
-        memberIdAttr = self.groupSchema["memberIdAttr"]
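-        # When memberIdAttr is configured, cached-groups aliases are values of
-        # that attribute; otherwise they are DNs, so search on the matching
-        # pseudo-field.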
-        attributeToSearch = "memberIdAttr" if memberIdAttr else "dn"
-
-        fields = [[attributeToSearch, alias, False, "equals"]]
-        results = yield self.recordsMatchingFields(
-            fields, recordType=recordType
-        )
-        if results:
-            returnValue(results[0])
-        else:
-            returnValue(None)
-
-
-    def getExternalProxyAssignments(self):
-        """
-        Retrieve proxy assignments for locations and resources from the
-        directory and return a list of (principalUID, [memberUIDs]) tuples,
-        suitable for passing to proxyDB.setGroupMembers( )
-        """
-        assignments = []
-
-        guidAttr = self.rdnSchema["guidAttr"]
-        readAttr = self.resourceSchema["readOnlyProxyAttr"]
-        writeAttr = self.resourceSchema["proxyAttr"]
-        if not (guidAttr and readAttr and writeAttr):
-            self.log.error(
-                "LDAP configuration requires guidAttr, proxyAttr, and "
-                "readOnlyProxyAttr in order to use external proxy assignments "
-                "efficiently; falling back to slower method"
-            )
-            # Fall back to the less-specialized version
-            return super(
-                LdapDirectoryService, self
-            ).getExternalProxyAssignments()
-
-        # Build filter
-        filterstr = "(|(%s=*)(%s=*))" % (readAttr, writeAttr)
-        # ...taking into account only calendar-enabled records
-        enabledAttr = self.rdnSchema["locations"]["calendarEnabledAttr"]
-        enabledValue = self.rdnSchema["locations"]["calendarEnabledValue"]
-        if enabledAttr and enabledValue:
-            filterstr = "(&(%s=%s)%s)" % (enabledAttr, enabledValue, filterstr)
-
-        attrlist = [guidAttr, readAttr, writeAttr]
-
-        # Query the LDAP server
-        self.log.debug(
-            "Querying ldap for records matching base {base} and filter "
-            "{filter} for attributes {attrs}.",
-            base=ldap.dn.dn2str(self.base), filter=filterstr,
-            attrs=attrlist
-        )
-
-        results = self.timedSearch(ldap.dn.dn2str(self.base),
-                                   ldap.SCOPE_SUBTREE, filterstr=filterstr,
-                                   attrlist=attrlist)
-
-        for dn, attrs in results:
-            dn = normalizeDNstr(dn)
-            guid = self._getUniqueLdapAttribute(attrs, guidAttr)
-            if guid:
-                guid = normalizeUUID(guid)
-                readDelegate = self._getUniqueLdapAttribute(attrs, readAttr)
-                if readDelegate:
-                    readDelegate = normalizeUUID(readDelegate)
-                    assignments.append(
-                        ("%s#calendar-proxy-read" % (guid,), [readDelegate])
-                    )
-                writeDelegate = self._getUniqueLdapAttribute(attrs, writeAttr)
-                if writeDelegate:
-                    writeDelegate = normalizeUUID(writeDelegate)
-                    assignments.append(
-                        ("%s#calendar-proxy-write" % (guid,), [writeDelegate])
-                    )
-
-        return assignments
-
-
-    def getLDAPConnection(self):
-        if self.ldap is None:
-            self.log.info("Connecting to LDAP {uri}", uri=repr(self.uri))
-            self.ldap = self.createLDAPConnection()
-            self.log.info(
-                "Connection established to LDAP {uri}", uri=repr(self.uri)
-            )
-            if self.credentials.get("dn", ""):
-                try:
-                    self.log.info(
-                        "Binding to LDAP {dn}",
-                        dn=repr(self.credentials.get("dn"))
-                    )
-                    self.ldap.simple_bind_s(
-                        self.credentials.get("dn"),
-                        self.credentials.get("password"),
-                    )
-                    self.log.info(
-                        "Successfully authenticated with LDAP as {dn}",
-                        dn=repr(self.credentials.get("dn"))
-                    )
-                except ldap.INVALID_CREDENTIALS:
-                    self.log.error(
-                        "Can't bind to LDAP {uri}: check credentials",
-                        uri=self.uri
-                    )
-                    raise DirectoryConfigurationError()
-
-        return self.ldap
-
-
-    def createLDAPConnection(self):
-        """
-        Create and configure LDAP connection
-        """
-        cxn = ldap.initialize(self.uri)
-
-        if self.tlsCACertFile:
-            cxn.set_option(ldap.OPT_X_TLS_CACERTFILE, self.tlsCACertFile)
-        if self.tlsCACertDir:
-            cxn.set_option(ldap.OPT_X_TLS_CACERTDIR, self.tlsCACertDir)
-
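-        # Translate the configured tlsRequireCert policy string into the
-        # corresponding python-ldap TLS option value.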
-        if self.tlsRequireCert == "never":
-            cxn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_NEVER)
-        elif self.tlsRequireCert == "allow":
-            cxn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_ALLOW)
-        elif self.tlsRequireCert == "try":
-            cxn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_TRY)
-        elif self.tlsRequireCert == "demand":
-            cxn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND)
-        elif self.tlsRequireCert == "hard":
-            cxn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_HARD)
-
-        if self.tls:
-            cxn.start_tls_s()
-
-        return cxn
-
-
-    def authenticate(self, dn, password):
-        """
-        Perform simple bind auth, raising ldap.INVALID_CREDENTIALS if
-        bad password
-        """
-        TRIES = 3
-
-        for _ignore_i in xrange(TRIES):
-            self.log.debug("Authenticating {dn}", dn=dn)
-
-            if self.authLDAP is None:
-                self.log.debug("Creating authentication connection to LDAP")
-                self.authLDAP = self.createLDAPConnection()
-
-            try:
-                startTime = time.time()
-                self.authLDAP.simple_bind_s(dn, password)
-                # Getting here means success, so break the retry loop
-                break
-
-            except ldap.INAPPROPRIATE_AUTH:
-                # Seen when using an empty password, treat as invalid creds
-                raise ldap.INVALID_CREDENTIALS()
-
-            except ldap.NO_SUCH_OBJECT:
-                self.log.error(
-                    "LDAP Authentication error for {dn}: NO_SUCH_OBJECT",
-                    dn=dn
-                )
-                # fall through to try again; could be transient
-
-            except ldap.INVALID_CREDENTIALS:
-                raise
-
-            except ldap.SERVER_DOWN:
-                self.log.error("Lost connection to LDAP server.")
-                self.authLDAP = None
-                # Fall through and retry if TRIES has been reached
-
-            except Exception, e:
-                self.log.error(
-                    "LDAP authentication failed with {e}.", e=e
-                )
-                raise
-
-            finally:
-                totalTime = time.time() - startTime
-                if totalTime > self.warningThresholdSeconds:
-                    self.log.error(
-                        "LDAP auth exceeded threshold: {time:.2f} seconds for "
-                        "{dn}", time=totalTime, dn=dn
-                    )
-
-        else:
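-        # This else belongs to the for loop: it runs only when all retries
-        # were exhausted without a successful bind (i.e. no break executed).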
-            self.log.error(
-                "Giving up on LDAP authentication after {count:d} tries.  "
-                "Responding with 503.", count=TRIES
-            )
-            raise HTTPError(StatusResponse(
-                responsecode.SERVICE_UNAVAILABLE, "LDAP server unavailable"
-            ))
-
-        self.log.debug("Authentication succeeded for {dn}", dn=dn)
-
-
-    def timedSearch(
-        self, base, scope, filterstr="(objectClass=*)", attrlist=None,
-        timeoutSeconds=-1, resultLimit=0
-    ):
-        """
-        Execute an LDAP query, retrying up to 3 times in case the LDAP server
-        has gone down and we need to reconnect. If it takes longer than the
-        configured threshold, emit a log error.
-        The number of records requested is controlled by resultLimit (0=no
-        limit).
-        If timeoutSeconds is not -1, the query will abort after the specified
-        number of seconds and the results retrieved so far are returned.
-        """
-        TRIES = 3
-
-        for i in xrange(TRIES):
-            try:
-                s = ldap.async.List(self.getLDAPConnection())
-                s.startSearch(
-                    base, scope, filterstr, attrList=attrlist,
-                    timeout=timeoutSeconds, sizelimit=resultLimit
-                )
-                startTime = time.time()
-                s.processResults()
-            except ldap.NO_SUCH_OBJECT:
-                return []
-            except ldap.FILTER_ERROR, e:
-                self.log.error(
-                    "LDAP filter error: {e} {filter}", e=e, filter=filterstr
-                )
-                return []
-            except ldap.SIZELIMIT_EXCEEDED, e:
-                self.log.debug(
-                    "LDAP result limit exceeded: {limit:d}", limit=resultLimit
-                )
-            except ldap.TIMELIMIT_EXCEEDED, e:
-                self.log.warn(
-                    "LDAP timeout exceeded: {t:d} seconds", t=timeoutSeconds
-                )
-            except ldap.SERVER_DOWN:
-                self.ldap = None
-                self.log.error(
-                    "LDAP server unavailable (tried {count:d} times)",
-                    count=(i + 1)
-                )
-                continue
-
-            # change format, ignoring resultsType
-            result = [
-                resultItem for _ignore_resultType, resultItem in s.allResults
-            ]
-
-            totalTime = time.time() - startTime
-            if totalTime > self.warningThresholdSeconds:
-                if filterstr and len(filterstr) > 100:
-                    filterstr = "%s..." % (filterstr[:100],)
-                self.log.error(
-                    "LDAP query exceeded threshold: {time:.2f} seconds for "
-                    "{base} {filter} {attrs} (#results={count:d})",
-                    time=totalTime, base=base, filter=filterstr,
-                    attrs=attrlist, count=len(result),
-                )
-            return result
-
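-        # Only reached when every attempt failed with SERVER_DOWN and the
-        # retry loop was exhausted without returning results.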
-        raise HTTPError(StatusResponse(
-            responsecode.SERVICE_UNAVAILABLE, "LDAP server unavailable"
-        ))
-
-
-    def isAllowedByRestrictToGroup(self, dn, attrs):
-        """
-        Check to see if the principal with the given DN and LDAP attributes is
-        a member of the restrictToGroup.
-
-        @param dn: an LDAP dn
-        @type dn: C{str}
-        @param attrs: LDAP attributes
-        @type attrs: C{dict}
-        @return: True if the principal is in the group, or True if
-            restrictEnabledRecords is turned off.
-        @rtype: C{boolean}
-        """
-        if not self.restrictEnabledRecords:
-            return True
-        if self.groupSchema["memberIdAttr"]:
-            value = self._getUniqueLdapAttribute(
-                attrs, self.groupSchema["memberIdAttr"]
-            )
-        else:  # No memberIdAttr implies DN
-            value = dn
-        return value in self.restrictedPrincipals
-
-
-    @property
-    def restrictedPrincipals(self):
-        """
-        Look up (and cache) the set of principals (memberIdAttr values, or
-        DNs when no memberIdAttr is configured) that are members of the
-        restrictToGroup.  If restrictEnabledRecords is off, return None to
-        indicate there are no group restrictions.
-        """
-        if self.restrictEnabledRecords:
-
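-            # Rebuild the restricted-membership cache only when it is older
-            # than the cache timeout; otherwise return the cached set below.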
-            if time.time() - self.restrictedTimestamp > self.cacheTimeout:
-                # fault in the members of group of name self.restrictToGroup
-                recordType = self.recordType_groups
-                base = self.typeDNs[recordType]
-                # TODO: This shouldn't be hardcoded to cn
-                filterstr = "(cn=%s)" % (self.restrictToGroup,)
-                self.log.debug(
-                    "Retrieving ldap record with base {base} and filter "
-                    "{filter}.",
-                    base=ldap.dn.dn2str(base), filter=filterstr
-                )
-                result = self.timedSearch(
-                    ldap.dn.dn2str(base),
-                    ldap.SCOPE_SUBTREE,
-                    filterstr=filterstr,
-                    attrlist=self.attrlist
-                )
-
-                members = []
-                nestedGroups = []
-
-                if len(result) == 1:
-                    dn, attrs = result[0]
-                    dn = normalizeDNstr(dn)
-                    if self.groupSchema["membersAttr"]:
-                        members = self._getMultipleLdapAttributes(
-                            attrs,
-                            self.groupSchema["membersAttr"]
-                        )
-                        if not self.groupSchema["memberIdAttr"]:  # DNs
-                            members = [normalizeDNstr(m) for m in members]
-                        members = set(members)
-
-                    if self.groupSchema["nestedGroupsAttr"]:
-                        nestedGroups = self._getMultipleLdapAttributes(
-                            attrs,
-                            self.groupSchema["nestedGroupsAttr"]
-                        )
-                        if not self.groupSchema["memberIdAttr"]:  # DNs
-                            nestedGroups = [
-                                normalizeDNstr(g) for g in nestedGroups
-                            ]
-                        nestedGroups = set(nestedGroups)
-                    else:
-                        # Since all members are lumped into the same attribute,
-                        # treat them all as nestedGroups instead
-                        nestedGroups = members
-                        members = set()
-
-                self._cachedRestrictedPrincipals = set(
-                    self._expandGroupMembership(members, nestedGroups)
-                )
-                self.log.info(
-                    "Got {count} restricted group members",
-                    count=len(self._cachedRestrictedPrincipals)
-                )
-                self.restrictedTimestamp = time.time()
-            return self._cachedRestrictedPrincipals
-        else:
-            # No restrictions
-            return None
-
-
-    def _expandGroupMembership(self, members, nestedGroups, processedItems=None):
-        """
-        A generator which recursively yields principals which are included within nestedGroups
-
-        @param members:  If the LDAP service is configured to use different attributes to
-            indicate member users and member nested groups, members will include the non-groups.
-            Otherwise, members will be empty and only nestedGroups will be used.
-        @type members: C{set}
-        @param nestedGroups:  If the LDAP service is configured to use different attributes to
-            indicate member users and member nested groups, nestedGroups will include only
-            the groups; otherwise nestedGroups will include all members
-        @type nestedGroups: C{set}
-        @param processedItems: The set of members that have already been looked up in LDAP
-            so the code doesn't have to look up the same member twice or get stuck in a
-            membership loop.
-        @type processedItems: C{set}
-        @return: All members of the group; the values correspond to memberIdAttr
-            if memberIdAttr is set in the group schema, or to DNs otherwise.
-        @rtype: generator of C{str}
-        """
-
-        if processedItems is None:
-            processedItems = set()
-
-        if isinstance(members, str):
-            members = [members]
-
-        if isinstance(nestedGroups, str):
-            nestedGroups = [nestedGroups]
-
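-        # Non-group members are yielded directly; each group is then looked up
-        # below so its own members and nested groups can be expanded
-        # recursively.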
-        for member in members:
-            if member not in processedItems:
-                processedItems.add(member)
-                yield member
-
-        for group in nestedGroups:
-            if group in processedItems:
-                continue
-
-            recordType = self.recordType_groups
-            base = self.typeDNs[recordType]
-            if self.groupSchema["memberIdAttr"]:
-                scope = ldap.SCOPE_SUBTREE
-                base = self.typeDNs[recordType]
-                filterstr = "(%s=%s)" % (self.groupSchema["memberIdAttr"], group)
-            else:  # Use DN
-                scope = ldap.SCOPE_BASE
-                base = ldap.dn.str2dn(group)
-                filterstr = "(objectClass=*)"
-
-            self.log.debug(
-                "Retrieving ldap record with base {base} and filter {filter}.",
-                base=ldap.dn.dn2str(base), filter=filterstr
-            )
-            result = self.timedSearch(ldap.dn.dn2str(base),
-                                      scope,
-                                      filterstr=filterstr,
-                                      attrlist=self.attrlist)
-
-            if len(result) == 0:
-                continue
-
-            subMembers = set()
-            subNestedGroups = set()
-            if len(result) == 1:
-                dn, attrs = result[0]
-                dn = normalizeDNstr(dn)
-                if self.groupSchema["membersAttr"]:
-                    subMembers = self._getMultipleLdapAttributes(
-                        attrs,
-                        self.groupSchema["membersAttr"]
-                    )
-                    if not self.groupSchema["memberIdAttr"]:  # these are DNs
-                        subMembers = [normalizeDNstr(m) for m in subMembers]
-                    subMembers = set(subMembers)
-
-                if self.groupSchema["nestedGroupsAttr"]:
-                    subNestedGroups = self._getMultipleLdapAttributes(
-                        attrs,
-                        self.groupSchema["nestedGroupsAttr"]
-                    )
-                    if not self.groupSchema["memberIdAttr"]:  # these are DNs
-                        subNestedGroups = [normalizeDNstr(g) for g in subNestedGroups]
-                    subNestedGroups = set(subNestedGroups)
-
-            processedItems.add(group)
-            yield group
-
-            for item in self._expandGroupMembership(subMembers,
-                                                    subNestedGroups,
-                                                    processedItems):
-                yield item
-
-
-    def _getUniqueLdapAttribute(self, attrs, *keys):
-        """
-        Get the first value for one or several attributes
-        Useful when attributes have aliases (e.g. sn vs. surname)
-        """
-        for key in keys:
-            values = attrs.get(key)
-            if values is not None:
-                return values[0]
-        return None
-
-
-    def _getMultipleLdapAttributes(self, attrs, *keys):
-        """
-        Get all values for one or several attributes
-        """
-        results = []
-        for key in keys:
-            if key:
-                values = attrs.get(key)
-                if values is not None:
-                    results += values
-        return results
-
-
-    def _ldapResultToRecord(self, dn, attrs, recordType):
-        """
-        Convert the attrs returned by a LDAP search into a LdapDirectoryRecord
-        object.
-
-        If guidAttr was specified in the config but is missing from attrs,
-        raises MissingGuidException
-        """
-
-        guid = None
-        authIDs = set()
-        fullName = None
-        firstName = ""
-        lastName = ""
-        emailAddresses = set()
-        enabledForCalendaring = None
-        enabledForAddressBooks = None
-        uid = None
-        enabledForLogin = True
-        extras = {}
-
-        shortNames = tuple(self._getMultipleLdapAttributes(attrs, self.rdnSchema[recordType]["mapping"]["recordName"]))
-        if not shortNames:
-            raise MissingRecordNameException()
-
-        # First check for and add guid
-        guidAttr = self.rdnSchema["guidAttr"]
-        if guidAttr:
-            guid = self._getUniqueLdapAttribute(attrs, guidAttr)
-            if not guid:
-                self.log.debug(
-                    "LDAP data for {shortNames} is missing guid attribute "
-                    "{attr}",
-                    shortNames=shortNames, attr=guidAttr
-                )
-                raise MissingGuidException()
-            guid = normalizeUUID(guid)
-
-        # Find or build email
-        # (The emailAddresses mapping is a list of ldap fields)
-        emailAddressesMappedTo = self.rdnSchema[recordType]["mapping"].get("emailAddresses", "")
-        # Supporting either string or list for emailAddresses:
-        if isinstance(emailAddressesMappedTo, str):
-            emailAddresses = set(self._getMultipleLdapAttributes(attrs, emailAddressesMappedTo))
-        else:
-            emailAddresses = set(self._getMultipleLdapAttributes(attrs, *self.rdnSchema[recordType]["mapping"]["emailAddresses"]))
-        emailSuffix = self.rdnSchema[recordType].get("emailSuffix", None)
-
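-        # No email address found in LDAP: if an emailSuffix is configured for
-        # this record type, synthesize one from the record's naming attribute
-        # plus the suffix.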
-        if len(emailAddresses) == 0 and emailSuffix:
-            emailPrefix = self._getUniqueLdapAttribute(
-                attrs,
-                self.rdnSchema[recordType].get("attr", "cn")
-            )
-            emailAddresses.add(emailPrefix + emailSuffix)
-
-        proxyGUIDs = ()
-        readOnlyProxyGUIDs = ()
-        autoSchedule = False
-        autoAcceptGroup = ""
-        memberGUIDs = []
-
-        # LDAP attribute -> principal matchings
-        if recordType == self.recordType_users:
-            fullName = self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["mapping"]["fullName"])
-            firstName = self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["mapping"]["firstName"])
-            lastName = self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["mapping"]["lastName"])
-            enabledForCalendaring = True
-            enabledForAddressBooks = True
-
-        elif recordType == self.recordType_groups:
-            fullName = self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["mapping"]["fullName"])
-            enabledForCalendaring = False
-            enabledForAddressBooks = False
-            enabledForLogin = False
-
-            if self.groupSchema["membersAttr"]:
-                members = self._getMultipleLdapAttributes(attrs, self.groupSchema["membersAttr"])
-                memberGUIDs.extend(members)
-            if self.groupSchema["nestedGroupsAttr"]:
-                members = self._getMultipleLdapAttributes(attrs, self.groupSchema["nestedGroupsAttr"])
-                memberGUIDs.extend(members)
-
-            # Normalize members if they're in DN form
-            if not self.groupSchema["memberIdAttr"]:  # empty = dn
-                guids = list(memberGUIDs)
-                memberGUIDs = []
-                for dnStr in guids:
-                    try:
-                        dnStr = normalizeDNstr(dnStr)
-                        memberGUIDs.append(dnStr)
-                    except Exception, e:
-                        # LDAP returned an illegal DN value, log and ignore it
-                        self.log.warn("Bad LDAP DN: {dn!r}", dn=dnStr)
-
-        elif recordType in (self.recordType_resources,
-                            self.recordType_locations):
-            fullName = self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["mapping"]["fullName"])
-            enabledForCalendaring = True
-            enabledForAddressBooks = False
-            enabledForLogin = False
-            if self.resourceSchema["resourceInfoAttr"]:
-                resourceInfo = self._getUniqueLdapAttribute(
-                    attrs,
-                    self.resourceSchema["resourceInfoAttr"]
-                )
-                if resourceInfo:
-                    try:
-                        (
-                            autoSchedule,
-                            proxy,
-                            readOnlyProxy,
-                            autoAcceptGroup
-                        ) = self.parseResourceInfo(
-                            resourceInfo,
-                            guid,
-                            recordType,
-                            shortNames[0]
-                        )
-                        if proxy:
-                            proxyGUIDs = (proxy,)
-                        if readOnlyProxy:
-                            readOnlyProxyGUIDs = (readOnlyProxy,)
-                    except ValueError, e:
-                        self.log.error(
-                            "Unable to parse resource info: {e}", e=e
-                        )
-            else:  # the individual resource attributes might be specified
-                if self.resourceSchema["autoScheduleAttr"]:
-                    autoScheduleValue = self._getUniqueLdapAttribute(
-                        attrs,
-                        self.resourceSchema["autoScheduleAttr"]
-                    )
-                    autoSchedule = (
-                        autoScheduleValue == self.resourceSchema["autoScheduleEnabledValue"]
-                    )
-                if self.resourceSchema["proxyAttr"]:
-                    proxyGUIDs = set(
-                        self._getMultipleLdapAttributes(
-                            attrs,
-                            self.resourceSchema["proxyAttr"]
-                        )
-                    )
-                if self.resourceSchema["readOnlyProxyAttr"]:
-                    readOnlyProxyGUIDs = set(
-                        self._getMultipleLdapAttributes(
-                            attrs,
-                            self.resourceSchema["readOnlyProxyAttr"]
-                        )
-                    )
-                if self.resourceSchema["autoAcceptGroupAttr"]:
-                    autoAcceptGroup = self._getUniqueLdapAttribute(
-                        attrs,
-                        self.resourceSchema["autoAcceptGroupAttr"]
-                    )
-
-            if recordType == self.recordType_locations:
-                if self.rdnSchema[recordType].get("associatedAddressAttr", ""):
-                    associatedAddress = self._getUniqueLdapAttribute(
-                        attrs,
-                        self.rdnSchema[recordType]["associatedAddressAttr"]
-                    )
-                    if associatedAddress:
-                        extras["associatedAddress"] = associatedAddress
-
-        elif recordType == self.recordType_addresses:
-            if self.rdnSchema[recordType].get("geoAttr", ""):
-                geo = self._getUniqueLdapAttribute(
-                    attrs,
-                    self.rdnSchema[recordType]["geoAttr"]
-                )
-                if geo:
-                    extras["geo"] = geo
-            if self.rdnSchema[recordType].get("streetAddressAttr", ""):
-                street = self._getUniqueLdapAttribute(
-                    attrs,
-                    self.rdnSchema[recordType]["streetAddressAttr"]
-                )
-                if street:
-                    extras["streetAddress"] = street
-
-        serverID = None
-        if self.poddingSchema["serverIdAttr"]:
-            serverID = self._getUniqueLdapAttribute(
-                attrs,
-                self.poddingSchema["serverIdAttr"]
-            )
-
-        record = LdapDirectoryRecord(
-            service=self,
-            recordType=recordType,
-            guid=guid,
-            shortNames=shortNames,
-            authIDs=authIDs,
-            fullName=fullName,
-            firstName=firstName,
-            lastName=lastName,
-            emailAddresses=emailAddresses,
-            uid=uid,
-            dn=dn,
-            memberGUIDs=memberGUIDs,
-            extProxies=proxyGUIDs,
-            extReadOnlyProxies=readOnlyProxyGUIDs,
-            attrs=attrs,
-            **extras
-        )
-
-        if self.augmentService is not None:
-            # Look up augment information
-            # TODO: this needs to be deferred but for now we hard code
-            # the deferred result because we know it is completing
-            # immediately.
-            d = self.augmentService.getAugmentRecord(record.guid, recordType)
-            d.addCallback(lambda x: record.addAugmentInformation(x))
-
-        else:
-            # Generate augment record based on information retrieved from LDAP
-            augmentRecord = AugmentRecord(
-                guid,
-                enabled=True,
-                serverID=serverID,
-                enabledForCalendaring=enabledForCalendaring,
-                autoSchedule=autoSchedule,
-                autoAcceptGroup=autoAcceptGroup,
-                enabledForAddressBooks=enabledForAddressBooks,  # TODO: add to LDAP?
-                enabledForLogin=enabledForLogin,
-            )
-            record.addAugmentInformation(augmentRecord)
-
-        # Override with LDAP login control if attribute specified
-        if recordType == self.recordType_users:
-            loginEnabledAttr = self.rdnSchema[recordType]["loginEnabledAttr"]
-            if loginEnabledAttr:
-                loginEnabledValue = self.rdnSchema[recordType]["loginEnabledValue"]
-                record.enabledForLogin = self._getUniqueLdapAttribute(
-                    attrs, loginEnabledAttr
-                ) == loginEnabledValue
-
-        # Override with LDAP calendar-enabled control if attribute specified
-        calendarEnabledAttr = self.rdnSchema[recordType].get("calendarEnabledAttr", "")
-        if calendarEnabledAttr:
-            calendarEnabledValue = self.rdnSchema[recordType]["calendarEnabledValue"]
-            record.enabledForCalendaring = self._getUniqueLdapAttribute(
-                attrs,
-                calendarEnabledAttr
-            ) == calendarEnabledValue
-
-        return record
-
-
-    def queryDirectory(
-        self, recordTypes, indexType, indexKey, queryMethod=None
-    ):
-        """
-        Queries the LDAP directory for the record which has an attribute value
-        matching the indexType and indexKey parameters.
-
-        recordTypes is a list of record types to limit the search to.
-        indexType specifies one of the CachingDirectoryService constants
-            identifying which attribute to search on.
-        indexKey is the value to search for.
-
-        Nothing is returned -- the resulting record (if any) is placed in
-        the cache.
-        """
-
-        if queryMethod is None:
-            queryMethod = self.timedSearch
-
-        self.log.debug(
-            "LDAP query for types {types}, indexType {indexType} and "
-            "indexKey {indexKey}",
-            types=recordTypes, indexType=indexType, indexKey=indexKey
-        )
-
-        guidAttr = self.rdnSchema["guidAttr"]
-        for recordType in recordTypes:
-            # Build base for this record Type
-            base = self.typeDNs[recordType]
-
-            # Build filter
-            filterstr = "(!(objectClass=organizationalUnit))"
-            typeFilter = self.rdnSchema[recordType].get("filter", "")
-            if typeFilter:
-                filterstr = "(&%s%s)" % (filterstr, typeFilter)
-
-            if indexType == self.INDEX_TYPE_GUID:
-                # Query on guid only works if guid attribute has been defined.
-                # Support for query on guid even if it is auto-generated should
-                # be added.
-                if not guidAttr:
-                    return
-                filterstr = "(&%s(%s=%s))" % (filterstr, guidAttr, indexKey)
-
-            elif indexType == self.INDEX_TYPE_SHORTNAME:
-                filterstr = "(&%s(%s=%s))" % (
-                    filterstr,
-                    self.rdnSchema[recordType]["mapping"]["recordName"],
-                    ldapEsc(indexKey)
-                )
-
-            elif indexType == self.INDEX_TYPE_CUA:
-                # indexKey is of the form "mailto:test@example.net"
-                email = indexKey[7:]  # strip "mailto:"
-                emailSuffix = self.rdnSchema[recordType].get(
-                    "emailSuffix", None
-                )
-                if (
-                    emailSuffix is not None and
-                    email.partition("@")[2] == emailSuffix
-                ):
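-                    # The address uses this record type's emailSuffix, so match
-                    # either a record whose naming attribute equals the local
-                    # part and which has no mail attribute, or a record whose
-                    # mail attribute equals the full address.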
-                    filterstr = "(&%s(|(&(!(mail=*))(%s=%s))(mail=%s)))" % (
-                        filterstr,
-                        self.rdnSchema[recordType].get("attr", "cn"),
-                        email.partition("@")[0],
-                        ldapEsc(email)
-                    )
-                else:
-                    # emailAddresses can map to multiple LDAP fields
-                    ldapFields = self.rdnSchema[recordType]["mapping"].get(
-                        "emailAddresses", ""
-                    )
-                    if isinstance(ldapFields, str):
-                        if ldapFields:
-                            subfilter = (
-                                "(%s=%s)" % (ldapFields, ldapEsc(email))
-                            )
-                        else:
-                            # No LDAP attribute assigned for emailAddresses
-                            continue
-
-                    else:
-                        subfilter = []
-                        for ldapField in ldapFields:
-                            if ldapField:
-                                subfilter.append(
-                                    "(%s=%s)" % (ldapField, ldapEsc(email))
-                                )
-                        if not subfilter:
-                            # No LDAP attribute assigned for emailAddresses
-                            continue
-
-                        subfilter = "(|%s)" % ("".join(subfilter))
-                    filterstr = "(&%s%s)" % (filterstr, subfilter)
-
-            elif indexType == self.INDEX_TYPE_AUTHID:
-                return
-
-            # Query the LDAP server
-            self.log.debug(
-                "Retrieving ldap record with base %s and filter %s.",
-                base=ldap.dn.dn2str(base), filter=filterstr,
-            )
-            result = queryMethod(
-                ldap.dn.dn2str(base),
-                ldap.SCOPE_SUBTREE,
-                filterstr=filterstr,
-                attrlist=self.attrlist,
-            )
-
-            if result:
-                dn, attrs = result.pop()
-                dn = normalizeDNstr(dn)
-
-                unrestricted = self.isAllowedByRestrictToGroup(dn, attrs)
-
-                try:
-                    record = self._ldapResultToRecord(dn, attrs, recordType)
-                    self.log.debug("Got LDAP record {rec}", rec=record)
-
-                    if not unrestricted:
-                        self.log.debug(
-                            "{dn} is not enabled because it's not a member of "
-                            "group {group!r}",
-                            dn=dn, group=self.restrictToGroup
-                        )
-                        record.enabledForCalendaring = False
-                        record.enabledForAddressBooks = False
-
-                    record.applySACLs()
-
-                    self.recordCacheForType(recordType).addRecord(
-                        record, indexType, indexKey
-                    )
-
-                    # We got a match, so don't bother checking other types
-                    break
-
-                except MissingRecordNameException:
-                    self.log.warn(
-                        "Ignoring record missing record name "
-                        "attribute: recordType {recordType}, indexType "
-                        "{indexType} and indexKey {indexKey}",
-                        recordType=recordType, indexType=indexType,
-                        indexKey=indexKey,
-                    )
-
-                except MissingGuidException:
-                    self.log.warn(
-                        "Ignoring record missing guid attribute: "
-                        "recordType {recordType}, indexType {indexType} and "
-                        "indexKey {indexKey}",
-                        recordType=recordType, indexType=indexType,
-                        indexKey=indexKey
-                    )
-
-
-    def recordsMatchingTokens(self, tokens, context=None, limitResults=50, timeoutSeconds=10):
-        """
-        # TODO: hook up limitResults to the client limit in the query
-
-        @param tokens: The tokens to search on
-        @type tokens: C{list} of C{str} (utf-8 bytes)
-        @param context: An indication of what the end user is searching
-            for; "attendee", "location", or None
-        @type context: C{str}
-        @return: a deferred sequence of L{IDirectoryRecord}s which
-            match the given tokens and optional context.
-
-        Each token is searched for within each record's full name and
-        email address; if every token is found within a record, that
-        record is returned in the results.
-
-        If context is None, all record types are considered.  If
-        context is "location", only locations are considered.  If
-        context is "attendee", only users, groups, and resources
-        are considered.
-        """
-        self.log.debug(
-            "Peforming calendar user search for {tokens} ({context})",
-            tokens=tokens, context=context
-        )
-        startTime = time.time()
-        records = []
-        recordTypes = self.recordTypesForSearchContext(context)
-        recordTypes = [r for r in recordTypes if r in self.recordTypes()]
-
-        typeCounts = {}
-        for recordType in recordTypes:
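-            # limitResults is decremented below as usable records are found,
-            # so reaching zero means the aggregate limit across all record
-            # types has been hit.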
-            if limitResults == 0:
-                self.log.debug("LDAP search aggregate limit reached")
-                break
-            typeCounts[recordType] = 0
-            base = self.typeDNs[recordType]
-            scope = ldap.SCOPE_SUBTREE
-            extraFilter = self.rdnSchema[recordType].get("filter", "")
-            filterstr = buildFilterFromTokens(
-                recordType,
-                self.rdnSchema[recordType]["mapping"],
-                tokens,
-                extra=extraFilter
-            )
-
-            if filterstr is not None:
-                # Query the LDAP server
-                self.log.debug(
-                    "LDAP search {base} {filter} (limit={limit:d})",
-                    base=ldap.dn.dn2str(base), filter=filterstr,
-                    limit=limitResults,
-                )
-                results = self.timedSearch(
-                    ldap.dn.dn2str(base),
-                    scope,
-                    filterstr=filterstr,
-                    attrlist=self.attrlist,
-                    timeoutSeconds=timeoutSeconds,
-                    resultLimit=limitResults
-                )
-                numMissingGuids = 0
-                numMissingRecordNames = 0
-                numNotEnabled = 0
-                for dn, attrs in results:
-                    dn = normalizeDNstr(dn)
-                    # Skip if group restriction is in place and guid is not
-                    # a member
-                    if (
-                            recordType != self.recordType_groups and
-                            not self.isAllowedByRestrictToGroup(dn, attrs)
-                    ):
-                        continue
-
-                    try:
-                        record = self._ldapResultToRecord(dn, attrs, recordType)
-
-                        # For non-group records, if not enabled for calendaring do
-                        # not include in principal property search results
-                        if (recordType != self.recordType_groups):
-                            if not record.enabledForCalendaring:
-                                numNotEnabled += 1
-                                continue
-
-                        records.append(record)
-                        typeCounts[recordType] += 1
-                        limitResults -= 1
-
-                    except MissingGuidException:
-                        numMissingGuids += 1
-
-                    except MissingRecordNameException:
-                        numMissingRecordNames += 1
-
-                self.log.debug(
-                    "LDAP search returned {resultCount:d} results, "
-                    "{typeCount:d} usable",
-                    resultCount=len(results), typeCount=typeCounts[recordType]
-                )
-
-        typeCountsStr = ", ".join(
-            ["%s:%d" % (rt, ct) for (rt, ct) in typeCounts.iteritems()]
-        )
-        totalTime = time.time() - startTime
-        self.log.info(
-            "Calendar user search for {tokens} matched {recordCount:d} "
-            "records ({typeCount}) in {time!.2f} seconds",
-            tokens=tokens, recordCount=len(records),
-            typeCount=typeCountsStr, time=totalTime,
-        )
-        return succeed(records)
-
-
-    @inlineCallbacks
-    def recordsMatchingFields(self, fields, operand="or", recordType=None):
-        """
-        Carries out the work of a principal-property-search against LDAP
-        Returns a deferred list of directory records.
-        """
-        records = []
-
-        self.log.debug(
-            "Performing principal property search for {fields}", fields=fields
-        )
-
-        if recordType is None:
-            # Make a copy since we're modifying it
-            recordTypes = list(self.recordTypes())
-
-            # principal-property-search syntax doesn't provide a way to ask
-            # for 3 of the 4 types (either all types or a single type).  This
-            # is wasteful in the case of iCal looking for event attendees
-            # since it always ignores the locations.  This config flag lets
-            # you skip querying for locations in this case:
-            if not self.queryLocationsImplicitly:
-                if self.recordType_locations in recordTypes:
-                    recordTypes.remove(self.recordType_locations)
-        else:
-            recordTypes = [recordType]
-
-        guidAttr = self.rdnSchema["guidAttr"]
-        for recordType in recordTypes:
-
-            base = self.typeDNs[recordType]
-
-            if fields[0][0] == "dn":
-                # DN's are not an attribute that can be searched on by filter
-                scope = ldap.SCOPE_BASE
-                filterstr = "(objectClass=*)"
-                base = ldap.dn.str2dn(fields[0][1])
-
-            else:
-                scope = ldap.SCOPE_SUBTREE
-                filterstr = buildFilter(
-                    recordType,
-                    self.rdnSchema[recordType]["mapping"],
-                    fields,
-                    operand=operand,
-                    optimizeMultiName=self.optimizeMultiName
-                )
-
-            if filterstr is not None:
-                # Query the LDAP server
-                self.log.debug(
-                    "LDAP search {base} {scope} {filter}",
-                    base=ldap.dn.dn2str(base), scope=scope, filter=filterstr
-                )
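-                # Run the blocking LDAP search in a worker thread via
-                # deferToThread so the reactor is not blocked while waiting
-                # on the server.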
-                results = (yield deferToThread(
-                    self.timedSearch,
-                    ldap.dn.dn2str(base),
-                    scope,
-                    filterstr=filterstr,
-                    attrlist=self.attrlist,
-                    timeoutSeconds=self.requestTimeoutSeconds,
-                    resultLimit=self.requestResultsLimit)
-                )
-                self.log.debug(
-                    "LDAP search returned {count} results", count=len(results)
-                )
-                numMissingGuids = 0
-                numMissingRecordNames = 0
-                for dn, attrs in results:
-                    dn = normalizeDNstr(dn)
-                    # Skip if group restriction is in place and guid is not
-                    # a member
-                    if (
-                        recordType != self.recordType_groups and
-                        not self.isAllowedByRestrictToGroup(dn, attrs)
-                    ):
-                        continue
-
-                    try:
-                        record = self._ldapResultToRecord(dn, attrs, recordType)
-
-                        # For non-group records, if not enabled for calendaring do
-                        # not include in principal property search results
-                        if (recordType != self.recordType_groups):
-                            if not record.enabledForCalendaring:
-                                continue
-
-                        records.append(record)
-
-                    except MissingGuidException:
-                        numMissingGuids += 1
-
-                    except MissingRecordNameException:
-                        numMissingRecordNames += 1
-
-                if numMissingGuids:
-                    self.log.warn(
-                        "{count:d} {type} records are missing {attr}",
-                        count=numMissingGuids, type=recordType, attr=guidAttr
-                    )
-
-                if numMissingRecordNames:
-                    self.log.warn(
-                        "{count:d} {type} records are missing record name",
-                        count=numMissingRecordNames, type=recordType,
-                    )
-
-        self.log.debug(
-            "Principal property search matched {count} records",
-            count=len(records)
-        )
-        returnValue(records)
-
-
-    @inlineCallbacks
-    def getGroups(self, guids):
-        """
-        Returns a set of group records for the list of guids passed in.  For
-        any group that also contains subgroups, those subgroups' records are
-        also returned, and so on.
-        """
-
-        recordsByAlias = {}
-
-        groupsDN = self.typeDNs[self.recordType_groups]
-        memberIdAttr = self.groupSchema["memberIdAttr"]
-
-        # First time through the loop we search using the attribute
-        # corresponding to guid, since that is what the proxydb uses.
-        # Subsequent iterations fault in groups via the attribute
-        # used to identify members.
-        attributeToSearch = "guid"
-        valuesToFetch = guids
-
-        while valuesToFetch:
-            results = []
-
-            if attributeToSearch == "dn":
-                # Since DN can't be searched on in a filter we have to call
-                # recordsMatchingFields for *each* DN.
-                for value in valuesToFetch:
-                    fields = [["dn", value, False, "equals"]]
-                    result = (
-                        yield self.recordsMatchingFields(
-                            fields,
-                            recordType=self.recordType_groups
-                        )
-                    )
-                    results.extend(result)
-            else:
-                for batch in splitIntoBatches(valuesToFetch, self.batchSize):
-                    fields = []
-                    for value in batch:
-                        fields.append([attributeToSearch, value, False, "equals"])
-                    result = (
-                        yield self.recordsMatchingFields(
-                            fields,
-                            recordType=self.recordType_groups
-                        )
-                    )
-                    results.extend(result)
-
-            # Reset values for next iteration
-            valuesToFetch = set()
-
-            for record in results:
-                alias = record.cachedGroupsAlias()
-                if alias not in recordsByAlias:
-                    recordsByAlias[alias] = record
-
-                # record.memberGUIDs() contains the members of this group,
-                # but it might not be in guid form; it will be data from
-                # self.groupSchema["memberIdAttr"]
-                for memberAlias in record.memberGUIDs():
-                    if not memberIdAttr:
-                        # Members are identified by dn so we can take a short
-                        # cut:  we know we only need to examine groups, and
-                        # those will be children of the groups DN
-                        if not dnContainedIn(ldap.dn.str2dn(memberAlias),
-                                             groupsDN):
-                            continue
-                    if memberAlias not in recordsByAlias:
-                        valuesToFetch.add(memberAlias)
-
-            # Switch to the LDAP attribute used for identifying members
-            # for subsequent iterations.  If memberIdAttr is not specified
-            # in the config, we'll search using dn.
-            attributeToSearch = "memberIdAttr" if memberIdAttr else "dn"
-
-        returnValue(recordsByAlias.values())
-
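For reference, a condensed sketch of the expansion loop above, using a hypothetical
fetchGroups(attribute, values) helper in place of the batched recordsMatchingFields()
calls (the real method also handles batching and the restrict-to-group check):

    def expandGroups(guids, fetchGroups, memberIdAttr):
        # First pass keys off guid (what the proxy DB stores); later passes
        # key off memberIdAttr, or the dn when no memberIdAttr is configured.
        seen = {}  # alias -> group record
        attribute, values = "guid", set(guids)
        while values:
            nextValues = set()
            for record in fetchGroups(attribute, values):
                alias = record.cachedGroupsAlias()
                if alias not in seen:
                    seen[alias] = record
                    # queue this group's members so nested groups are faulted in
                    nextValues.update(record.memberGUIDs())
            values = nextValues - set(seen)
            attribute = memberIdAttr if memberIdAttr else "dn"
        return seen.values()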
-
-    def recordTypeForDN(self, dnStr):
-        """
-        Examine a DN to determine which recordType it belongs to
-        @param dnStr: DN to compare
-        @type dnStr: C{str}
-        @return: recordType string, or None if no match
-        """
-        dn = ldap.dn.str2dn(dnStr.lower())
-        for recordType in self.recordTypes():
-            base = self.typeDNs[recordType]  # already lowercase
-            if dnContainedIn(dn, base):
-                return recordType
-        return None
-
-
-
-def dnContainedIn(child, parent):
-    """
-    Return True if child dn is contained within parent dn, otherwise False.
-    """
-    return child[-len(parent):] == parent
-
-
-
-def normalizeDNstr(dnStr):
-    """
-    Convert to lowercase and remove extra whitespace
-    @param dnStr: dn
-    @type dnStr: C{str}
-    @return: normalized dn C{str}
-    """
-    return ' '.join(ldap.dn.dn2str(ldap.dn.str2dn(dnStr.lower())).split())
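A small usage sketch of these two helpers, with made-up example DNs (python-ldap
provides ldap.dn):

    import ldap.dn

    parent = ldap.dn.str2dn("ou=groups,dc=example,dc=com")
    child = ldap.dn.str2dn("cn=staff,ou=groups,dc=example,dc=com")
    assert dnContainedIn(child, parent)  # the child's trailing RDNs match the parent

    # lowercased, and whitespace between RDN components collapsed
    assert normalizeDNstr("CN=Staff, OU=Groups, DC=Example, DC=Com") == \
        "cn=staff,ou=groups,dc=example,dc=com"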
-
-
-
-def _convertValue(value, matchType):
-    if matchType == "starts-with":
-        value = "%s*" % (ldapEsc(value),)
-    elif matchType == "contains":
-        value = "*%s*" % (ldapEsc(value),)
-    # otherwise it's an exact match
-    else:
-        value = ldapEsc(value)
-    return value
-
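For illustration, the three match types map onto LDAP wildcards as follows (the
example value contains no metacharacters for ldapEsc to escape):

    _convertValue("mor", "starts-with")  # -> "mor*"
    _convertValue("mor", "contains")     # -> "*mor*"
    _convertValue("mor", "equals")       # -> "mor" (treated as an exact match)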
-
-
-def buildFilter(recordType, mapping, fields, operand="or", optimizeMultiName=False):
-    """
-    Create an LDAP filter string from a list of tuples representing directory
-    attributes to search
-
-    mapping is a dict mapping internal directory attribute names to ldap names.
-    fields is a list of tuples...
-        (directory field name, value to search, caseless (ignored), matchType)
-    ...where matchType is one of "starts-with", "contains", "exact"
-    """
-
-    converted = []
-    combined = {}
-    for field, value, caseless, matchType in fields:
-        ldapField = mapping.get(field, None)
-        if ldapField:
-            combined.setdefault(field, []).append((value, caseless, matchType))
-            value = _convertValue(value, matchType)
-            if isinstance(ldapField, str):
-                converted.append("(%s=%s)" % (ldapField, value))
-            else:
-                subConverted = []
-                for lf in ldapField:
-                    subConverted.append("(%s=%s)" % (lf, value))
-                converted.append("(|%s)" % "".join(subConverted))
-
-    if len(converted) == 0:
-        return None
-
-    if optimizeMultiName and recordType in ("users", "groups"):
-        for field in [key for key in combined.keys() if key != "guid"]:
-            if len(combined.get(field, [])) > 1:
-                # Client is searching on more than one name -- interpret this as the user
-                # explicitly looking up a user by name (ignoring other record types), and
-                # try the various firstName/lastName permutations:
-                if recordType == "users":
-                    converted = []
-                    for firstName, _ignore_firstCaseless, firstMatchType in combined["firstName"]:
-                        for lastName, _ignore_lastCaseless, lastMatchType in combined["lastName"]:
-                            if firstName != lastName:
-                                firstValue = _convertValue(firstName, firstMatchType)
-                                lastValue = _convertValue(lastName, lastMatchType)
-                                converted.append(
-                                    "(&(%s=%s)(%s=%s))" %
-                                    (mapping["firstName"], firstValue,
-                                     mapping["lastName"], lastValue)
-                                )
-                else:
-                    return None
-
-    if len(converted) == 1:
-        filterstr = converted[0]
-    else:
-        operand = ("|" if operand == "or" else "&")
-        filterstr = "(%s%s)" % (operand, "".join(converted))
-
-    if filterstr:
-        # To reduce the amount of records returned, filter out the ones
-        # that don't have (possibly) required attribute values (record
-        # name, guid)
-        additional = []
-        for key in ("recordName", "guid"):
-            if key in mapping:
-                additional.append("(%s=*)" % (mapping.get(key),))
-        if additional:
-            filterstr = "(&%s%s)" % ("".join(additional), filterstr)
-
-    return filterstr
-
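A worked example of buildFilter() with a hypothetical attribute mapping; the
resulting filter is traced from the code above:

    mapping = {
        "recordName": "uid",
        "guid": "apple-generateduid",
        "fullName": "cn",
        "emailAddresses": "mail",
    }
    fields = [
        ("fullName", "mor", True, "starts-with"),
        ("emailAddresses", "mor", True, "starts-with"),
    ]
    buildFilter("users", mapping, fields, operand="or")
    # -> "(&(uid=*)(apple-generateduid=*)(|(cn=mor*)(mail=mor*)))"
    # The OR of the requested fields is ANDed with "record name and guid must be
    # present", so records unusable by the server are filtered out by LDAP itself.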
-
-
-def buildFilterFromTokens(recordType, mapping, tokens, extra=None):
-    """
-    Create an LDAP filter string from a list of query tokens.  Each token is
-    searched for in each LDAP attribute corresponding to "fullName" and
-    "emailAddresses" (could be multiple LDAP fields for either).
-
-    @param recordType: The recordType to use to customize the filter
-    @param mapping: A dict mapping internal directory attribute names to ldap names.
-    @type mapping: C{dict}
-    @param tokens: The list of tokens to search for
-    @type tokens: C{list}
-    @param extra: Extra filter to "and" into the final filter
-    @type extra: C{str} or None
-    @return: An LDAP filterstr
-    @rtype: C{str}
-    """
-
-    filterStr = None
-
-    # Eliminate any substring duplicates
-    tokenSet = set()
-    for token in tokens:
-        collision = False
-        for existing in tokenSet:
-            if token in existing:
-                collision = True
-                break
-            elif existing in token:
-                tokenSet.remove(existing)
-                break
-        if not collision:
-            tokenSet.add(token)
-
-    tokens = [ldapEsc(t) for t in tokenSet]
-    if len(tokens) == 0:
-        return None
-    tokens.sort()
-
-    attributes = [
-        ("fullName", "(%s=*%s*)"),
-        ("emailAddresses", "(%s=%s*)"),
-    ]
-
-    ldapFields = []
-    for attribute, template in attributes:
-        ldapField = mapping.get(attribute, None)
-        if ldapField:
-            if isinstance(ldapField, str):
-                ldapFields.append((ldapField, template))
-            else:
-                for lf in ldapField:
-                    ldapFields.append((lf, template))
-
-    if len(ldapFields) == 0:
-        return None
-
-    tokenFragments = []
-    if extra:
-        tokenFragments.append(extra)
-
-    for token in tokens:
-        fragments = []
-        for ldapField, template in ldapFields:
-            fragments.append(template % (ldapField, token))
-        if len(fragments) == 1:
-            tokenFragment = fragments[0]
-        else:
-            tokenFragment = "(|%s)" % ("".join(fragments),)
-        tokenFragments.append(tokenFragment)
-
-    if len(tokenFragments) == 1:
-        filterStr = tokenFragments[0]
-    else:
-        filterStr = "(&%s)" % ("".join(tokenFragments),)
-
-    return filterStr
-
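And a similar sketch for buildFilterFromTokens(), again with a hypothetical mapping;
each token must match somewhere (full-name substring or email-address prefix):

    mapping = {"fullName": "cn", "emailAddresses": "mail"}
    buildFilterFromTokens("users", mapping, ["mor", "sag"])
    # -> "(&(|(cn=*mor*)(mail=mor*))(|(cn=*sag*)(mail=sag*)))"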
-
-
-class LdapDirectoryRecord(CachingDirectoryRecord):
-    """
-    LDAP implementation of L{IDirectoryRecord}.
-    """
-    def __init__(
-        self, service, recordType,
-        guid, shortNames, authIDs, fullName,
-        firstName, lastName, emailAddresses,
-        uid, dn, memberGUIDs, extProxies, extReadOnlyProxies,
-        attrs, **kwargs
-    ):
-        super(LdapDirectoryRecord, self).__init__(
-            service=service,
-            recordType=recordType,
-            guid=guid,
-            shortNames=shortNames,
-            authIDs=authIDs,
-            fullName=fullName,
-            firstName=firstName,
-            lastName=lastName,
-            emailAddresses=emailAddresses,
-            extProxies=extProxies,
-            extReadOnlyProxies=extReadOnlyProxies,
-            uid=uid,
-            **kwargs
-        )
-
-        # Save dn and attrs in case they are needed later
-        self.dn = dn
-        self.attrs = attrs
-
-        # Store copy of member guids
-        self._memberGUIDs = memberGUIDs
-
-        # Identifier of this record as a group member
-        memberIdAttr = self.service.groupSchema["memberIdAttr"]
-        if memberIdAttr:
-            self._memberId = self.service._getUniqueLdapAttribute(
-                attrs,
-                memberIdAttr
-            )
-        else:
-            self._memberId = normalizeDNstr(self.dn)
-
-
-    def members(self):
-        """ Return the records representing members of this group """
-
-        try:
-            return self._members_storage
-        except AttributeError:
-            self._members_storage = self._members()
-            return self._members_storage
-
-
-    def _members(self):
-        """ Fault in records for the members of this group """
-
-        memberIdAttr = self.service.groupSchema["memberIdAttr"]
-        results = []
-
-        for memberId in self._memberGUIDs:
-
-            if memberIdAttr:
-
-                base = self.service.base
-                filterstr = "(%s=%s)" % (memberIdAttr, ldapEsc(memberId))
-                self.log.debug(
-                    "Retrieving subtree of {base} with filter {filter}",
-                    base=ldap.dn.dn2str(base), filter=filterstr,
-                    system="LdapDirectoryService"
-                )
-                result = self.service.timedSearch(
-                    ldap.dn.dn2str(base),
-                    ldap.SCOPE_SUBTREE,
-                    filterstr=filterstr,
-                    attrlist=self.service.attrlist
-                )
-
-            else:  # using DN
-
-                self.log.debug(
-                    "Retrieving {id}.",
-                    id=memberId, system="LdapDirectoryService"
-                )
-                result = self.service.timedSearch(
-                    memberId,
-                    ldap.SCOPE_BASE, attrlist=self.service.attrlist
-                )
-
-            if result:
-
-                dn, attrs = result.pop()
-                dn = normalizeDNstr(dn)
-                self.log.debug("Retrieved: {dn} {attrs}", dn=dn, attrs=attrs)
-                recordType = self.service.recordTypeForDN(dn)
-                if recordType is None:
-                    self.log.error(
-                        "Unable to map {dn} to a record type", dn=dn
-                    )
-                    continue
-
-                shortName = self.service._getUniqueLdapAttribute(
-                    attrs,
-                    self.service.rdnSchema[recordType]["mapping"]["recordName"]
-                )
-
-                if shortName:
-                    record = self.service.recordWithShortName(
-                        recordType,
-                        shortName
-                    )
-                    if record:
-                        results.append(record)
-
-        return results
-
-
-    def groups(self):
-        """ Return the records representing groups this record is a member of """
-        try:
-            return self._groups_storage
-        except AttributeError:
-            self._groups_storage = self._groups()
-            return self._groups_storage
-
-
-    def _groups(self):
-        """ Fault in the groups of which this record is a member """
-
-        recordType = self.service.recordType_groups
-        base = self.service.typeDNs[recordType]
-
-        membersAttrs = []
-        if self.service.groupSchema["membersAttr"]:
-            membersAttrs.append(self.service.groupSchema["membersAttr"])
-        if self.service.groupSchema["nestedGroupsAttr"]:
-            membersAttrs.append(self.service.groupSchema["nestedGroupsAttr"])
-
-        if len(membersAttrs) == 1:
-            filterstr = "(%s=%s)" % (membersAttrs[0], self._memberId)
-        else:
-            filterstr = "(|%s)" % (
-                "".join(
-                    ["(%s=%s)" % (a, self._memberId) for a in membersAttrs]
-                ),
-            )
-        self.log.debug("Finding groups containing {id}", id=self._memberId)
-        groups = []
-
-        try:
-            results = self.service.timedSearch(
-                ldap.dn.dn2str(base),
-                ldap.SCOPE_SUBTREE,
-                filterstr=filterstr,
-                attrlist=self.service.attrlist
-            )
-
-            for dn, attrs in results:
-                dn = normalizeDNstr(dn)
-                shortName = self.service._getUniqueLdapAttribute(attrs, "cn")
-                self.log.debug(
-                    "{id} is a member of {shortName}",
-                    id=self._memberId, shortName=shortName
-                )
-                record = self.service.recordWithShortName(recordType, shortName)
-                if record is not None:
-                    groups.append(record)
-        except ldap.PROTOCOL_ERROR, e:
-            self.log.warn("{e}", e=e)
-
-        return groups
-
-
-    def cachedGroupsAlias(self):
-        """
-        See directory.py for full description
-
-        LDAP group members can be referred to by attributes other than guid.  _memberId
-        will be set to the appropriate value to look up group-membership with.
-        """
-        return self._memberId
-
-
-    def memberGUIDs(self):
-        return set(self._memberGUIDs)
-
-
-    def verifyCredentials(self, credentials):
-        """ Supports PAM or simple LDAP bind for username+password """
-
-        if isinstance(credentials, UsernamePassword):
-
-            # TODO: investigate:
-            # Check that the username supplied matches one of the shortNames
-            # (The DCS might already enforce this constraint, not sure)
-            if credentials.username not in self.shortNames:
-                return False
-
-            # Check cached password
-            try:
-                if credentials.password == self.password:
-                    return True
-            except AttributeError:
-                pass
-
-            if self.service.authMethod.upper() == "PAM":
-                # Authenticate against PAM (UNTESTED)
-
-                if not pamAvailable:
-                    self.log.error("PAM module is not installed")
-                    raise DirectoryConfigurationError()
-
-                def pam_conv(auth, query_list, userData):
-                    return [(credentials.password, 0)]
-
-                auth = PAM.pam()
-                auth.start("caldav")
-                auth.set_item(PAM.PAM_USER, credentials.username)
-                auth.set_item(PAM.PAM_CONV, pam_conv)
-                try:
-                    auth.authenticate()
-                except PAM.error:
-                    return False
-                else:
-                    # Cache the password to avoid further LDAP queries
-                    self.password = credentials.password
-                    return True
-
-            elif self.service.authMethod.upper() == "LDAP":
-
-                # Authenticate against LDAP
-                try:
-                    self.service.authenticate(self.dn, credentials.password)
-                    # Cache the password to avoid further LDAP queries
-                    self.password = credentials.password
-                    return True
-
-                except ldap.INVALID_CREDENTIALS:
-                    self.log.info(
-                        "Invalid credentials for {dn}",
-                        dn=repr(self.dn), system="LdapDirectoryService"
-                    )
-                    return False
-
-            else:
-                self.log.error(
-                    "Unknown Authentication Method {method!r}",
-                    method=self.service.authMethod.upper()
-                )
-                raise DirectoryConfigurationError()
-
-        return super(LdapDirectoryRecord, self).verifyCredentials(credentials)
-
-
-
-class MissingRecordNameException(Exception):
-    """ Raised when LDAP record is missing recordName """
-    pass
-
-
-
-class MissingGuidException(Exception):
-    """ Raised when LDAP record is missing guidAttr and it's required """
-    pass

Deleted: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/opendirectorybacker.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/opendirectorybacker.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/opendirectorybacker.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -1,1960 +0,0 @@
-##
-# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-
-"""
-Apple Open Directory directory service implementation for backing directory-backed address books
-"""
-
-__all__ = [
-    "OpenDirectoryBackingService", "VCardRecord",
-]
-
-from calendarserver.platform.darwin.od import opendirectory, dsattributes, dsquery
-
-from pycalendar.datetime import DateTime
-from pycalendar.vcard.adr import Adr
-from pycalendar.vcard.n import N
-
-
-from twext.python.filepath import CachingFilePath as FilePath
-
-from txweb2.dav.resource import DAVPropertyMixIn
-from txweb2.dav.util import joinURL
-from txweb2.http_headers import MimeType, generateContentType, ETag
-
-from twisted.internet import reactor
-from twisted.internet.defer import inlineCallbacks, returnValue, deferredGenerator, succeed
-
-from twistedcaldav import customxml, carddavxml
-from twistedcaldav.config import config
-from twistedcaldav.customxml import calendarserver_namespace
-from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
-from twistedcaldav.memcachelock import MemcacheLock, MemcacheLockTimeoutError
-from twistedcaldav.vcard import Component, Property, vCardProductID
-
-from txdav.carddav.datastore.query.filter import IsNotDefined, ParameterFilter, \
-    TextMatch
-from txdav.xml import element as davxml
-from txdav.xml.base import twisted_dav_namespace, dav_namespace, parse_date, twisted_private_namespace
-
-from os import listdir
-from os.path import join, abspath
-from random import random
-from socket import getfqdn
-from tempfile import mkstemp, gettempdir
-from xmlrpclib import datetime
-import hashlib
-import os
-import sys
-import time
-import traceback
-
-class OpenDirectoryBackingService(DirectoryService):
-    """
-    Open Directory implementation of L{IDirectoryService}.
-    """
-
-    baseGUID = "BF07A1A2-5BB5-4A4D-A59A-67260EA7E143"
-
-    def __repr__(self):
-        return "<%s %r>" % (self.__class__.__name__, self.realmName,)
-
-
-    def __init__(self, params):
-        self._actuallyConfigure(**params)
-
-
-    def _actuallyConfigure(
-        self, queryPeopleRecords=True,
-        peopleNode="/Search/Contacts",
-        queryUserRecords=True,
-        userNode="/Search",
-        maxDSQueryRecords=0,            # maximum number of records requested for any ds query
-
-        queryDSLocal=False,             # query in DSLocal -- debug
-        dsLocalCacheTimeout=30,
-        ignoreSystemRecords=True,
-
-        liveQuery=True,                 # query directory service as needed
-        fakeETag=True,                  # eTag is not reliable if True
-
-        cacheQuery=False,
-        cacheTimeout=30,                # cache timeout
-
-        addDSAttrXProperties=False,        # add dsattributes to vcards as "X-" attributes
-        standardizeSyntheticUIDs=False,  # use simple synthetic UIDs --- good for testing
-        appleInternalServer=False,
-
-        additionalAttributes=[],
-        allowedAttributes=[],
-        directoryBackedAddressBook=None
-    ):
-        """
-        @queryPeopleRecords: C{True} to query for People records
-        @queryUserRecords: C{True} to query for User records
-        @maxDSQueryRecords: maximum number of (unfiltered) ds records retrieved before raising
-            NumberOfMatchesWithinLimits exception or returning results
-        @dsLocalCacheTimeout: how long to keep the cache of DSLocal records
-        @liveQuery: C{True} to query the directory as needed
-        @fakeETag: C{True} to use a fake eTag; allows ds queries with partial attributes
-        @cacheQuery: C{True} to query the directory and cache results
-        @cacheTimeout: if caching, the average cache timeout
-        @standardizeSyntheticUIDs: C{True} when creating synthetic UID (==f(Node, Type, Record Name)),
-            use a standard Node name. This allows testing with the same UID on different hosts
-        @allowedAttributes: list of DSAttributes that are used to create VCards
-
-        """
-        assert directoryBackedAddressBook is not None
-        self.directoryBackedAddressBook = directoryBackedAddressBook
-
-        self.peopleDirectory = None
-        self.peopleNode = None
-        self.userDirectory = None
-        self.userNode = None
-
-        self.realmName = None # needed for super
-
-        if queryPeopleRecords or not queryUserRecords:
-            self.peopleNode = peopleNode
-            try:
-                self.peopleDirectory = opendirectory.odInit(peopleNode)
-            except opendirectory.ODError, e:
-                self.log.error("Open Directory (node=%s) Initialization error: %s" % (peopleNode, e))
-                raise
-            self.realmName = peopleNode
-
-        if queryUserRecords:
-            if self.peopleNode == userNode:          # use same directory and node if they are equal
-                self.userNode = self.peopleNode
-                self.userDirectory = self.peopleDirectory
-            else:
-                self.userNode = userNode
-                try:
-                    self.userDirectory = opendirectory.odInit(userNode)
-                except opendirectory.ODError, e:
-                    self.log.error("Open Directory (node=%s) Initialization error: %s" % (userNode, e))
-                    raise
-                if self.realmName:
-                    self.realmName += "+" + userNode
-                else:
-                    self.realmName = userNode
-
-        self.maxDSQueryRecords = maxDSQueryRecords
-
-        self.ignoreSystemRecords = ignoreSystemRecords
-        self.queryDSLocal = queryDSLocal
-        self.dsLocalCacheTimeout = dsLocalCacheTimeout
-
-        self.liveQuery = liveQuery or not cacheQuery
-        self.fakeETag = fakeETag
-
-        self.cacheQuery = cacheQuery
-
-        self.cacheTimeout = cacheTimeout if cacheTimeout > 0 else 30
-
-        self.addDSAttrXProperties = addDSAttrXProperties
-        self.standardizeSyntheticUIDs = standardizeSyntheticUIDs
-        self.appleInternalServer = appleInternalServer
-
-        self.additionalAttributes = additionalAttributes
-        # filter to the allowed attributes, but keep the minimum set of attributes needed for functionality
-        if allowedAttributes:
-            self.allowedDSQueryAttributes = sorted(list(set(
-                                                [attr for attr in VCardRecord.allDSQueryAttributes
-                                                    if (isinstance(attr, str) and attr in allowedAttributes) or
-                                                       (isinstance(attr, tuple) and attr[0] in allowedAttributes)] +
-                                                VCardRecord.dsqueryAttributesForProperty.get("X-INTERNAL-REQUIRED")
-                                                )))
-            if (self.allowedDSQueryAttributes != VCardRecord.allDSQueryAttributes):
-                self.log.info("Allowed DS query attributes = %r" % (self.allowedDSQueryAttributes,))
-        else:
-            self.allowedDSQueryAttributes = VCardRecord.allDSQueryAttributes
-
-        #self.returnedAttributes = VCardRecord.allDSQueryAttributes
-        self.returnedAttributes = self.allowedDSQueryAttributes
-
-        self._dsLocalRecords = []
-        self._nextDSLocalQueryTime = 0
-
-        # get this now once
-        hostname = getfqdn()
-        if hostname:
-            self.defaultNodeName = "/LDAPv3/" + hostname
-        else:
-            self.defaultNodeName = None
-
-        #cleanup
-        self._cleanupTime = time.time()
-
-        # file system locks
-        self._initLockPath = join(config.DocumentRoot, ".directory_address_book_create_lock")
-        self._createdLockPath = join(config.DocumentRoot, ".directory_address_book_created_lock")
-        self._updateLockPath = join(config.DocumentRoot, ".directory_address_book_update_lock")
-        self._tmpDirAddressBookLockPath = join(config.DocumentRoot, ".directory_address_book_tmpFolder_lock")
-
-        self._updateLock = MemcacheLock("OpenDirectoryBacker", self._updateLockPath)
-        self._tmpDirAddressBookLock = MemcacheLock("OpenDirectoryBacker", self._tmpDirAddressBookLockPath)
-
-        # optimization so we don't have to always get create lock
-        self._triedCreateLock = False
-        self._created = False
-
-
-    def __cmp__(self, other):
-        if not isinstance(other, DirectoryRecord):
-            return super(DirectoryRecord, self).__eq__(other)
-
-        for attr in ("directory", "node"):
-            diff = cmp(getattr(self, attr), getattr(other, attr))
-            if diff != 0:
-                return diff
-        return 0
-
-
-    def __hash__(self):
-        h = hash(self.__class__.__name__)
-        for attr in ("node",):
-            h = (h + hash(getattr(self, attr))) & sys.maxint
-        return h
-
-
-    @inlineCallbacks
-    def available(self):
-        if not self._triedCreateLock:
-            returnValue(False)
-        elif not self._created:
-            createdLock = MemcacheLock("OpenDirectoryBacker", self._createdLockPath)
-            self.log.debug("blocking on lock of: \"%s\")" % self._createdLockPath)
-            self._created = (yield createdLock.locked())
-
-        returnValue(self._created)
-
-
-    def updateLock(self):
-        return self._updateLock
-
-
-    @inlineCallbacks
-    def createCache(self):
-        """
-        If caching, create the cache for the first time.
-        """
-
-        if not self.liveQuery:
-            self.log.info("loading directory address book")
-
-            # get init lock
-            initLock = MemcacheLock("OpenDirectoryBacker", self._initLockPath, timeout=0)
-            self.log.debug("Attempt lock of: \"%s\")" % self._initLockPath)
-            gotCreateLock = False
-            try:
-                yield initLock.acquire()
-                gotCreateLock = True
-            except MemcacheLockTimeoutError:
-                pass
-
-            self._triedCreateLock = True
-
-            if gotCreateLock:
-                self.log.debug("Got lock!")
-                yield self._refreshCache(flushCache=False, creating=True)
-            else:
-                self.log.debug("Could not get lock - directory address book will be filled by peer")
-
-
-    @inlineCallbacks
-    def _refreshCache(self, flushCache=False, creating=False, reschedule=True, query=None, attributes=None, keepLock=False, clear=False, maxRecords=0):
-        """
-        refresh the cache.
-        """
-
-        #print("_refreshCache:, flushCache=%s, creating=%s, reschedule=%s, query = %s" % (flushCache, creating, reschedule, "None" if query is None else query.generate(),))
-
-        def refreshLater():
-            #
-            # Add jitter/fuzz factor to avoid stampede for large OD query
-            #
-            cacheTimeout = min(self.cacheTimeout, 60) * 60
-            cacheTimeout = (cacheTimeout * random()) - (cacheTimeout / 2)
-            cacheTimeout += self.cacheTimeout * 60
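-            # Net effect: the delay is uniform in [0.5x, 1.5x] of the configured
-            # cacheTimeout (in seconds), with the spread capped at +/- 30 minutes.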
-            reactor.callLater(cacheTimeout, self._refreshCache) #@UndefinedVariable
-            self.log.info("Refresh directory address book in %d minutes %d seconds" % divmod(cacheTimeout, 60))
-
-        def cleanupLater():
-
-            # try to cancel previous call if last clean up was less than 15 minutes ago
-            if (time.time() - self._cleanupTime) < 15 * 60:
-                try:
-                    self._lastCleanupCall.cancel()
-                except:
-                    pass
-
-            #
-            # Add jitter/fuzz factor
-            #
-            nom = 120
-            later = nom * (random() + .5)
-            self._lastCleanupCall = reactor.callLater(later, removeTmpAddressBooks) #@UndefinedVariable
-            self.log.info("Remove temporary directory address books in %d minutes %d seconds" % divmod(later, 60))
-
-
-        def getTmpDirAndTmpFilePrefixSuffix():
-            # need to have temp file on same volumes as documents so that move works
-            absDocPath = abspath(config.DocumentRoot)
-            if absDocPath.startswith("/Volumes/"):
-                tmpDir = absDocPath
-                prefix = ".directoryAddressBook-"
-            else:
-                tmpDir = gettempdir()
-                prefix = "directoryAddressBook-"
-
-            return (tmpDir, prefix, ".tmp")
-
-        def makeTmpFilename():
-            tmpDir, prefix, suffix = getTmpDirAndTmpFilePrefixSuffix()
-            fd, fname = mkstemp(suffix=suffix, prefix=prefix, dir=tmpDir)
-            os.close(fd)
-            os.remove(fname)
-            return fname
-
-        @inlineCallbacks
-        def removeTmpAddressBooks():
-            self.log.info("Checking for temporary directory address books")
-            tmpDir, prefix, suffix = getTmpDirAndTmpFilePrefixSuffix()
-
-            tmpDirLock = self._tmpDirAddressBookLock
-            self.log.debug("blocking on lock of: \"%s\")" % self._tmpDirAddressBookLockPath)
-            yield tmpDirLock.acquire()
-
-            try:
-                for name in listdir(tmpDir):
-                    if name.startswith(prefix) and name.endswith(suffix):
-                        try:
-                            path = join(tmpDir, name)
-                            self.log.info("Deleting temporary directory address book at: %s" % path)
-                            FilePath(path).remove()
-                            self.log.debug("Done deleting")
-                        except:
-                            self.log.info("Deletion failed")
-            finally:
-                self.log.debug("unlocking: \"%s\")" % self._tmpDirAddressBookLockPath)
-                yield tmpDirLock.release()
-
-            self._cleanupTime = time.time()
-
-        updateLock = None
-        limited = False
-        try:
-
-            try:
-                # get the records
-                if clear:
-                    records = {}
-                else:
-                    records, limited = (yield self._getDirectoryRecords(query, attributes, maxRecords))
-
-                # calculate the hash
-                # simple for now, could use MD5 digest if too many collisions
-                newAddressBookCTag = customxml.GETCTag(str(hash(self.baseGUID + ":" + self.realmName + ":" + "".join(str(hash(records[key])) for key in records.keys()))))
-
-                # get the old hash
-                oldAddressBookCTag = ""
-                updateLock = self.updateLock()
-                self.log.debug("blocking on lock of: \"%s\")" % self._updateLockPath)
-                yield updateLock.acquire()
-
-                if not flushCache:
-                    # get update lock
-                    try:
-                        oldAddressBookCTag = self.directoryBackedAddressBook.readDeadProperty((calendarserver_namespace, "getctag"))
-                    except:
-                        oldAddressBookCTag = ""
-
-                self.log.debug("Comparing {http://calendarserver.org/ns/}getctag: new = %s, old = %s" % (newAddressBookCTag, oldAddressBookCTag))
-                if str(newAddressBookCTag) != str(oldAddressBookCTag):
-
-                    self.log.debug("unlocking: \"%s\")" % self._updateLockPath)
-                    yield updateLock.release()
-                    updateLock = None
-
-                if not keepLock:
-                    self.log.debug("unlocking: \"%s\")" % self._updateLockPath)
-                    yield updateLock.release()
-                    updateLock = None
-
-            except:
-                cleanupLater()
-                if reschedule:
-                    refreshLater()
-                raise
-
-            if creating:
-                createdLock = MemcacheLock("OpenDirectoryBacker", self._createdLockPath)
-                self.log.debug("blocking on lock of: \"%s\")" % self._createdLockPath)
-                yield createdLock.acquire()
-
-            cleanupLater()
-            if reschedule:
-                refreshLater()
-
-        except:
-            if updateLock:
-                yield updateLock.release()
-            raise
-
-        returnValue((updateLock, limited))
-
-
-    def _getDSLocalRecords(self):
-
-        def generateDSLocalRecords():
-
-            records = {}
-
-            recordTypes = [dsattributes.kDSStdRecordTypePeople, dsattributes.kDSStdRecordTypeUsers, ]
-            try:
-                localNodeDirectory = opendirectory.odInit("/Local/Default")
-                self.log.debug("opendirectory.listAllRecordsWithAttributes_list(%r,%r,%r)" % (
-                        "/DSLocal",
-                        recordTypes,
-                        self.returnedAttributes,
-                    ))
-                results = list(opendirectory.listAllRecordsWithAttributes_list(
-                        localNodeDirectory,
-                        recordTypes,
-                        self.returnedAttributes,
-                    ))
-            except opendirectory.ODError, ex:
-                self.log.error("Open Directory (node=%s) error: %s" % ("/Local/Default", str(ex)))
-                raise
-
-            self._dsLocalRecords = []
-            for (recordShortName, value) in results: #@UnusedVariable
-
-                record = VCardRecord(self, value, "/Local/Default")
-
-                if self.ignoreSystemRecords:
-                    # remove system users and people
-                    if record.guid.startswith("FFFFEEEE-DDDD-CCCC-BBBB-AAAA"):
-                        self.log.info("Ignoring vcard for system record %s" % (record,))
-                        continue
-
-                if record.guid in records:
-                    self.log.info("Record skipped due to conflict (duplicate uuid): %s" % (record,))
-                else:
-                    try:
-                        vCardText = record.vCardText()
-                    except:
-                        traceback.print_exc()
-                        self.log.info("Could not get vcard for record %s" % (record,))
-                    else:
-                        self.log.debug("VCard text =\n%s" % (vCardText,))
-                        records[record.guid] = record
-
-            return records
-
-        if not self.liveQuery or not self.queryDSLocal:
-            return {}
-
-        if time.time() > self._nextDSLocalQueryTime:
-            self._dsLocalRecords = generateDSLocalRecords()
-            # Add jitter/fuzz factor
-            self._nextDSLocalQueryTime = time.time() + self.dsLocalCacheTimeout * (random() + 0.5) * 60
-
-        return self._dsLocalRecords
-
-
-    @inlineCallbacks
-    def _getDirectoryRecords(self, query=None, attributes=None, maxRecords=0):
-        """
-        Get a list of filtered VCardRecord for the given query with the given attributes.
-        query == None gets all records. attribute == None gets VCardRecord.allDSQueryAttributes
-        """
-        limited = False
-        queryResults = (yield self._queryDirectory(query, attributes, maxRecords))
-        if maxRecords and len(queryResults) >= maxRecords:
-            limited = True
-            self.log.debug("Directory address book record limit (= %d) reached." % (maxRecords,))
-
-        self.log.debug("Query done. Inspecting %s results" % len(queryResults))
-
-        records = self._getDSLocalRecords().copy()
-        self.log.debug("Adding %s DSLocal results" % len(records.keys()))
-
-        for (recordShortName, value) in queryResults: #@UnusedVariable
-
-            record = VCardRecord(self, value, self.defaultNodeName)
-
-            if self.ignoreSystemRecords:
-                # remove system users and people
-                if record.guid.startswith("FFFFEEEE-DDDD-CCCC-BBBB-AAAA"):
-                    self.log.info("Ignoring vcard for system record %s" % (record,))
-                    continue
-
-            if record.guid in records:
-                self.log.info("Ignoring vcard for record due to conflict (duplicate uuid): %s" % (record,))
-            else:
-                records[record.guid] = record
-
-        self.log.debug("After filtering, %s records (limited=%s)." % (len(records), limited))
-        returnValue((records, limited,))
-
-
-    def _queryDirectory(self, query=None, attributes=None, maxRecords=0):
-
-        startTime = time.time()
-
-        if not attributes:
-            attributes = self.returnedAttributes
-
-        attributes = list(set(attributes + self.additionalAttributes)) # remove duplicates
-
-        directoryAndRecordTypes = []
-        if self.peopleDirectory == self.userDirectory:
-            # use single ds query if possible for best performance
-            directoryAndRecordTypes.append((self.peopleDirectory, self.peopleNode, (dsattributes.kDSStdRecordTypePeople, dsattributes.kDSStdRecordTypeUsers)))
-        else:
-            if self.peopleDirectory:
-                directoryAndRecordTypes.append((self.peopleDirectory, self.peopleNode, dsattributes.kDSStdRecordTypePeople))
-            if self.userDirectory:
-                directoryAndRecordTypes.append((self.userDirectory, self.userNode, dsattributes.kDSStdRecordTypeUsers))
-
-        allResults = []
-        for directory, node, recordType in directoryAndRecordTypes:
-            try:
-                if query:
-                    if isinstance(query, dsquery.match) and query.value != "":
-                        self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r,%r)" % (
-                            node,
-                            query.attribute,
-                            query.value,
-                            query.matchType,
-                            False,
-                            recordType,
-                            attributes,
-                            maxRecords,
-                        ))
-                        results = list(
-                            opendirectory.queryRecordsWithAttribute_list(
-                                directory,
-                                query.attribute,
-                                query.value,
-                                query.matchType,
-                                False,
-                                recordType,
-                                attributes,
-                                maxRecords,
-                            ))
-                    else:
-                        self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r)" % (
-                            node,
-                            query.generate(),
-                            False,
-                            recordType,
-                            attributes,
-                            maxRecords,
-                        ))
-                        results = list(
-                            opendirectory.queryRecordsWithAttributes_list(
-                                directory,
-                                query.generate(),
-                                False,
-                                recordType,
-                                attributes,
-                                maxRecords,
-                            ))
-                else:
-                    self.log.debug("opendirectory.listAllRecordsWithAttributes_list(%r,%r,%r,%r)" % (
-                        node,
-                        recordType,
-                        attributes,
-                        maxRecords,
-                    ))
-                    results = list(
-                        opendirectory.listAllRecordsWithAttributes_list(
-                            directory,
-                            recordType,
-                            attributes,
-                            maxRecords,
-                        ))
-            except opendirectory.ODError, ex:
-                self.log.error("Open Directory (node=%s) error: %s" % (self.realmName, str(ex)))
-                raise
-
-            allResults.extend(results)
-
-            if maxRecords:
-                maxRecords -= len(results)
-                if maxRecords <= 0:
-                    break
-
-        elapsedTime = time.time() - startTime
-        self.log.info("Timing: Directory query: %.1f ms (%d records, %.2f records/sec)" % (elapsedTime * 1000, len(allResults), len(allResults) / elapsedTime))
-        return succeed(allResults)
-
-
-    def _getDSFilter(self, addressBookFilter):
-        """
-        Convert the supplied addressbook-query into an expression tree.
-
-        @param filter: the L{Filter} for the addressbook-query to convert.
-        @return: (needsAllRecords, espressionAttributes, expression) tuple
-        """
-        def propFilterListQuery(filterAllOf, propFilters):
-
-            def propFilterExpression(filterAllOf, propFilter):
-                #print("propFilterExpression")
-                """
-                Create an expression for a single prop-filter element.
-
-                @param propFilter: the L{PropertyFilter} element.
-                @return: (needsAllRecords, expressionAttributes, expressions) tuple
-                """
-
-                def definedExpression(defined, allOf, filterName, constant, queryAttributes, allAttrStrings):
-                    if constant or filterName in ("N" , "FN", "UID",):
-                        return (defined, [], [])     # all records have this property so no records do not have it
-                    else:
-                        matchList = list(set([dsquery.match(attrName, "", dsattributes.eDSStartsWith) for attrName in allAttrStrings]))
-                        if defined:
-                            return andOrExpression(allOf, queryAttributes, matchList)
-                        else:
-                            if len(matchList) > 1:
-                                expr = dsquery.expression(dsquery.expression.OR, matchList)
-                            else:
-                                expr = matchList
-                            return (False, queryAttributes, [dsquery.expression(dsquery.expression.NOT, expr), ])
-                    #end definedExpression()
-
-
-                def andOrExpression(propFilterAllOf, queryAttributes, matchList):
-                    #print("andOrExpression(propFilterAllOf=%r, queryAttributes%r, matchList%r)" % (propFilterAllOf, queryAttributes, matchList))
-                    if propFilterAllOf and len(matchList):
-                        # add OR expression because parent will AND
-                        return (False, queryAttributes, [dsquery.expression(dsquery.expression.OR, matchList), ])
-                    else:
-                        return (False, queryAttributes, matchList)
-                    #end andOrExpression()
-
-
-                # short circuit parameter filters
-                def supportedParamter(filterName, paramFilters, propFilterAllOf):
-
-                    def supported(paramFilterName, paramFilterDefined, params):
-                        paramFilterName = paramFilterName.upper()
-                        if len(params.keys()) and ((paramFilterName in params.keys()) != paramFilterDefined):
-                            return False
-                        if len(params[paramFilterName]) and str(paramFilter.qualifier).upper() not in params[paramFilterName]:
-                            return False
-                        return True
-                        #end supported()
-
-                    oneSupported = False
-                    for paramFilter in paramFilters:
-                        if filterName == "PHOTO":
-                            if propFilterAllOf != supported(paramFilter.filter_name, paramFilter.defined, {"ENCODING": ["B", ], "TYPE": ["JPEG", ], }):
-                                return not propFilterAllOf
-                            oneSupported |= propFilterAllOf
-                        elif filterName == "ADR":
-                            if propFilterAllOf != supported(paramFilter.filter_name, paramFilter.defined, {"TYPE": ["WORK", "PREF", "POSTAL", "PARCEL", ], }):
-                                return not propFilterAllOf
-                            oneSupported |= propFilterAllOf
-                        elif filterName == "LABEL":
-                            if propFilterAllOf != supported(paramFilter.filter_name, paramFilter.defined, {"TYPE": ["POSTAL", "PARCEL", ]}):
-                                return not propFilterAllOf
-                            oneSupported |= propFilterAllOf
-                        elif filterName == "TEL":
-                            if propFilterAllOf != supported(paramFilter.filter_name, paramFilter.defined, {"TYPE": [], }): # has params derived from ds attributes
-                                return not propFilterAllOf
-                            oneSupported |= propFilterAllOf
-                        elif filterName == "EMAIL":
-                            if propFilterAllOf != supported(paramFilter.filter_name, paramFilter.defined, {"TYPE": [], }): # has params derived from ds attributes
-                                return not propFilterAllOf
-                            oneSupported |= propFilterAllOf
-                        elif filterName == "URL":
-                            if propFilterAllOf != supported(paramFilter.filter_name, paramFilter.defined, {}):
-                                return not propFilterAllOf
-                            oneSupported |= propFilterAllOf
-                        elif filterName == "KEY":
-                            if propFilterAllOf != supported(paramFilter.filter_name, paramFilter.defined, {"ENCODING": ["B", ], "TYPE": ["PGPPUBILICKEY", "USERCERTIFICATE", "USERPKCS12DATA", "USERSMIMECERTIFICATE", ]}):
-                                return not propFilterAllOf
-                            oneSupported |= propFilterAllOf
-                        elif not filterName.startswith("X-"): #X- IMHandles X-ABRELATEDNAMES excepted, no other params are used
-                            if propFilterAllOf == paramFilter.defined:
-                                return not propFilterAllOf
-                            oneSupported |= propFilterAllOf
-
-                    if propFilterAllOf:
-                        return True
-                    else:
-                        return oneSupported
-                    #end supportedParamter()
-
-
-                def textMatchElementExpression(propFilterAllOf, textMatchElement):
-
-                    # pre process text match strings for ds query
-                    def getMatchStrings(propFilter, matchString):
-
-                        if propFilter.filter_name in ("REV" , "BDAY",):
-                            rawString = matchString
-                            matchString = ""
-                            for c in rawString:
-                                if not c in "TZ-:":
-                                    matchString += c
-                        elif propFilter.filter_name == "GEO":
-                            matchString = ",".join(matchString.split(";"))
-
-                        if propFilter.filter_name in ("N" , "ADR", "ORG",):
-                            # for structured properties, change into multiple strings for ds query
-                            if propFilter.filter_name == "ADR":
-                                #split by newline and comma
-                                rawStrings = ",".join(matchString.split("\n")).split(",")
-                            else:
-                                #split by space
-                                rawStrings = matchString.split(" ")
-
-                            # remove empty strings
-                            matchStrings = []
-                            for oneString in rawStrings:
-                                if len(oneString):
-                                    matchStrings += [oneString, ]
-                            return matchStrings
-
-                        elif len(matchString):
-                            return [matchString, ]
-                        else:
-                            return []
-                        # end getMatchStrings
-
-                    if constant:
-                        # do the match right now!  Return either all or none.
-                        return(textMatchElement.test([constant, ]), [], [])
-                    else:
-
-                        matchStrings = getMatchStrings(propFilter, textMatchElement.text)
-
-                        if not len(matchStrings) or binaryAttrStrs:
-                            # no searching text in binary ds attributes, so change to defined/not defined case
-                            if textMatchElement.negate:
-                                return definedExpression(False, propFilterAllOf, propFilter.filter_name, constant, queryAttributes, allAttrStrings)
-                            # else fall through to attribute exists case below
-                        else:
-
-                            # special case UID's formed from node and record name
-                            if propFilter.filter_name == "UID":
-                                matchString = matchStrings[0]
-                                seperatorIndex = matchString.find(VCardRecord.peopleUIDSeparator)
-                                if seperatorIndex > 1:
-                                    recordNameStart = seperatorIndex + len(VCardRecord.peopleUIDSeparator)
-                                else:
-                                    seperatorIndex = matchString.find(VCardRecord.userUIDSeparator)
-                                    if seperatorIndex > 1:
-                                        recordNameStart = seperatorIndex + len(VCardRecord.userUIDSeparator)
-                                    else:
-                                        recordNameStart = sys.maxint
-
-                                if recordNameStart < len(matchString) - 1:
-                                    try:
-                                        recordNameQualifier = matchString[recordNameStart:].decode("base64").decode("utf8")
-                                    except Exception, e:
-                                        self.log.debug("Could not decode UID string %r in %r: %r" % (matchString[recordNameStart:], matchString, e,))
-                                    else:
-                                        if textMatchElement.negate:
-                                            return (False, queryAttributes,
-                                                    [dsquery.expression(dsquery.expression.NOT, dsquery.match(dsattributes.kDSNAttrRecordName, recordNameQualifier, dsattributes.eDSExact)), ]
-                                                    )
-                                        else:
-                                            return (False, queryAttributes,
-                                                    [dsquery.match(dsattributes.kDSNAttrRecordName, recordNameQualifier, dsattributes.eDSExact), ]
-                                                    )
-
-                            # use match_type where possible depending on property/attribute mapping
-                            # Note that case-sensitive negate will not work;
-                            #     it should return all records in that case
-                            matchType = dsattributes.eDSContains
-                            if propFilter.filter_name in ("NICKNAME" , "TITLE" , "NOTE" , "UID", "URL", "N", "ADR", "ORG", "REV", "LABEL",):
-                                if textMatchElement.match_type == "equals":
-                                        matchType = dsattributes.eDSExact
-                                elif textMatchElement.match_type == "starts-with":
-                                        matchType = dsattributes.eDSStartsWith
-                                elif textMatchElement.match_type == "ends-with":
-                                        matchType = dsattributes.eDSEndsWith
-
-                            matchList = []
-                            for matchString in matchStrings:
-                                matchList += [dsquery.match(attrName, matchString, matchType) for attrName in stringAttrStrs]
-
-                            matchList = list(set(matchList))
-
-                            if textMatchElement.negate:
-                                if len(matchList) > 1:
-                                    expr = dsquery.expression(dsquery.expression.OR, matchList)
-                                else:
-                                    expr = matchList
-                                return (False, queryAttributes, [dsquery.expression(dsquery.expression.NOT, expr), ])
-                            else:
-                                return andOrExpression(propFilterAllOf, queryAttributes, matchList)
-
-                    # attribute exists search
-                    return definedExpression(True, propFilterAllOf, propFilter.filter_name, constant, queryAttributes, allAttrStrings)
-                    #end textMatchElementExpression()
-
-                # get attribute strings from dsqueryAttributesForProperty list
-                queryAttributes = list(set(VCardRecord.dsqueryAttributesForProperty.get(propFilter.filter_name, [])).intersection(set(self.allowedDSQueryAttributes)))
-
-                binaryAttrStrs = []
-                stringAttrStrs = []
-                for attr in queryAttributes:
-                    if isinstance(attr, tuple):
-                        binaryAttrStrs.append(attr[0])
-                    else:
-                        stringAttrStrs.append(attr)
-                allAttrStrings = stringAttrStrs + binaryAttrStrs
-
-                constant = VCardRecord.constantProperties.get(propFilter.filter_name)
-                if not constant and not allAttrStrings:
-                    return (False, [], [])
-
-                if propFilter.qualifier and isinstance(propFilter.qualifier, IsNotDefined):
-                    return definedExpression(False, filterAllOf, propFilter.filter_name, constant, queryAttributes, allAttrStrings)
-
-                paramFilterElements = [paramFilterElement for paramFilterElement in propFilter.filters if isinstance(paramFilterElement, ParameterFilter)]
-                textMatchElements = [textMatchElement for textMatchElement in propFilter.filters if isinstance(textMatchElement, TextMatch)]
-                propFilterAllOf = propFilter.propfilter_test == "allof"
-
-                # handle parameter filter elements
-                if len(paramFilterElements) > 0:
-                    if supportedParamter(propFilter.filter_name, paramFilterElements, propFilterAllOf):
-                        if len(textMatchElements) == 0:
-                            return definedExpression(True, filterAllOf, propFilter.filter_name, constant, queryAttributes, allAttrStrings)
-                    else:
-                        if propFilterAllOf:
-                            return (False, [], [])
-
-                # handle text match elements
-                propFilterNeedsAllRecords = propFilterAllOf
-                propFilterAttributes = []
-                propFilterExpressionList = []
-                for textMatchElement in textMatchElements:
-
-                    textMatchNeedsAllRecords, textMatchExpressionAttributes, textMatchExpression = textMatchElementExpression(propFilterAllOf, textMatchElement)
-                    if propFilterAllOf:
-                        propFilterNeedsAllRecords &= textMatchNeedsAllRecords
-                    else:
-                        propFilterNeedsAllRecords |= textMatchNeedsAllRecords
-                    propFilterAttributes += textMatchExpressionAttributes
-                    propFilterExpressionList += textMatchExpression
-
-                if (len(propFilterExpressionList) > 1) and (filterAllOf != propFilterAllOf):
-                    propFilterExpressions = [dsquery.expression(dsquery.expression.AND if propFilterAllOf else dsquery.expression.OR , list(set(propFilterExpressionList)))] # remove duplicates
-                else:
-                    propFilterExpressions = list(set(propFilterExpressionList))
-
-                return (propFilterNeedsAllRecords, propFilterAttributes, propFilterExpressions)
-                #end propFilterExpression
-
-            #print("propFilterListQuery: filterAllOf=%r, propFilters=%r" % (filterAllOf, propFilters,))
-            """
-            Create an expression for a list of prop-filter elements.
-
-            @param filterAllOf: C{True} if the parent filter test is "allof"
-            @param propFilters: the C{list} of L{ComponentFilter} elements.
-            @return: (needsAllRecords, expressionAttributes, expression) tuple
-            """
-            needsAllRecords = filterAllOf
-            attributes = []
-            expressions = []
-            for propFilter in propFilters:
-
-                propNeedsAllRecords, propExpressionAttributes, propExpression = propFilterExpression(filterAllOf, propFilter)
-                if filterAllOf:
-                    needsAllRecords &= propNeedsAllRecords
-                else:
-                    needsAllRecords |= propNeedsAllRecords
-                attributes += propExpressionAttributes
-                expressions += propExpression
-
-            if len(expressions) > 1:
-                expr = dsquery.expression(dsquery.expression.AND if filterAllOf else dsquery.expression.OR , list(set(expressions))) # remove duplicates
-            elif len(expressions):
-                expr = expressions[0]
-            else:
-                expr = None
-
-            return (needsAllRecords, attributes, expr)
-
-        #print("_getDSFilter")
-        # Let's assume we have a valid filter from the outset
-
-        # Top-level filter contains zero or more prop-filters
-        if addressBookFilter:
-            filterAllOf = addressBookFilter.filter_test == "allof"
-            if len(addressBookFilter.children) > 0:
-                return propFilterListQuery(filterAllOf, addressBookFilter.children)
-            else:
-                return (filterAllOf, [], [])
-        else:
-            return (False, [], [])
-
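For illustration only, a minimal stand-alone sketch of the allof/anyof combination performed at the end of propFilterListQuery above; Expr is a hypothetical stand-in for dsquery.expression and is not part of the original module:

    class Expr(object):
        # Hypothetical stand-in for dsquery.expression.
        AND = "and"
        OR = "or"

        def __init__(self, operator, subexpressions):
            self.operator = operator
            self.subexpressions = subexpressions

    def combine(filterAllOf, expressions):
        # Remove duplicates, then AND the sub-expressions for an "allof"
        # filter and OR them for an "anyof" filter; None means "all records".
        expressions = list(set(expressions))
        if len(expressions) > 1:
            return Expr(Expr.AND if filterAllOf else Expr.OR, expressions)
        elif len(expressions):
            return expressions[0]
        return None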
-
-    def _attributesForAddressBookQuery(self, addressBookQuery):
-
-        propertyNames = []
-        #print( "addressBookQuery.qname=%r" % addressBookQuery.qname)
-        if addressBookQuery.qname() == ("DAV:", "prop"):
-
-            for property in addressBookQuery.children:
-                #print("property = %r" % property )
-                if isinstance(property, carddavxml.AddressData):
-                    for addressProperty in property.children:
-                        #print("addressProperty = %r" % addressProperty )
-                        if isinstance(addressProperty, carddavxml.Property):
-                            #print("Adding property %r", addressProperty.attributes["name"])
-                            propertyNames.append(addressProperty.attributes["name"])
-
-                elif not self.fakeETag and property.qname() == ("DAV:", "getetag"):
-                    # for a real etag == md5(vCard), we need all attributes
-                    propertyNames = None
-                    break
-
-        if not len(propertyNames):
-            #print("using all attributes")
-            return self.returnedAttributes
-
-        else:
-            propertyNames.append("X-INTERNAL-MINIMUM-VCARD-PROPERTIES") # these properties are required to make a vCard
-            queryAttributes = []
-            for prop in propertyNames:
-                if prop in VCardRecord.dsqueryAttributesForProperty:
-                    #print("adding attributes %r" % VCardRecord.dsqueryAttributesForProperty.get(prop))
-                    queryAttributes += VCardRecord.dsqueryAttributesForProperty.get(prop)
-
-            return list(set(queryAttributes).intersection(set(self.returnedAttributes)))
-
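As a rough sketch (the helper name is hypothetical), the attribute selection above amounts to mapping each requested vCard property through dsqueryAttributesForProperty and keeping only the attributes the service is configured to return:

    def attributesForProperties(propertyNames, dsqueryAttributesForProperty, returnedAttributes):
        # Always include the attributes needed to build a minimal vCard.
        queryAttributes = []
        for prop in propertyNames + ["X-INTERNAL-MINIMUM-VCARD-PROPERTIES"]:
            queryAttributes += dsqueryAttributesForProperty.get(prop, [])
        # Only request attributes the service is willing to return.
        return list(set(queryAttributes).intersection(set(returnedAttributes)))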
-
-    @inlineCallbacks
-    def cacheVCardsForAddressBookQuery(self, addressBookFilter, addressBookQuery, maxResults):
-        """
-        Cache the vCards for a given addressBookFilter and addressBookQuery
-        """
-        startTime = time.time()
-        #print("Timing: cacheVCardsForAddressBookQuery.starttime=%f" % startTime)
-
-        allRecords, filterAttributes, dsFilter = self._getDSFilter(addressBookFilter)
-        #print("allRecords = %s, query = %s" % (allRecords, "None" if dsFilter is None else dsFilter.generate(),))
-
-        if allRecords:
-            dsFilter = None #  None expression == all Records
-        clear = not allRecords and not dsFilter
-
-        #get unique list of requested attributes
-        if clear:
-            attributes = None
-        else:
-            queryAttributes = self._attributesForAddressBookQuery(addressBookQuery)
-            attributes = filterAttributes + queryAttributes
-
-        # calc maxRecords from the passed-in maxResults, allowing extra for second-stage filtering in the caller
-        maxRecords = int(maxResults * 1.2)
-        if self.maxDSQueryRecords and maxRecords > self.maxDSQueryRecords:
-            maxRecords = self.maxDSQueryRecords
-
-        updateLock, limited = (yield self._refreshCache(reschedule=False, query=dsFilter, attributes=attributes, keepLock=True, clear=clear, maxRecords=maxRecords))
-
-        elaspedTime = time.time() - startTime
-        self.log.info("Timing: Cache fill: %.1f ms" % (elaspedTime * 1000,))
-
-        returnValue((updateLock, limited))
-
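The record-limit calculation used above (and repeated in vCardRecordsForAddressBookQuery below) can be summarized as a small helper; this is a sketch only, not part of the original code:

    def calcMaxRecords(maxResults, maxDSQueryRecords):
        # Ask the directory for roughly 20% more records than requested so the
        # caller can do second-stage filtering, capped by the configured limit
        # (a falsy maxDSQueryRecords means no cap).
        maxRecords = int(maxResults * 1.2)
        if maxDSQueryRecords and maxRecords > maxDSQueryRecords:
            maxRecords = maxDSQueryRecords
        return maxRecords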
-
-    @inlineCallbacks
-    def vCardRecordsForAddressBookQuery(self, addressBookFilter, addressBookQuery, maxResults):
-        """
-        Get vCards for a given addressBookFilter and addressBookQuery
-        """
-
-        allRecords, filterAttributes, dsFilter = self._getDSFilter(addressBookFilter)
-        #print("allRecords = %s, query = %s" % (allRecords, "None" if dsFilter is None else dsFilter.generate(),))
-
-        # testing:
-        # allRecords = True
-
-        if allRecords:
-            dsFilter = None #  None expression == all Records
-        clear = not allRecords and not dsFilter
-
-        queryRecords = []
-        limited = False
-
-        if not clear:
-            queryAttributes = self._attributesForAddressBookQuery(addressBookQuery)
-            attributes = filterAttributes + queryAttributes
-
-            # calc maxRecords from the passed-in maxResults, allowing extra for second-stage filtering in the caller
-            maxRecords = int(maxResults * 1.2)
-            if self.maxDSQueryRecords and maxRecords > self.maxDSQueryRecords:
-                maxRecords = self.maxDSQueryRecords
-
-            records, limited = (yield self._getDirectoryRecords(dsFilter, attributes, maxRecords))
-
-            #filter out bad records --- should only happen during development
-            for record in records.values():
-                try:
-                    vCardText = record.vCardText()
-                except:
-                    traceback.print_exc()
-                    self.log.info("Could not get vcard for record %s" % (record,))
-                else:
-                    if not record.firstValueForAttribute(dsattributes.kDSNAttrMetaNodeLocation).startswith("/Local"):
-                        self.log.debug("VCard text =\n%s" % (vCardText,))
-                    queryRecords.append(record)
-
-        returnValue((queryRecords, limited,))
-
-
-
-class VCardRecord(DirectoryRecord, DAVPropertyMixIn):
-    """
-    Open Directory implementation of L{IDirectoryRecord}.
-    """
-
-    # od attributes that may contribute to vcard properties
-    # will be used to translate vCard queries to od queries
-
-    dsqueryAttributesForProperty = {
-
-        "FN" : [
-               dsattributes.kDS1AttrFirstName,
-               dsattributes.kDS1AttrLastName,
-               dsattributes.kDS1AttrMiddleName,
-               dsattributes.kDSNAttrNamePrefix,
-               dsattributes.kDSNAttrNameSuffix,
-               dsattributes.kDS1AttrDistinguishedName,
-               dsattributes.kDSNAttrRecordName,
-               ],
-        "N" : [
-               dsattributes.kDS1AttrFirstName,
-               dsattributes.kDS1AttrLastName,
-               dsattributes.kDS1AttrMiddleName,
-               dsattributes.kDSNAttrNamePrefix,
-               dsattributes.kDSNAttrNameSuffix,
-               dsattributes.kDS1AttrDistinguishedName,
-               dsattributes.kDSNAttrRecordName,
-               ],
-        "NICKNAME" : [
-                dsattributes.kDSNAttrNickName,
-                ],
-        # no binary searching
-        "PHOTO" : [
-                (dsattributes.kDSNAttrJPEGPhoto, "base64"),
-                ],
-        "BDAY" : [
-                dsattributes.kDS1AttrBirthday,
-                ],
-        "ADR" : [
-                dsattributes.kDSNAttrBuilding,
-                dsattributes.kDSNAttrStreet,
-                dsattributes.kDSNAttrCity,
-                dsattributes.kDSNAttrState,
-                dsattributes.kDSNAttrPostalCode,
-                dsattributes.kDSNAttrCountry,
-                ],
-        "LABEL" : [
-                dsattributes.kDSNAttrPostalAddress,
-                dsattributes.kDSNAttrPostalAddressContacts,
-                dsattributes.kDSNAttrAddressLine1,
-                dsattributes.kDSNAttrAddressLine2,
-                dsattributes.kDSNAttrAddressLine3,
-                ],
-         "TEL" : [
-                dsattributes.kDSNAttrPhoneNumber,
-                dsattributes.kDSNAttrMobileNumber,
-                dsattributes.kDSNAttrPagerNumber,
-                dsattributes.kDSNAttrHomePhoneNumber,
-                dsattributes.kDSNAttrPhoneContacts,
-                dsattributes.kDSNAttrFaxNumber,
-                #dsattributes.kDSNAttrAreaCode,
-                ],
-         "EMAIL" : [
-                dsattributes.kDSNAttrEMailAddress,
-                dsattributes.kDSNAttrEMailContacts,
-                ],
-         "GEO" : [
-                dsattributes.kDSNAttrMapCoordinates,
-                ],
-         "TITLE" : [
-                dsattributes.kDSNAttrJobTitle,
-                ],
-         "ORG" : [
-                dsattributes.kDSNAttrCompany,
-                dsattributes.kDSNAttrOrganizationName,
-                dsattributes.kDSNAttrDepartment,
-                ],
-         "NOTE" : [
-                dsattributes.kDS1AttrComment,
-                dsattributes.kDS1AttrNote,
-                ],
-         "REV" : [
-                dsattributes.kDS1AttrModificationTimestamp,
-                ],
-         "UID" : [
-                dsattributes.kDS1AttrGeneratedUID,
-                # special cased
-                #dsattributes.kDSNAttrMetaNodeLocation,
-                #dsattributes.kDSNAttrRecordName,
-                #dsattributes.kDS1AttrDistinguishedName,
-                ],
-         "URL" : [
-                dsattributes.kDS1AttrWeblogURI,
-                dsattributes.kDSNAttrURL,
-                ],
-         "KEY" : [
-                # check on format, are these all binary?
-                (dsattributes.kDSNAttrPGPPublicKey, "base64"),
-                (dsattributes.kDS1AttrUserCertificate, "base64"),
-                (dsattributes.kDS1AttrUserPKCS12Data, "base64"),
-                (dsattributes.kDS1AttrUserSMIMECertificate, "base64"),
-                ],
-         # too bad this is not one X-Attribute with params. Would make searching easier
-         "X-AIM" : [
-                dsattributes.kDSNAttrIMHandle,
-                ],
-         "X-JABBER" : [
-                dsattributes.kDSNAttrIMHandle,
-                ],
-         "X-MSN" : [
-                dsattributes.kDSNAttrIMHandle,
-                ],
-         "X-YAHOO" : [
-                dsattributes.kDSNAttrIMHandle,
-                ],
-         "X-ICQ" : [
-                dsattributes.kDSNAttrIMHandle,
-                ],
-         "X-ABRELATEDNAMES" : [
-                dsattributes.kDSNAttrRelationships,
-                ],
-          "X-INTERNAL-MINIMUM-VCARD-PROPERTIES" : [
-                dsattributes.kDS1AttrGeneratedUID,
-                dsattributes.kDSNAttrMetaNodeLocation,
-                dsattributes.kDS1AttrFirstName,
-                 dsattributes.kDS1AttrLastName,
-                dsattributes.kDS1AttrMiddleName,
-                   dsattributes.kDSNAttrNamePrefix,
-                  dsattributes.kDSNAttrNameSuffix,
-                 dsattributes.kDS1AttrDistinguishedName,
-                dsattributes.kDSNAttrRecordName,
-                dsattributes.kDSNAttrRecordType,
-                dsattributes.kDS1AttrModificationTimestamp,
-                dsattributes.kDS1AttrCreationTimestamp,
-                ],
-          "X-INTERNAL-REQUIRED" : [
-                dsattributes.kDS1AttrGeneratedUID,
-                dsattributes.kDSNAttrMetaNodeLocation,
-                 dsattributes.kDS1AttrDistinguishedName,
-                dsattributes.kDSNAttrRecordName,
-                dsattributes.kDS1AttrFirstName,
-                 dsattributes.kDS1AttrLastName,
-                dsattributes.kDSNAttrRecordType,
-                ],
-
-    }
-
-    allDSQueryAttributes = sorted(list(set([attr for lookupAttributes in dsqueryAttributesForProperty.values()
-                                      for attr in lookupAttributes])))
-
-    binaryDSAttributeStrs = [attr[0] for attr in allDSQueryAttributes
-                                if isinstance(attr, tuple)]
-
-    stringDSAttributeStrs = [attr for attr in allDSQueryAttributes
-                                if isinstance(attr, str)]
-
-    allDSAttributeStrs = stringDSAttributeStrs + binaryDSAttributeStrs
-
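For example, dsqueryAttributesForProperty["EMAIL"] yields [kDSNAttrEMailAddress, kDSNAttrEMailContacts], while "PHOTO" yields [(kDSNAttrJPEGPhoto, "base64")]. A sketch (hypothetical helper name) of how such a mixed list splits into searchable string attributes and fetch-only binary attributes, mirroring the split done in propFilterExpression earlier:

    def splitQueryAttributes(queryAttributes):
        # (attributeName, "base64") tuples mark binary attributes, which are
        # fetched but never text-matched; plain strings can be searched.
        binaryAttrStrs = [attr[0] for attr in queryAttributes if isinstance(attr, tuple)]
        stringAttrStrs = [attr for attr in queryAttributes if isinstance(attr, str)]
        return stringAttrStrs, binaryAttrStrs
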
-    #peopleUIDSeparator = "-" + OpenDirectoryBackingService.baseGUID + "-"
-    userUIDSeparator = "-bf07a1a2-"
-    peopleUIDSeparator = "-cf07a1a2-"
-
-    constantProperties = {
-        # 3.6.3 PRODID Type Definition
-        "PRODID": vCardProductID,
-        # 3.6.9 VERSION Type Definition
-        "VERSION": "3.0",
-        }
-
-
-    def __init__(self, service, recordAttributes, defaultNodeName=None):
-
-        self.log.debug("service=%s, attributes=%s" % (service, recordAttributes))
-
-        #save off for debugging
-        if service.addDSAttrXProperties:
-            self.originalAttributes = recordAttributes.copy()
-
-        self.directoryBackedAddressBook = service.directoryBackedAddressBook
-        self._vCard = None
-        self._vCardText = None
-        self._uriName = None
-        self._hRef = None
-
-        self.attributes = {}
-        for key, values in recordAttributes.items():
-            if key in VCardRecord.stringDSAttributeStrs:
-                if isinstance(values, list):
-                    self.attributes[key] = [removeControlChars(val).decode("utf8") for val in values]
-                else:
-                    self.attributes[key] = removeControlChars(values).decode("utf8")
-            else:
-                self.attributes[key] = values
-
-        # fill in missing essential attributes used for filtering
-        fullName = self.firstValueForAttribute(dsattributes.kDS1AttrDistinguishedName)
-        if not fullName:
-            fullName = self.firstValueForAttribute(dsattributes.kDSNAttrRecordName)
-            self.attributes[dsattributes.kDS1AttrDistinguishedName] = fullName
-
-        node = self.firstValueForAttribute(dsattributes.kDSNAttrMetaNodeLocation)
-
-        # use a better node name -- makes better synthetic GUIDs
-        if not node or node == "/LDAPv3/127.0.0.1":
-            node = defaultNodeName if defaultNodeName else service.realmName
-            self.attributes[dsattributes.kDSNAttrMetaNodeLocation] = node
-
-        guid = self.firstValueForAttribute(dsattributes.kDS1AttrGeneratedUID)
-        if not guid:
-            if service.standardizeSyntheticUIDs:
-                nodeUUIDStr = "00000000"
-            else:
-                nodeUUIDStr = "%x" % abs(hash(node))
-            nameUUIDStr = "".join(self.firstValueForAttribute(dsattributes.kDSNAttrRecordName).encode("utf8").encode("base64").split("\n"))
-            if self.firstValueForAttribute(dsattributes.kDSNAttrRecordType) != dsattributes.kDSStdRecordTypePeople:
-                guid = VCardRecord.userUIDSeparator.join([nodeUUIDStr, nameUUIDStr, ])
-            else:
-                guid = VCardRecord.peopleUIDSeparator.join([nodeUUIDStr, nameUUIDStr, ])
-
-        # since guid is used as file name, normalize so uid uniqueness == file name uniqueness
-        #guid = "/".join(guid.split(":")).upper()
-        self.attributes[dsattributes.kDS1AttrGeneratedUID] = guid
-
-        if self.firstValueForAttribute(dsattributes.kDS1AttrLastName) == "99":
-            del self.attributes[dsattributes.kDS1AttrLastName]
-
-        if self.firstValueForAttribute(dsattributes.kDSNAttrRecordType) != dsattributes.kDSStdRecordTypePeople:
-            recordType = DirectoryService.recordType_users
-        else:
-            recordType = DirectoryService.recordType_people
-
-        super(VCardRecord, self).__init__(
-            service=service,
-            recordType=recordType,
-            guid=guid,
-            shortNames=tuple(self.valuesForAttribute(dsattributes.kDSNAttrRecordName)),
-            fullName=fullName,
-            firstName=self.firstValueForAttribute(dsattributes.kDS1AttrFirstName, None),
-            lastName=self.firstValueForAttribute(dsattributes.kDS1AttrLastName, None),
-            emailAddresses=(),
-            calendarUserAddresses=(),
-            autoSchedule=False,
-            enabledForCalendaring=False,
-        )
-
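A worked example of the synthetic GUID built in __init__ above when a record carries no kDS1AttrGeneratedUID; the node and record names are hypothetical:

    node = "/LDAPv3/ldap.example.com"
    recordName = "jsmith"
    nodeUUIDStr = "%x" % abs(hash(node))
    nameUUIDStr = "".join(recordName.encode("utf8").encode("base64").split("\n"))   # "anNtaXRo"
    # user records use userUIDSeparator, people records use peopleUIDSeparator
    guid = "-bf07a1a2-".join([nodeUUIDStr, nameUUIDStr])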
-
-    def __repr__(self):
-        return "<%s[%s(%s)] %s(%s) %r>" % (
-            self.__class__.__name__,
-            self.firstValueForAttribute(dsattributes.kDSNAttrRecordType),
-            self.firstValueForAttribute(dsattributes.kDSNAttrMetaNodeLocation),
-            self.guid,
-            self.shortNames,
-            self.fullName
-        )
-
-
-    def __hash__(self):
-        s = "".join([
-              "%s:%s" % (attribute, self.valuesForAttribute(attribute),)
-              for attribute in self.attributes
-              ])
-        return hash(s)
-
-    """
-    def nextFileName(self):
-        self.renameCounter += 1
-        self.fileName = self.baseFileName + "-" + str(self.renameCounter)
-        self.fileNameLower = self.fileName.lower()
-    """
-
-    def hasAttribute(self, attributeName):
-        return self.valuesForAttribute(attributeName, None) is not None
-
-
-    def valuesForAttribute(self, attributeName, default_values=[]):
-        values = self.attributes.get(attributeName)
-        if (values is None):
-            return default_values
-        elif not isinstance(values, list):
-            values = [values, ]
-
-        # ds templates often return empty attribute values
-        #     get rid of them here
-        nonEmptyValues = [(value.encode("utf-8") if isinstance(value, unicode) else value) for value in values if len(value) > 0]
-
-        if len(nonEmptyValues) > 0:
-            return nonEmptyValues
-        else:
-            return default_values
-
-
-    def firstValueForAttribute(self, attributeName, default_value=""):
-        values = self.attributes.get(attributeName)
-        if values is None:
-            return default_value
-        elif isinstance(values, list):
-            return values[0].encode("utf_8") if isinstance(values[0], unicode) else values[0]
-        else:
-            return values.encode("utf_8") if isinstance(values, unicode) else values
-
-
-    def joinedValuesForAttribute(self, attributeName, separator=",", default_string=""):
-        values = self.valuesForAttribute(attributeName, None)
-        if not values:
-            return default_string
-        else:
-            return separator.join(values)
-
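Hypothetical usage of the three accessors above; the record and its attribute values are illustrative, not taken from the source:

    >>> record.valuesForAttribute(dsattributes.kDSNAttrEMailAddress)
    ['jsmith@example.com', 'john@example.com']
    >>> record.firstValueForAttribute(dsattributes.kDS1AttrFirstName)
    'John'
    >>> record.firstValueForAttribute(dsattributes.kDS1AttrBirthday, None) is None
    True
    >>> record.joinedValuesForAttribute(dsattributes.kDSNAttrAddressLine1, separator="\n")
    '123 Example Street'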
-
-    def isoDateStringForDateAttribute(self, attributeName, default_string=""):
-        modDate = self.firstValueForAttribute(attributeName, default_string)
-        revDate = None
-        if modDate:
-            if len(modDate) >= len("YYYYMMDD") and modDate[:8].isdigit():
-                revDate = "%s-%s-%s" % (modDate[:4], modDate[4:6], modDate[6:8],)
-            if len(modDate) >= len("YYYYMMDDHHMMSS") and modDate[8:14].isdigit():
-                revDate += "T%s:%s:%sZ" % (modDate[8:10], modDate[10:12], modDate[12:14],)
-        return revDate
-
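A worked example of the conversion above, assuming the directory returns X.208-style GMT timestamps (values are illustrative):

    # "20140404102027Z" (YYYYMMDDHHMMSSZ) -> "2014-04-04T10:20:27Z"
    # "20140404"        (date only)       -> "2014-04-04"
    # attribute missing                   -> None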
-
-    def vCard(self):
-
-
-        def generateVCard():
-
-            def isUniqueProperty(vcard, newProperty, ignoreParams=None):
-                existingProperties = vcard.properties(newProperty.name())
-                for existingProperty in existingProperties:
-                    if ignoreParams:
-                        existingProperty = existingProperty.duplicate()
-                        for paramname, paramvalue in ignoreParams:
-                            existingProperty.removeParameterValue(paramname, paramvalue)
-                    if existingProperty == newProperty:
-                        return False
-                return True
-
-            def addUniqueProperty(vcard, newProperty, ignoreParams=None, attrType=None, attrValue=None):
-                if isUniqueProperty(vcard, newProperty, ignoreParams):
-                    vcard.addProperty(newProperty)
-                else:
-                    if attrType and attrValue:
-                        self.log.info("Ignoring attribute %r with value %r in creating property %r. A duplicate property already exists." % (attrType, attrValue, newProperty,))
-
-            def addPropertyAndLabel(groupCount, label, propertyName, propertyValue, parameters=None):
-                groupCount[0] += 1
-                groupPrefix = "item%d" % groupCount[0]
-                vcard.addProperty(Property(propertyName, propertyValue, params=parameters, group=groupPrefix))
-                vcard.addProperty(Property("X-ABLabel", label, group=groupPrefix))
-
-            # for attributes of the form param:value
-            def addPropertiesAndLabelsForPrefixedAttribute(groupCount, propertyPrefix, propertyName, defaultLabel, nolabelParamTypes, labelMap, attrType):
-                preferred = True
-                for attrValue in self.valuesForAttribute(attrType):
-                    try:
-                        # special case for Apple
-                        if self.service.appleInternalServer and attrType == dsattributes.kDSNAttrIMHandle:
-                            splitValue = attrValue.split("|")
-                            if len(splitValue) > 1:
-                                attrValue = splitValue[0]
-
-                        colonIndex = attrValue.find(":")
-                        if (colonIndex > len(attrValue) - 2):
-                            raise ValueError("Nothing after colon.")
-
-                        propertyValue = attrValue[colonIndex + 1:]
-                        labelString = attrValue[:colonIndex] if colonIndex > 0 else defaultLabel
-                        paramTypeString = labelString.upper()
-
-                        # add PREF to first prop's parameters
-                        paramTypeStrings = [paramTypeString, ]
-                        if preferred and "PREF" != paramTypeString:
-                            paramTypeStrings += ["PREF", ]
-                        parameters = {"TYPE": paramTypeStrings, }
-
-                        # special case for IM handles, where the param is the last part of the property name, like X-AIM or X-JABBER
-                        if propertyPrefix:
-                            propertyName = propertyPrefix + paramTypeString
-
-                        # only add label prop if needed
-                        if paramTypeString in nolabelParamTypes:
-                            addUniqueProperty(vcard, Property(propertyName, attrValue[colonIndex + 1:], params=parameters), None, attrValue, attrType)
-                        else:
-                            # use special localizable addressbook labels where possible
-                            abLabelString = labelMap.get(labelString, labelString)
-                            addPropertyAndLabel(groupCount, abLabelString, propertyName, propertyValue, parameters)
-                        preferred = False
-
-                    except Exception, e:
-                        traceback.print_exc()
-                        self.log.debug("addPropertiesAndLabelsForPrefixedAttribute(): groupCount=%r, propertyPrefix=%r, propertyName=%r, nolabelParamTypes=%r, labelMap=%r, attrType=%r" % (groupCount[0], propertyPrefix, propertyName, nolabelParamTypes, labelMap, attrType,))
-                        self.log.error("addPropertiesAndLabelsForPrefixedAttribute(): Trouble parsing attribute %s, with value \"%s\".  Error = %s" % (attrType, attrValue, e,))
-
-            #print("VCardRecord.vCard")
-            # create vCard
-            vcard = Component("VCARD")
-            groupCount = [0]
-
-            # add constant properties - properties that are the same regardless of the record attributes
-            for key, value in VCardRecord.constantProperties.items():
-                vcard.addProperty(Property(key, value))
-
-            # 3.1 IDENTIFICATION TYPES http://tools.ietf.org/html/rfc2426#section-3.1
-            # 3.1.1 FN Type Definition
-            # dsattributes.kDS1AttrDistinguishedName,      # Users distinguished or real name
-            #
-            # full name is required but this is set in OpenDirectoryBackingRecord.__init__
-            #vcard.addProperty(Property("FN", self.firstValueForAttribute(dsattributes.kDS1AttrDistinguishedName)))
-
-            # 3.1.2 N Type Definition
-            # dsattributes.kDS1AttrFirstName,            # Used for first name of user or person record.
-            # dsattributes.kDS1AttrLastName,            # Used for the last name of user or person record.
-            # dsattributes.kDS1AttrMiddleName,            # Used for the middle name of user or person record.
-            # dsattributes.kDSNAttrNameSuffix,            # Represents the name suffix of a user or person.
-                                                        #      ie. Jr., Sr., etc.
-                                                        #      Usually found in user or people records (kDSStdRecordTypeUsers or
-                                                        #      dsattributes.kDSStdRecordTypePeople).
-            # dsattributes.kDSNAttrNamePrefix,            # Represents the title prefix of a user or person.
-                                                        #      ie. Mr., Ms., Mrs., Dr., etc.
-                                                        #      Usually found in user or people records (kDSStdRecordTypeUsers or
-                                                        #      dsattributes.kDSStdRecordTypePeople).
-
-            # name is required, so make sure we have one
-            # vcard says: Each name attribute can be a string or a list of strings.
-            if not self.hasAttribute(dsattributes.kDS1AttrFirstName) and not self.hasAttribute(dsattributes.kDS1AttrLastName):
-                familyName = self.firstValueForAttribute(dsattributes.kDS1AttrDistinguishedName)
-            else:
-                familyName = self.valuesForAttribute(dsattributes.kDS1AttrLastName, "")
-
-            nameObject = N(
-                first=self.valuesForAttribute(dsattributes.kDS1AttrFirstName, ""),
-                last=familyName,
-                middle=self.valuesForAttribute(dsattributes.kDS1AttrMiddleName, ""),
-                prefix=self.valuesForAttribute(dsattributes.kDSNAttrNamePrefix, ""),
-                suffix=self.valuesForAttribute(dsattributes.kDSNAttrNameSuffix, ""),
-            )
-            vcard.addProperty(Property("N", nameObject))
-
-            # set full name to Name with contiguous spaces stripped
-            # it turns out that Address Book.app ignores FN and creates it fresh from N in ABRecord
-            # so no reason to have FN distinct from N
-            vcard.addProperty(Property("FN", nameObject.getFullName()))
-
-            # 3.1.3 NICKNAME Type Definition
-            # dsattributes.kDSNAttrNickName,            # Represents the nickname of a user or person.
-                                                        #    Usually found in user or people records (kDSStdRecordTypeUsers or
-                                                        #    dsattributes.kDSStdRecordTypePeople).
-            for nickname in self.valuesForAttribute(dsattributes.kDSNAttrNickName):
-                addUniqueProperty(vcard, Property("NICKNAME", nickname), None, dsattributes.kDSNAttrNickName, nickname)
-
-            # 3.1.4 PHOTO Type Definition
-            # dsattributes.kDSNAttrJPEGPhoto,            # Used to store binary picture data in JPEG format.
-                                                        #      Usually found in user, people or group records (kDSStdRecordTypeUsers,
-                                                        #      dsattributes.kDSStdRecordTypePeople,dsattributes.kDSStdRecordTypeGroups).
-            # pyOpenDirectory always returns binary-encoded string
-
-            for photo in self.valuesForAttribute(dsattributes.kDSNAttrJPEGPhoto):
-                addUniqueProperty(vcard, Property("PHOTO", photo, params={"ENCODING": ["b", ], "TYPE": ["JPEG", ], }), None, dsattributes.kDSNAttrJPEGPhoto, photo)
-
-            # 3.1.5 BDAY Type Definition
-            # dsattributes.kDS1AttrBirthday,            # Single-valued attribute that defines the user's birthday.
-                                                        #      Format is x.208 standard YYYYMMDDHHMMSSZ which we will require as GMT time.
-                                                        #                               012345678901234
-
-            birthdate = self.isoDateStringForDateAttribute(dsattributes.kDS1AttrBirthday)
-            if birthdate:
-                vcard.addProperty(Property("BDAY", DateTime.parseText(birthdate, fullISO=True)))
-
-            # 3.2 Delivery Addressing Types http://tools.ietf.org/html/rfc2426#section-3.2
-            #
-            # 3.2.1 ADR Type Definition
-
-            #address
-            # vcard says: Each address attribute can be a string or a list of strings.
-            extended = self.valuesForAttribute(dsattributes.kDSNAttrBuilding, "")
-            street = self.valuesForAttribute(dsattributes.kDSNAttrStreet, "")
-            city = self.valuesForAttribute(dsattributes.kDSNAttrCity, "")
-            region = self.valuesForAttribute(dsattributes.kDSNAttrState, "")
-            code = self.valuesForAttribute(dsattributes.kDSNAttrPostalCode, "")
-            country = self.valuesForAttribute(dsattributes.kDSNAttrCountry, "")
-
-            if len(extended) > 0 or len(street) > 0 or len(city) > 0 or len(region) > 0 or len(code) > 0 or len(country) > 0:
-                vcard.addProperty(Property("ADR",
-                    Adr(
-                        #pobox = box,
-                        extended=extended,
-                        street=street,
-                        locality=city,
-                        region=region,
-                        postalcode=code,
-                        country=country,
-                    ),
-                    params={"TYPE": ["WORK", "PREF", "POSTAL", "PARCEL", ], }
-                ))
-
-            # 3.2.2 LABEL Type Definition
-
-            # dsattributes.kDSNAttrPostalAddress,            # The postal address usually excluding postal code.
-            # dsattributes.kDSNAttrPostalAddressContacts,    # multi-valued attribute that defines a record's alternate postal addresses .
-                                                            #      found in user records (kDSStdRecordTypeUsers) and resource records (kDSStdRecordTypeResources).
-            # dsattributes.kDSNAttrAddressLine1,            # Line one of multiple lines of address data for a user.
-            # dsattributes.kDSNAttrAddressLine2,            # Line two of multiple lines of address data for a user.
-            # dsattributes.kDSNAttrAddressLine3,            # Line three of multiple lines of address data for a user.
-
-            for label in self.valuesForAttribute(dsattributes.kDSNAttrPostalAddress):
-                addUniqueProperty(vcard, Property("LABEL", label, params={"TYPE": ["POSTAL", "PARCEL", ]}), None, dsattributes.kDSNAttrPostalAddress, label)
-
-            for label in self.valuesForAttribute(dsattributes.kDSNAttrPostalAddressContacts):
-                addUniqueProperty(vcard, Property("LABEL", label, params={"TYPE": ["POSTAL", "PARCEL", ]}), None, dsattributes.kDSNAttrPostalAddressContacts, label)
-
-            address = self.joinedValuesForAttribute(dsattributes.kDSNAttrAddressLine1)
-            addressLine2 = self.joinedValuesForAttribute(dsattributes.kDSNAttrAddressLine2)
-            if len(addressLine2) > 0:
-                address += "\n" + addressLine2
-            addressLine3 = self.joinedValuesForAttribute(dsattributes.kDSNAttrAddressLine3)
-            if len(addressLine3) > 0:
-                address += "\n" + addressLine3
-
-            if len(address) > 0:
-                vcard.addProperty(Property("LABEL", address, params={"TYPE": ["POSTAL", "PARCEL", ]}))
-
-            # 3.3 TELECOMMUNICATIONS ADDRESSING TYPES http://tools.ietf.org/html/rfc2426#section-3.3
-            # 3.3.1 TEL Type Definition
-            #          TEL;TYPE=work,voice,pref,msg:+1-213-555-1234
-
-            # dsattributes.kDSNAttrPhoneNumber,            # Telephone number of a user.
-            # dsattributes.kDSNAttrMobileNumber,        # Represents the mobile numbers of a user or person.
-                                                        #      Usually found in user or people records (kDSStdRecordTypeUsers or
-                                                        #      dsattributes.kDSStdRecordTypePeople).
-            # dsattributes.kDSNAttrFaxNumber,            # Represents the FAX numbers of a user or person.
-                                                        # Usually found in user or people records (kDSStdRecordTypeUsers or
-                                                        # kDSStdRecordTypePeople).
-            # dsattributes.kDSNAttrPagerNumber,            # Represents the pager numbers of a user or person.
-                                                        #      Usually found in user or people records (kDSStdRecordTypeUsers or
-                                                        #      dsattributes.kDSStdRecordTypePeople).
-            # dsattributes.kDSNAttrHomePhoneNumber,        # Home telephone number of a user or person.
-            # dsattributes.kDSNAttrPhoneContacts,        # multi-valued attribute that defines a record's custom phone numbers .
-                                                        #      found in user records (kDSStdRecordTypeUsers).
-                                                        #      Example: home fax:408-555-4444
-
-            params = {"TYPE": ["WORK", "PREF", "VOICE", ], }
-            for phone in self.valuesForAttribute(dsattributes.kDSNAttrPhoneNumber):
-                addUniqueProperty(vcard, Property("TEL", phone, params=params), (("TYPE", "PREF"),), phone, dsattributes.kDSNAttrPhoneNumber)
-                params = {"TYPE": ["WORK", "VOICE", ], }
-
-            params = {"TYPE": ["WORK", "PREF", "CELL", ], }
-            for phone in self.valuesForAttribute(dsattributes.kDSNAttrMobileNumber):
-                addUniqueProperty(vcard, Property("TEL", phone, params=params), (("TYPE", "PREF"),), phone, dsattributes.kDSNAttrMobileNumber)
-                params = {"TYPE": ["WORK", "CELL", ], }
-
-            params = {"TYPE": ["WORK", "PREF", "FAX", ], }
-            for phone in self.valuesForAttribute(dsattributes.kDSNAttrFaxNumber):
-                addUniqueProperty(vcard, Property("TEL", phone, params=params), (("TYPE", "PREF"),), phone, dsattributes.kDSNAttrFaxNumber)
-                params = {"TYPE": ["WORK", "FAX", ], }
-
-            params = {"TYPE": ["WORK", "PREF", "PAGER", ], }
-            for phone in self.valuesForAttribute(dsattributes.kDSNAttrPagerNumber):
-                addUniqueProperty(vcard, Property("TEL", phone, params=params), (("TYPE", "PREF"),), phone, dsattributes.kDSNAttrPagerNumber)
-                params = {"TYPE": ["WORK", "PAGER", ], }
-
-            params = {"TYPE": ["HOME", "PREF", "VOICE", ], }
-            for phone in self.valuesForAttribute(dsattributes.kDSNAttrHomePhoneNumber):
-                addUniqueProperty(vcard, Property("TEL", phone, params=params), (("TYPE", "PREF"),), phone, dsattributes.kDSNAttrHomePhoneNumber)
-                params = {"TYPE": ["HOME", "VOICE", ], }
-
-            addPropertiesAndLabelsForPrefixedAttribute(groupCount, None, "TEL", "work",
-                                                        ["VOICE", "CELL", "FAX", "PAGER", ], {},
-                                                        dsattributes.kDSNAttrPhoneContacts,)
-
-            """
-            # EXTEND:  Use this attribute
-            # dsattributes.kDSNAttrAreaCode,            # Area code of a user's phone number.
-            """
-
-            # 3.3.2 EMAIL Type Definition
-            # dsattributes.kDSNAttrEMailAddress,        # Email address of usually a user record.
-
-            # setup some params
-            preferredWorkParams = {"TYPE": ["WORK", "PREF", "INTERNET", ], }
-            workParams = {"TYPE": ["WORK", "INTERNET", ], }
-            params = preferredWorkParams
-            for emailAddress in self.valuesForAttribute(dsattributes.kDSNAttrEMailAddress):
-                addUniqueProperty(vcard, Property("EMAIL", emailAddress, params=params), (("TYPE", "PREF"),), emailAddress, dsattributes.kDSNAttrEMailAddress)
-                params = workParams
-
-            # dsattributes.kDSNAttrEMailContacts,        # multi-valued attribute that defines a record's custom email addresses .
-                                                        #    found in user records (kDSStdRecordTypeUsers).
-                                                        #      Example: home:johndoe at mymail.com
-
-            # check to see if parameter types are open-ended; could be any string
-            addPropertiesAndLabelsForPrefixedAttribute(groupCount, None, "EMAIL", "work",
-                                                        ["WORK", "HOME", ], {},
-                                                        dsattributes.kDSNAttrEMailContacts,)
-
-            """
-            # UNIMPLEMENTED:
-            # 3.3.3 MAILER Type Definition
-            """
-            # 3.4 GEOGRAPHICAL TYPES http://tools.ietf.org/html/rfc2426#section-3.4
-            """
-            # UNIMPLEMENTED:
-            # 3.4.1 TZ Type Definition
-            """
-            # 3.4.2 GEO Type Definition
-            #dsattributes.kDSNAttrMapCoordinates,        # attribute that defines coordinates for a user's location .
-                                                        #      Found in user records (kDSStdRecordTypeUsers) and resource records (kDSStdRecordTypeResources).
-                                                        #      Example: 7.7,10.6
-            for coordinate in self.valuesForAttribute(dsattributes.kDSNAttrMapCoordinates):
-                parts = coordinate.split(",")
-                if (len(parts) == 2):
-                    vcard.addProperty(Property("GEO", parts))
-                else:
-                    self.log.info("Ignoring malformed attribute %r with value %r. Well-formed example: 7.7,10.6." % (dsattributes.kDSNAttrMapCoordinates, coordinate))
-            #
-            # 3.5 ORGANIZATIONAL TYPES http://tools.ietf.org/html/rfc2426#section-3.5
-            #
-            # 3.5.1 TITLE Type Definition
-            for jobTitle in self.valuesForAttribute(dsattributes.kDSNAttrJobTitle):
-                addUniqueProperty(vcard, Property("TITLE", jobTitle), None, dsattributes.kDSNAttrJobTitle, jobTitle)
-
-            """
-            # UNIMPLEMENTED:
-            # 3.5.2 ROLE Type Definition
-            # 3.5.3 LOGO Type Definition
-            # 3.5.4 AGENT Type Definition
-            """
-            # 3.5.5 ORG Type Definition
-            company = self.joinedValuesForAttribute(dsattributes.kDSNAttrCompany)
-            if len(company) == 0:
-                company = self.joinedValuesForAttribute(dsattributes.kDSNAttrOrganizationName)
-            department = self.joinedValuesForAttribute(dsattributes.kDSNAttrDepartment)
-            extra = self.joinedValuesForAttribute(dsattributes.kDSNAttrOrganizationInfo)
-            if len(company) > 0 or len(department) > 0:
-                vcard.addProperty(Property("ORG", (company, department, extra,),))
-
-            # 3.6 EXPLANATORY TYPES http://tools.ietf.org/html/rfc2426#section-3.6
-            """
-            # UNIMPLEMENTED:
-            # 3.6.1 CATEGORIES Type Definition
-            """
-            # 3.6.2 NOTE Type Definition
-            # dsattributes.kDS1AttrComment,                  # Attribute used for unformatted comment.
-            # dsattributes.kDS1AttrNote,                  # Note attribute. Commonly used in printer records.
-            for comment in self.valuesForAttribute(dsattributes.kDS1AttrComment):
-                addUniqueProperty(vcard, Property("NOTE", comment), None, dsattributes.kDS1AttrComment, comment)
-
-            for note in self.valuesForAttribute(dsattributes.kDS1AttrNote):
-                addUniqueProperty(vcard, Property("NOTE", note), None, dsattributes.kDS1AttrNote, note)
-
-            # 3.6.3 PRODID Type Definition
-            #vcard.addProperty(Property("PRODID", vCardProductID + "//BUILD %s" % twistedcaldav.__version__))
-            #vcard.addProperty(Property("PRODID", vCardProductID))
-            # ADDED WITH CONSTANT PROPERTIES
-
-            # 3.6.4 REV Type Definition
-            revDate = self.isoDateStringForDateAttribute(dsattributes.kDS1AttrModificationTimestamp)
-            if revDate:
-                vcard.addProperty(Property("REV", DateTime.parseText(revDate, fullISO=True)))
-
-            """
-            # UNIMPLEMENTED:
-            # 3.6.5 SORT-STRING Type Definition
-            # 3.6.6 SOUND Type Definition
-            """
-            # 3.6.7 UID Type Definition
-            # dsattributes.kDS1AttrGeneratedUID,        # Used for 36 character (128 bit) unique ID. Usually found in user,
-                                                        #      group, and computer records. An example value is "A579E95E-CDFE-4EBC-B7E7-F2158562170F".
-                                                        #      The standard format contains 32 hex characters and four hyphen characters.
-            # !! don't use self.guid which is URL encoded
-            vcard.addProperty(Property("UID", self.firstValueForAttribute(dsattributes.kDS1AttrGeneratedUID)))
-
-            # 3.6.8 URL Type Definition
-            # dsattributes.kDSNAttrURL,                    # List of URLs.
-            # dsattributes.kDS1AttrWeblogURI,            # Single-valued attribute that defines the URI of a user's weblog.
-                                                        #     Usually found in user records (kDSStdRecordTypeUsers).
-                                                        #      Example: http://example.com/blog/jsmith
-            for url in self.valuesForAttribute(dsattributes.kDS1AttrWeblogURI):
-                addPropertyAndLabel(groupCount, "weblog", "URL", url, parameters={"TYPE": ["Weblog", ]})
-
-            for url in self.valuesForAttribute(dsattributes.kDSNAttrURL):
-                addPropertyAndLabel(groupCount, "_$!<HomePage>!$_", "URL", url, parameters={"TYPE": ["Homepage", ]})
-
-            # 3.6.9 VERSION Type Definition
-            # ALREADY ADDED
-
-            # 3.7 SECURITY TYPES http://tools.ietf.org/html/rfc2426#section-3.7
-            # 3.7.1 CLASS Type Definition
-            # ALREADY ADDED
-
-            # 3.7.2 KEY Type Definition
-
-            # dsattributes.kDSNAttrPGPPublicKey,        # Pretty Good Privacy public encryption key.
-            # dsattributes.kDS1AttrUserCertificate,        # Attribute containing the binary of the user's certificate.
-                                                        #       Usually found in user records. The certificate is data which identifies a user.
-                                                        #       This data is attested to by a known party, and can be independently verified
-                                                        #       by a third party.
-            # dsattributes.kDS1AttrUserPKCS12Data,        # Attribute containing binary data in PKCS #12 format.
-                                                        #       Usually found in user records. The value can contain keys, certificates,
-                                                        #      and other related information and is encrypted with a passphrase.
-            # dsattributes.kDS1AttrUserSMIMECertificate,# Attribute containing the binary of the user's SMIME certificate.
-                                                        #       Usually found in user records. The certificate is data which identifies a user.
-                                                        #       This data is attested to by a known party, and can be independently verified
-                                                        #       by a third party. SMIME certificates are often used for signed or encrypted
-                                                        #       emails.
-
-            for key in self.valuesForAttribute(dsattributes.kDSNAttrPGPPublicKey):
-                addUniqueProperty(vcard, Property("KEY", key, params={"ENCODING": ["b", ], "TYPE": ["PGPPublicKey", ]}), None, dsattributes.kDSNAttrPGPPublicKey, key)
-
-            for key in self.valuesForAttribute(dsattributes.kDS1AttrUserCertificate):
-                addUniqueProperty(vcard, Property("KEY", key, params={"ENCODING": ["b", ], "TYPE": ["UserCertificate", ]}), None, dsattributes.kDS1AttrUserCertificate, key)
-
-            for key in self.valuesForAttribute(dsattributes.kDS1AttrUserPKCS12Data):
-                addUniqueProperty(vcard, Property("KEY", key, params={"ENCODING": ["b", ], "TYPE": ["UserPKCS12Data", ]}), None, dsattributes.kDS1AttrUserPKCS12Data, key)
-
-            for key in self.valuesForAttribute(dsattributes.kDS1AttrUserSMIMECertificate):
-                addUniqueProperty(vcard, Property("KEY", key, params={"ENCODING": ["b", ], "TYPE": ["UserSMIMECertificate", ]}), None, dsattributes.kDS1AttrUserSMIMECertificate, key)
-
-            """
-            X- attributes, Address Book support
-            """
-            # X-AIM, X-JABBER, X-MSN, X-YAHOO, X-ICQ
-            # instant messaging
-            # dsattributes.kDSNAttrIMHandle,            # Represents the Instant Messaging handles of a user.
-                                                        #      Values should be prefixed with the appropriate IM type
-                                                        #       ie. AIM:, Jabber:, MSN:, Yahoo:, or ICQ:
-                                                        #       Usually found in user records (kDSStdRecordTypeUsers).
-
-            addPropertiesAndLabelsForPrefixedAttribute(groupCount, "X-", None, "aim",
-                                                        ["AIM", "JABBER", "MSN", "YAHOO", "ICQ"],
-                                                        {},
-                                                        dsattributes.kDSNAttrIMHandle,)
-
-            # X-ABRELATEDNAMES
-            # dsattributes.kDSNAttrRelationships,        #      multi-valued attribute that defines the relationship to the record type .
-                                                        #      found in user records (kDSStdRecordTypeUsers).
-                                                        #      Example: brother:John
-            addPropertiesAndLabelsForPrefixedAttribute(groupCount, None, "X-ABRELATEDNAMES", "friend",
-                                                        [],
-                                                        {"FATHER": "_$!<Father>!$_",
-                                                         "MOTHER": "_$!<Mother>!$_",
-                                                         "PARENT": "_$!<Parent>!$_",
-                                                         "BROTHER": "_$!<Brother>!$_",
-                                                         "SISTER": "_$!<Sister>!$_",
-                                                         "CHILD": "_$!<Child>!$_",
-                                                         "FRIEND": "_$!<Friend>!$_",
-                                                         "SPOUSE": "_$!<Spouse>!$_",
-                                                         "PARTNER": "_$!<Partner>!$_",
-                                                         "ASSISTANT": "_$!<Assistant>!$_",
-                                                         "MANAGER": "_$!<Manager>!$_", },
-                                                        dsattributes.kDSNAttrRelationships,)
-
-            # special case for Apple
-            if self.service.appleInternalServer:
-                for manager in self.valuesForAttribute("dsAttrTypeNative:appleManager"):
-                    splitManager = manager.split("|")
-                    if len(splitManager) >= 4:
-                        managerValue = "%s %s, %s" % (splitManager[0], splitManager[1], splitManager[3],)
-                    elif len(splitManager) >= 2:
-                        managerValue = "%s %s" % (splitManager[0], splitManager[1])
-                    else:
-                        managerValue = manager
-                    addPropertyAndLabel(groupCount, "_$!<Manager>!$_", "X-ABRELATEDNAMES", managerValue, parameters={"TYPE": ["Manager", ]})
-
-            """
-            # UNIMPLEMENTED: X- attributes
-
-            X-MAIDENNAME
-            X-PHONETIC-FIRST-NAME
-            X-PHONETIC-MIDDLE-NAME
-            X-PHONETIC-LAST-NAME
-
-            dsattributes.kDS1AttrPicture,               # Represents the path of the picture for each user displayed in the login window.
-                                                        #      Found in user records (kDSStdRecordTypeUsers).
-
-            dsattributes.kDS1AttrMapGUID,                # Represents the GUID for a record's map.
-            dsattributes.kDSNAttrMapURI,                # attribute that defines the URI of a user's location.
-
-            dsattributes.kDSNAttrOrganizationInfo,        # Usually the organization info of a user.
-            dsattributes.kDSNAttrAreaCode,                # Area code of a user's phone number.
-
-            dsattributes.kDSNAttrMIME,                    # Data contained in this attribute type is a fully qualified MIME Type.
-
-            """
-
-            # debug, create x attributes for all ds attributes
-            if self.service.addDSAttrXProperties:
-                for attribute in self.originalAttributes:
-                    for value in self.valuesForAttribute(attribute):
-                        vcard.addProperty(Property("X-" + "-".join(attribute.split(":")), removeControlChars(value)))
-
-            return vcard
-
-        if not self._vCard:
-            self._vCard = generateVCard()
-
-        return self._vCard
-
-
-    def vCardText(self):
-        if not self._vCardText:
-            self._vCardText = str(self.vCard())
-
-        return self._vCardText
-
-
-    def uriName(self):
-        if not self._uriName:
-            self._uriName = self.vCard().getProperty("UID").value() + ".vcf"
-        #print("uriName():self._uriName=%s" % self._uriName)
-        return self._uriName
-
-
-    def hRef(self, parentURI="/directory/"):
-        if not self._hRef:
-            self._hRef = davxml.HRef.fromString(joinURL(parentURI, self.uriName()))
-
-        return self._hRef
-
-
-    def readProperty(self, property, request):
-
-        if type(property) is tuple:
-            qname = property
-        else:
-            qname = property.qname()
-
-        namespace, name = qname
-
-        #print("VCardResource.readProperty: qname = %s" % (qname, ))
-
-        if namespace == dav_namespace:
-            if name == "resourcetype":
-                result = davxml.ResourceType.empty #@UndefinedVariable
-                #print("VCardResource.readProperty: qname = %s, result = %s" % (qname, result))
-                return result
-            elif name == "getetag":
-                result = davxml.GETETag(ETag(hashlib.md5(self.vCardText()).hexdigest()).generate())
-                #print("VCardResource.readProperty: qname = %s, result = %s" % (qname, result))
-                return result
-            elif name == "getcontenttype":
-                mimeType = MimeType('text', 'vcard', {})
-                result = davxml.GETContentType(generateContentType(mimeType))
-                #print("VCardResource.readProperty: qname = %s, result = %s" % (qname, result))
-                return result
-            elif name == "getcontentlength":
-                result = davxml.GETContentLength.fromString(str(len(self.vCardText())))
-                #print("VCardResource.readProperty: qname = %s, result = %s" % (qname, result))
-                return result
-            elif name == "getlastmodified":
-                if self.vCard().hasProperty("REV"):
-                    modDatetime = parse_date(self.vCard().propertyValue("REV"))
-                else:
-                    # use creation date attribute if it exists
-                    creationDateString = self.isoDateStringForDateAttribute(dsattributes.kDS1AttrCreationTimestamp)
-                    if creationDateString:
-                        modDatetime = parse_date(creationDateString)
-                    else:
-                        modDatetime = datetime.datetime.utcnow()
-
-                #strip time zone because time zones are unimplemented in davxml.GETLastModified.fromDate
-                d = modDatetime.date()
-                t = modDatetime.time()
-                modDatetimeNoTZ = datetime.datetime(d.year, d.month, d.day, t.hour, t.minute, t.second, t.microsecond, None)
-                result = davxml.GETLastModified.fromDate(modDatetimeNoTZ)
-                #print("VCardResource.readProperty: qname = %s, result = %s" % (qname, result))
-                return result
-            elif name == "creationdate":
-                creationDateString = self.isoDateStringForDateAttribute(dsattributes.kDS1AttrCreationTimestamp)
-                if creationDateString:
-                    creationDatetime = parse_date(creationDateString)
-                elif self.vCard().hasProperty("REV"):    # use modification date property if it exists
-                    creationDatetime = parse_date(self.vCard().propertyValue("REV"))
-                else:
-                    creationDatetime = datetime.datetime.utcnow()
-                result = davxml.CreationDate.fromDate(creationDatetime)
-                #print("VCardResource.readProperty: qname = %s, result = %s" % (qname, result))
-                return result
-            elif name == "displayname":
-                # AddressBook.app uses N. Use FN or UID instead?
-                result = davxml.DisplayName.fromString(self.vCard().propertyValue("N"))
-                #print("VCardResource.readProperty: qname = %s, result = %s" % (qname, result))
-                return result
-
-        elif namespace == twisted_dav_namespace:
-            return super(VCardRecord, self).readProperty(property, request)
-            #return DAVPropertyMixIn.readProperty(self, property, request)
-
-        return self.directoryBackedAddressBook.readProperty(property, request)
-
-
-    def listProperties(self, request):
-        #print("VCardResource.listProperties()")
-        qnames = set(self.liveProperties())
-
-        # Add dynamic live properties that exist
-        dynamicLiveProperties = (
-            (dav_namespace, "quota-available-bytes"),
-            (dav_namespace, "quota-used-bytes"),
-        )
-        for dqname in dynamicLiveProperties:
-            #print("VCardResource.listProperties: removing dqname=%s" % (dqname,))
-            qnames.remove(dqname)
-
-        for qname in self.deadProperties().list():
-            if (qname not in qnames) and (qname[0] != twisted_private_namespace):
-                #print("listProperties: adding qname=%s" % (qname,))
-                qnames.add(qname)
-
-        #for qn in qnames: print("VCardResource.listProperties: qn=%s" % (qn,))
-
-        yield qnames
-
-    listProperties = deferredGenerator(listProperties)
-
-
-
-# utility
-#remove control characters because vCard does not support them
-def removeControlChars(utf8String):
-    result = utf8String
-    for a in utf8String:
-        if '\x00' <= a <= '\x1F':
-            result = ""
-            for c in utf8String:
-                if '\x00' <= c <= '\x1F':
-                    pass
-                else:
-                    result += c
-    #if utf8String != result: print ("changed %r to %r" % (utf8String, result))
-    return result

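The removed removeControlChars() helper above strips ASCII control characters
(U+0000 through U+001F), which vCard text does not allow, but it does so with
a nested scan-and-rebuild loop. A more direct equivalent (a sketch only, not
part of this changeset) would be:

    def removeControlChars(utf8String):
        # Drop ASCII control characters; vCard text does not allow them.
        return "".join(c for c in utf8String if not ("\x00" <= c <= "\x1F"))
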
Modified: CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/principal.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/principal.py	2014-04-04 17:05:48 UTC (rev 13157)
+++ CalendarServer/branches/users/sagen/move2who-5/twistedcaldav/directory/principal.py	2014-04-04 17:20:27 UTC (rev 13158)
@@ -28,55 +28,55 @@
     "DirectoryCalendarPrincipalResource",
 ]
 
-from urllib import unquote
+from urllib import quote, unquote
 from urlparse import urlparse
+import uuid
 
+from twext.python.log import Logger
 from twisted.cred.credentials import UsernamePassword
-from twisted.python.failure import Failure
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet.defer import succeed
-from twisted.web.template import XMLFile, Element, renderer, tags
-from twistedcaldav.directory.util import NotFoundResource
-
-from txweb2.auth.digest import DigestedCredentials
-from txweb2 import responsecode
-from txweb2.http import HTTPError
-from txdav.xml import element as davxml
-from txweb2.dav.util import joinURL
-from txweb2.dav.noneprops import NonePropertyStore
-
-from twext.python.log import Logger
-
-
-try:
-    from twistedcaldav.authkerb import NegotiateCredentials
-    NegotiateCredentials # sigh, pyflakes
-except ImportError:
-    NegotiateCredentials = None
 from twisted.python.modules import getModule
-
+from twisted.web.template import XMLFile, Element, renderer
 from twistedcaldav import caldavxml, customxml
 from twistedcaldav.cache import DisabledCacheNotifier, PropfindCacheMixin
 from twistedcaldav.config import config
 from twistedcaldav.customxml import calendarserver_namespace
 from twistedcaldav.directory.augment import allowedAutoScheduleModes
 from twistedcaldav.directory.common import uidsResourceName
-from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
-from twistedcaldav.directory.idirectory import IDirectoryService
-from twistedcaldav.directory.wiki import getWikiACL
+from twistedcaldav.directory.util import NotFoundResource
+from twistedcaldav.directory.util import (
+    formatLink, formatLinks, formatPrincipals, formatList
+)
+from txdav.who.wiki import getWikiACL
+from twistedcaldav.extensions import (
+    ReadOnlyResourceMixIn, DAVPrincipalResource, DAVResourceWithChildrenMixin
+)
 from twistedcaldav.extensions import DirectoryElement
-from twistedcaldav.extensions import ReadOnlyResourceMixIn, DAVPrincipalResource, \
-    DAVResourceWithChildrenMixin
 from twistedcaldav.resource import CalendarPrincipalCollectionResource, CalendarPrincipalResource
 from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
+from txdav.who.delegates import RecordType as DelegateRecordType
+from txdav.who.directory import CalendarDirectoryRecordMixin
+from txdav.xml import element as davxml
+from txweb2 import responsecode
+from txweb2.auth.digest import DigestedCredentials
+from txweb2.dav.noneprops import NonePropertyStore
+from txweb2.dav.util import joinURL
+from txweb2.http import HTTPError
 
+try:
+    from twistedcaldav.authkerb import NegotiateCredentials
+    NegotiateCredentials  # sigh, pyflakes
+except ImportError:
+    NegotiateCredentials = None
+
 thisModule = getModule(__name__)
 log = Logger()
 
 
 class PermissionsMixIn (ReadOnlyResourceMixIn):
     def defaultAccessControlList(self):
-        return authReadACL
+        return succeed(authReadACL)
 
 
     @inlineCallbacks
@@ -94,7 +94,7 @@
         else:
             # ...otherwise permissions are fixed, and are not subject to
             # inheritance rules, etc.
-            returnValue(self.defaultAccessControlList())
+            returnValue((yield self.defaultAccessControlList()))
 
 
 
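The defaultAccessControlList() change above shows the conversion pattern used
throughout this revision: methods that used to return a value directly now
return a Deferred (here by wrapping the value in succeed()), and their callers
become @inlineCallbacks generators that yield the result. A minimal standalone
sketch of that pattern, with illustrative names only:

    from twisted.internet.defer import inlineCallbacks, returnValue, succeed

    def defaultAccessControlList():
        # Before: return the value directly.  After: wrap it in a Deferred.
        return succeed("acl")

    @inlineCallbacks
    def accessControlList():
        # Callers now yield the Deferred instead of using the value directly.
        acl = yield defaultAccessControlList()
        returnValue(acl)
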
@@ -108,7 +108,7 @@
 def cuTypeConverter(cuType):
     """ Converts calendar user types to OD type names """
 
-    return "recordType", DirectoryRecord.fromCUType(cuType)
+    return "recordType", CalendarDirectoryRecordMixin.fromCUType(cuType)
 
 
 
@@ -118,7 +118,7 @@
     cua = normalizeCUAddr(origCUAddr)
 
     if cua.startswith("urn:uuid:"):
-        return "guid", cua[9:]
+        return "guid", uuid.UUID(cua[9:])
 
     elif cua.startswith("mailto:"):
         return "emailAddresses", cua[7:]
@@ -126,7 +126,7 @@
     elif cua.startswith("/") or cua.startswith("http"):
         ignored, collection, id = cua.rsplit("/", 2)
         if collection == "__uids__":
-            return "guid", id
+            return "uid", id
         else:
             return "recordName", id
 
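The cuAddressConverter() hunks above change how calendar user addresses map to
directory query fields: urn:uuid: addresses are now parsed into uuid.UUID
objects, and __uids__ path addresses map to the "uid" field instead of "guid".
As an illustration of the address forms involved (the sample addresses below
are made up, not taken from the changeset):

    import uuid

    # Example inputs and the (field, value) pairs the converter now produces.
    examples = {
        "urn:uuid:9DC04A70-E6DD-11DF-9492-0800200C9A66":
            ("guid", uuid.UUID("9DC04A70-E6DD-11DF-9492-0800200C9A66")),
        "mailto:user01@example.com":
            ("emailAddresses", "user01@example.com"),
        "/principals/__uids__/ABC123":
            ("uid", "ABC123"),
        "/principals/users/user01":
            ("recordName", "user01"),
    }
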
@@ -150,18 +150,21 @@
         CalendarPrincipalCollectionResource.__init__(self, url)
         DAVResourceWithChildrenMixin.__init__(self)
 
-        self.directory = IDirectoryService(directory)
+        # MOVE2WHO
+        # self.directory = IDirectoryService(directory)
+        self.directory = directory
 
 
     def __repr__(self):
         return "<%s: %s %s>" % (self.__class__.__name__, self.directory, self._url)
 
 
+    @inlineCallbacks
     def locateChild(self, req, segments):
-        child = self.getChild(segments[0])
+        child = (yield self.getChild(segments[0]))
         if child is not None:
-            return (child, segments[1:])
-        return (NotFoundResource(principalCollections=self.principalCollections()), ())
+            returnValue((child, segments[1:]))
+        returnValue((NotFoundResource(principalCollections=self.principalCollections()), ()))
 
 
     def deadProperties(self):
@@ -174,27 +177,30 @@
         return succeed(None)
 
 
+    @inlineCallbacks
     def principalForShortName(self, recordType, name):
-        return self.principalForRecord(self.directory.recordWithShortName(recordType, name))
+        record = (yield self.directory.recordWithShortName(recordType, name))
+        returnValue((yield self.principalForRecord(record)))
 
 
     def principalForUser(self, user):
-        return self.principalForShortName(DirectoryService.recordType_users, user)
+        return self.principalForShortName(self.directory.recordType.lookupByName("user"), user)
 
 
+    @inlineCallbacks
     def principalForAuthID(self, user):
         # Basic/Digest creds -> just lookup user name
         if isinstance(user, UsernamePassword) or isinstance(user, DigestedCredentials):
-            return self.principalForUser(user.username)
+            returnValue((yield self.principalForUser(user.username)))
         elif NegotiateCredentials is not None and isinstance(user, NegotiateCredentials):
             authID = "Kerberos:%s" % (user.principal,)
-            principal = self.principalForRecord(self.directory.recordWithAuthID(authID))
+            principal = yield self.principalForRecord((yield self.directory.recordWithAuthID(authID)))
             if principal:
-                return principal
+                returnValue(principal)
             elif user.username:
-                return self.principalForUser(user.username)
+                returnValue((yield self.principalForUser(user.username)))
 
-        return None
+        returnValue(None)
 
 
     def principalForUID(self, uid):
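
In the hunk above, principalForUser() now resolves the user record type
through the directory service's recordType constants container
(lookupByName(), from twisted.python.constants) instead of the old
DirectoryService.recordType_users name. A standalone sketch of that lookup,
using an illustrative constants class rather than the real directory service:

    from twisted.python.constants import NamedConstant, Names

    class RecordType(Names):
        # Illustrative subset; the real record types come from the directory.
        user = NamedConstant()
        group = NamedConstant()

    assert RecordType.lookupByName("user") is RecordType.user
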
@@ -207,7 +213,7 @@
 
     def principalForRecord(self, record):
         if record is None or not record.enabled:
-            return None
+            return succeed(None)
         return self.principalForUID(record.uid)
 
     ##
@@ -217,7 +223,7 @@
     _cs_ns = "http://calendarserver.org/ns/"
     _fieldMap = {
         ("DAV:" , "displayname") :
-            ("fullName", None, "Display Name", davxml.DisplayName),
+            ("fullNames", None, "Display Name", davxml.DisplayName),
         ("urn:ietf:params:xml:ns:caldav" , "calendar-user-type") :
             ("", cuTypeConverter, "Calendar User Type",
             caldavxml.CalendarUserType),
@@ -281,16 +287,36 @@
         #
         # Create children
         #
-        for recordType in self.directory.recordTypes():
-            self.putChild(recordType, DirectoryPrincipalTypeProvisioningResource(self, recordType))
 
+        self.supportedChildTypes = (
+            self.directory.recordType.user,
+            self.directory.recordType.group,
+            self.directory.recordType.location,
+            self.directory.recordType.resource,
+            self.directory.recordType.address,
+        )
+
+        for name, recordType in [
+            (self.directory.recordTypeToOldName(r), r)
+            for r in self.supportedChildTypes
+        ]:
+            self.putChild(
+                name,
+                DirectoryPrincipalTypeProvisioningResource(
+                    self, name, recordType
+                )
+            )
+
         self.putChild(uidsResourceName, DirectoryPrincipalUIDProvisioningResource(self))
 
 
+    @inlineCallbacks
     def principalForUID(self, uid):
-        return self.getChild(uidsResourceName).getChild(uid)
+        child = (yield self.getChild(uidsResourceName))
+        returnValue((yield child.getChild(uid)))
 
 
+    @inlineCallbacks
     def _principalForURI(self, uri):
         scheme, netloc, path, _ignore_params, _ignore_query, _ignore_fragment = urlparse(uri)
 
@@ -312,56 +338,62 @@
 
             if (host != config.ServerHostName and
                 host not in config.Scheduling.Options.PrincipalHostAliases):
-                return None
+                returnValue(None)
 
             if port != {
                 "http" : config.HTTPPort,
                 "https": config.SSLPort,
             }[scheme]:
-                return None
+                returnValue(None)
 
         elif scheme == "urn":
             if path.startswith("uuid:"):
-                return self.principalForUID(path[5:])
+                returnValue((yield self.principalForUID(path[5:])))
             else:
-                return None
+                returnValue(None)
         else:
-            return None
+            returnValue(None)
 
         if not path.startswith(self._url):
-            return None
+            returnValue(None)
 
         path = path[len(self._url) - 1:]
 
         segments = [unquote(s) for s in path.rstrip("/").split("/")]
         if segments[0] == "" and len(segments) == 3:
-            typeResource = self.getChild(segments[1])
+            typeResource = yield self.getChild(segments[1])
             if typeResource is not None:
-                principalResource = typeResource.getChild(segments[2])
+                principalResource = yield typeResource.getChild(segments[2])
                 if principalResource:
-                    return principalResource
+                    returnValue(principalResource)
 
-        return None
+        returnValue(None)
 
 
+    @inlineCallbacks
     def principalForCalendarUserAddress(self, address):
         # First see if the address is a principal URI
-        principal = self._principalForURI(address)
+        principal = yield self._principalForURI(address)
         if principal:
-            if isinstance(principal, DirectoryCalendarPrincipalResource) and principal.record.enabledForCalendaring:
-                return principal
+            if (
+                isinstance(principal, DirectoryCalendarPrincipalResource) and
+                principal.record.hasCalendars
+            ):
+                returnValue(principal)
         else:
             # Next try looking it up in the directory
-            record = self.directory.recordWithCalendarUserAddress(address)
-            if record is not None and record.enabled and record.enabledForCalendaring:
-                return self.principalForRecord(record)
+            record = yield self.directory.recordWithCalendarUserAddress(address)
+            if record is not None and record.hasCalendars:
+                returnValue((yield self.principalForRecord(record)))
 
         log.debug("No principal for calendar user address: %r" % (address,))
-        return None
+        returnValue(None)
 
 
+    @inlineCallbacks
     def principalForRecord(self, record):
-        return self.getChild(uidsResourceName).principalForRecord(record)
+        child = (yield self.getChild(uidsResourceName))
+        returnValue((yield child.principalForRecord(record)))
 
 
     ##
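
principalForCalendarUserAddress() is likewise asynchronous now, and the old
record.enabled / record.enabledForCalendaring checks are replaced by the new
directory record attribute hasCalendars. A hypothetical caller (the function
and variable names here are made up for illustration) would consume it like
this:

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def organizerPrincipal(principalCollection, cuaddr):
        # principalForCalendarUserAddress() now returns a Deferred.
        principal = yield principalCollection.principalForCalendarUserAddress(cuaddr)
        returnValue(principal)
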
@@ -375,13 +407,16 @@
 
     def getChild(self, name):
         if name == "":
-            return self
+            return succeed(self)
         else:
-            return self.putChildren.get(name, None)
+            return succeed(self.putChildren.get(name, None))
 
 
     def listChildren(self):
-        return self.directory.recordTypes()
+        return [
+            self.directory.recordTypeToOldName(r) for r in
+            self.supportedChildTypes
+        ]
 
 
     ##
@@ -392,43 +427,20 @@
         return (self,)
 
 
-    ##
-    # Proxy callback from directory service
-    ##
 
-    def isProxyFor(self, record1, record2):
-        """
-        Test whether the principal identified by directory record1 is a proxy for the principal identified by
-        record2.
-
-        @param record1: directory record for a user
-        @type record1: L{DirectoryRecord}
-        @param record2: directory record to test with
-        @type record2: L{DirectoryRercord}
-
-        @return: C{True} if record1 is a proxy for record2, otherwise C{False}
-        @rtype: C{bool}
-        """
-
-        principal1 = self.principalForUID(record1.uid)
-        principal2 = self.principalForUID(record2.uid)
-        return principal1.isProxyFor(principal2)
-
-
-
 class DirectoryPrincipalTypeProvisioningResource (DirectoryProv