[CalendarServer-changes] [12899] CalendarServer/branches/users/sagen/move2who-3
source_changes at macosforge.org
source_changes at macosforge.org
Thu Mar 13 13:40:10 PDT 2014
Revision: 12899
http://trac.calendarserver.org/changeset/12899
Author: sagen at apple.com
Date: 2014-03-13 13:40:10 -0700 (Thu, 13 Mar 2014)
Log Message:
-----------
Merge in trunk to move2who-3
Modified Paths:
--------------
CalendarServer/branches/users/sagen/move2who-3/calendarserver/accesslog.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/provision/root.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/push/applepush.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/caldav.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/test/test_util.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/util.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/calverify.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/principals.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/shell/directory.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/test/test_principals.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/util.py
CalendarServer/branches/users/sagen/move2who-3/calendarserver/webcal/resource.py
CalendarServer/branches/users/sagen/move2who-3/conf/auth/accounts-test.xml
CalendarServer/branches/users/sagen/move2who-3/conf/auth/resources-test.xml
CalendarServer/branches/users/sagen/move2who-3/contrib/performance/loadtest/test_sim.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/customxml.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/addressbook.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/augment.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/calendar.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/calendaruserproxy.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/common.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/directory-principal-resource.html
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/principal.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/accounts.xml
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_augment.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_principal.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/util.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/wiki.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directorybackedaddressbook.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/extensions.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/freebusyurl.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/ical.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/resource.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/scheduling_store/caldav/resource.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/sharing.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/stdconfig.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/storebridge.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_addressbookmultiget.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_addressbookquery.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_calendarquery.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_collectioncontents.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_icalendar.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_mkcalendar.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_multiget.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_props.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_resource.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_sharing.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_upgrade.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_wrapping.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/util.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/timezoneservice.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/timezonestdservice.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/upgrade.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/util.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/caldav/scheduler.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/freebusy.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/imip/inbound.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/implicit.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/delivery.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/resource.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/scheduler.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/processing.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/scheduler.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/work.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/sql.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/attachments/accounts.xml
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/test_attachments.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/util.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/util.py
CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/icalendardirectoryservice.py
CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/file.py
CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/conduit.py
CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/resource.py
CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/test/test_conduit.py
CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/sql.py
CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/test/util.py
CalendarServer/branches/users/sagen/move2who-3/txdav/dps/client.py
CalendarServer/branches/users/sagen/move2who-3/txdav/dps/commands.py
CalendarServer/branches/users/sagen/move2who-3/txdav/dps/server.py
CalendarServer/branches/users/sagen/move2who-3/txdav/dps/test/test.xml
CalendarServer/branches/users/sagen/move2who-3/txdav/dps/test/test_client.py
CalendarServer/branches/users/sagen/move2who-3/txdav/who/delegates.py
CalendarServer/branches/users/sagen/move2who-3/txdav/who/groups.py
CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/accounts.xml
CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/resources.xml
CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_delegates.py
CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_groups.py
CalendarServer/branches/users/sagen/move2who-3/txdav/who/xml.py
CalendarServer/branches/users/sagen/move2who-3/txweb2/channel/http.py
CalendarServer/branches/users/sagen/move2who-3/txweb2/dav/resource.py
CalendarServer/branches/users/sagen/move2who-3/txweb2/server.py
Added Paths:
-----------
CalendarServer/branches/users/sagen/move2who-3/txdav/who/augment.py
CalendarServer/branches/users/sagen/move2who-3/txdav/who/directory.py
CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/augments.xml
CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_util.py
CalendarServer/branches/users/sagen/move2who-3/txdav/who/util.py
Removed Paths:
-------------
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/aggregate.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/appleopendirectory.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/cachingdirectory.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/directory.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/ldapdirectory.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_aggregate.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_buildquery.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_cachedirectory.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_directory.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_modify.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_proxyprincipalmembers.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_resources.py
CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_xmlfile.py
Property Changed:
----------------
CalendarServer/branches/users/sagen/move2who-3/
Property changes on: CalendarServer/branches/users/sagen/move2who-3
___________________________________________________________________
Modified: svn:mergeinfo
- /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
/CalendarServer/branches/release/CalendarServer-5.2-dev:11972,12357-12358,12794,12814
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/cross-pod-sharing:12038-12191
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/json:11622-11912
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
/CalendarServer/branches/users/cdaboo/scheduling-queue-refresh:11783-12557
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/cleanrevisions:12152-12334
/CalendarServer/branches/users/gaya/sharedgroupfixes:12120-12142
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
+ /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
/CalendarServer/branches/release/CalendarServer-5.2-dev:11972,12357-12358,12794,12814
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/cross-pod-sharing:12038-12191
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/json:11622-11912
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
/CalendarServer/branches/users/cdaboo/scheduling-queue-refresh:11783-12557
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/cleanrevisions:12152-12334
/CalendarServer/branches/users/gaya/sharedgroupfixes:12120-12142
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/move2who:12819-12860
/CalendarServer/branches/users/sagen/move2who-2:12861-12898
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/accesslog.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/accesslog.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/accesslog.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -48,7 +48,6 @@
from twisted.protocols import amp
from twistedcaldav.config import config
-from twistedcaldav.directory.directory import DirectoryService
from txdav.xml import element as davxml
@@ -91,22 +90,27 @@
if hasattr(request, "authzUser") and str(request.authzUser.children[0]) != uidn:
uidz = str(request.authzUser.children[0])
- def convertUIDtoShortName(uid):
- uid = uid.rstrip("/")
- uid = uid[uid.rfind("/") + 1:]
- record = request.site.resource.getDirectory().recordWithUID(uid)
- if record:
- if record.recordType == DirectoryService.recordType_users:
- return record.shortNames[0]
- else:
- return "(%s)%s" % (record.recordType, record.shortNames[0],)
- else:
- return uid
+ # def convertUIDtoShortName(uid):
+ # uid = uid.rstrip("/")
+ # uid = uid[uid.rfind("/") + 1:]
+ # record = request.site.resource.getDirectory().recordWithUID(uid)
+ # if record:
+ # if record.recordType == DirectoryService.recordType_users:
+ # return record.shortNames[0]
+ # else:
+ # return "(%s)%s" % (record.recordType, record.shortNames[0],)
+ # else:
+ # return uid
- uidn = convertUIDtoShortName(uidn)
- if uidz:
- uidz = convertUIDtoShortName(uidz)
+ # MOVE2WHO
+ # Better to stick the records directly on the request at
+ # an earlier point, since we can't do anything deferred
+ # in here.
+ # uidn = convertUIDtoShortName(uidn)
+ # if uidz:
+ # uidz = convertUIDtoShortName(uidz)
+
if uidn and uidz:
uid = '"%s as %s"' % (uidn, uidz,)
else:
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/provision/root.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/provision/root.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/provision/root.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -27,7 +27,7 @@
from txweb2.http import HTTPError, StatusResponse, RedirectResponse
from twisted.cred.error import LoginFailed, UnauthorizedLogin
-from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.python.reflect import namedClass
from twisted.web.xmlrpc import Proxy
from twisted.web.error import Error as WebError
@@ -110,7 +110,7 @@
def defaultAccessControlList(self):
- return config.RootResourceACL
+ return succeed(config.RootResourceACL)
@inlineCallbacks
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/push/applepush.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/push/applepush.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/push/applepush.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -820,23 +820,25 @@
def defaultAccessControlList(self):
- return davxml.ACL(
- # DAV:Read for authenticated principals
- davxml.ACE(
- davxml.Principal(davxml.Authenticated()),
- davxml.Grant(
- davxml.Privilege(davxml.Read()),
+ return succeed(
+ davxml.ACL(
+ # DAV:Read for authenticated principals
+ davxml.ACE(
+ davxml.Principal(davxml.Authenticated()),
+ davxml.Grant(
+ davxml.Privilege(davxml.Read()),
+ ),
+ davxml.Protected(),
),
- davxml.Protected(),
- ),
- # DAV:Write for authenticated principals
- davxml.ACE(
- davxml.Principal(davxml.Authenticated()),
- davxml.Grant(
- davxml.Privilege(davxml.Write()),
+ # DAV:Write for authenticated principals
+ davxml.ACE(
+ davxml.Principal(davxml.Authenticated()),
+ davxml.Grant(
+ davxml.Privilege(davxml.Write()),
+ ),
+ davxml.Protected(),
),
- davxml.Protected(),
- ),
+ )
)
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/caldav.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/caldav.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -86,14 +86,12 @@
from txdav.common.datastore.work.revision_cleanup import (
scheduleFirstFindMinRevision
)
-from txdav.dps.server import DirectoryProxyServiceMaker
+from txdav.who.util import directoryFromConfig
from txdav.dps.client import DirectoryService as DirectoryProxyClientService
-from txdav.who.groups import GroupCacher as NewGroupCacher
+from txdav.who.groups import GroupCacher
from twistedcaldav import memcachepool
from twistedcaldav.config import config, ConfigurationError
-from twistedcaldav.directory import calendaruserproxy
-from twistedcaldav.directory.directory import GroupMembershipCacheUpdater
from txdav.who.groups import scheduleNextGroupCachingUpdate
from twistedcaldav.localization import processLocalizationFiles
from twistedcaldav.stdconfig import DEFAULT_CONFIG, DEFAULT_CONFIG_FILE
@@ -533,10 +531,7 @@
)
self.monitor.addProcessObject(process, PARENT_ENVIRONMENT)
- if (
- config.DirectoryProxy.Enabled and
- config.DirectoryProxy.SocketPath != ""
- ):
+ if (config.DirectoryProxy.SocketPath != ""):
log.info("Adding directory proxy service")
dpsArgv = [
@@ -912,10 +907,10 @@
CalDAV and CardDAV requests.
"""
pool, txnFactory = getDBPool(config)
- store = storeFromConfig(config, txnFactory)
+ directory = DirectoryProxyClientService("FIXME")
+ store = storeFromConfig(config, txnFactory, directory)
logObserver = AMPCommonAccessLoggingObserver()
result = self.requestProcessingService(options, store, logObserver)
- directory = store.directoryService()
if pool is not None:
pool.setServiceParent(result)
@@ -987,14 +982,9 @@
# Optionally set up group cacher
if config.GroupCaching.Enabled:
- groupCacher = GroupMembershipCacheUpdater(
- calendaruserproxy.ProxyDBService,
+ groupCacher = GroupCacher(
directory,
- config.GroupCaching.UpdateSeconds,
- config.GroupCaching.ExpireSeconds,
- config.GroupCaching.LockSeconds,
- namespace=config.GroupCaching.MemcachedPool,
- useExternalProxies=config.GroupCaching.UseExternalProxies,
+ updateSeconds=config.GroupCaching.UpdateSeconds
)
else:
groupCacher = None
@@ -1294,6 +1284,13 @@
if store is None:
raise StoreNotAvailable()
+ # Create a Directory Proxy "Server" service and hand it to the
+ # store.
+ # FIXME: right now the store passed *to* the directory is the
+ # calendar/contacts data store, but for a multi-server deployment
+ # it will need its own separate store.
+ store.setDirectoryService(directoryFromConfig(config, store=store))
+
result = self.requestProcessingService(options, store, logObserver)
# Optionally set up push notifications
@@ -1330,21 +1327,12 @@
# Optionally set up group cacher
if config.GroupCaching.Enabled:
- groupCacher = GroupMembershipCacheUpdater(
- calendaruserproxy.ProxyDBService,
+ groupCacher = GroupCacher(
directory,
- config.GroupCaching.UpdateSeconds,
- config.GroupCaching.ExpireSeconds,
- config.GroupCaching.LockSeconds,
- namespace=config.GroupCaching.MemcachedPool,
- useExternalProxies=config.GroupCaching.UseExternalProxies
+ updateSeconds=config.GroupCaching.UpdateSeconds
)
- newGroupCacher = NewGroupCacher(
- DirectoryProxyClientService(None)
- )
else:
groupCacher = None
- newGroupCacher = None
# Optionally enable Manhole access
if config.Manhole.Enabled:
@@ -1375,17 +1363,11 @@
"manhole_tap could not be imported"
)
- # Optionally enable Directory Proxy
- if config.DirectoryProxy.Enabled:
- dps = DirectoryProxyServiceMaker().makeService(None)
- dps.setServiceParent(result)
-
def decorateTransaction(txn):
txn._pushDistributor = pushDistributor
txn._rootResource = result.rootResource
txn._mailRetriever = mailRetriever
txn._groupCacher = groupCacher
- txn._newGroupCacher = newGroupCacher
store.callWithNewTransactions(decorateTransaction)
@@ -1425,7 +1407,7 @@
Popen(memcachedArgv)
return self.storageService(
- slaveSvcCreator, logObserver, uid=uid, gid=gid
+ slaveSvcCreator, logObserver, uid=uid, gid=gid, directory=None
)
@@ -1438,10 +1420,17 @@
"""
def toolServiceCreator(pool, store, ignored, storageService):
+ # Create a Directory Proxy "Server" service and hand it to the
+ # store
+ # FIXME: right now the store passed *to* the directory is the
+ # calendar/contacts data store, but for a multi-server deployment
+ # it will need its own separate store.
+ store.setDirectoryService(directoryFromConfig(config, store=store))
return config.UtilityServiceClass(store)
uid, gid = getSystemIDs(config.UserName, config.GroupName)
- return self.storageService(toolServiceCreator, None, uid=uid, gid=gid)
+ return self.storageService(toolServiceCreator, None, uid=uid, gid=gid,
+ directory=None)
def makeService_Agent(self, options):
@@ -1489,7 +1478,7 @@
def storageService(
- self, createMainService, logObserver, uid=None, gid=None
+ self, createMainService, logObserver, uid=None, gid=None, directory=None
):
"""
If necessary, create a service to be started used for storage; for
@@ -1515,6 +1504,9 @@
running as root (also the gid to chown Attachments to).
@type gid: C{int}
+ @param directory: The directory service to use.
+ @type directory: L{IStoreDirectoryService} or None
+
@return: the appropriate a service to start.
@rtype: L{IService}
"""
@@ -1529,7 +1521,7 @@
maxConnections=config.MaxDBConnectionsPerPool
)
cp.setServiceParent(ms)
- store = storeFromConfig(config, cp.connection)
+ store = storeFromConfig(config, cp.connection, directory)
pps = PreProcessingService(
createMainService, cp, store, logObserver, storageService
@@ -1654,7 +1646,7 @@
"Unknown database type {}".format(config.DBType)
)
else:
- store = storeFromConfig(config, None)
+ store = storeFromConfig(config, None, directory)
return createMainService(None, store, logObserver, None)
@@ -1914,14 +1906,9 @@
# Optionally set up group cacher
if config.GroupCaching.Enabled:
- groupCacher = GroupMembershipCacheUpdater(
- calendaruserproxy.ProxyDBService,
+ groupCacher = GroupCacher(
directory,
- config.GroupCaching.UpdateSeconds,
- config.GroupCaching.ExpireSeconds,
- config.GroupCaching.LockSeconds,
- namespace=config.GroupCaching.MemcachedPool,
- useExternalProxies=config.GroupCaching.UseExternalProxies
+ updateSeconds=config.GroupCaching.UpdateSeconds
)
else:
groupCacher = None
@@ -1936,7 +1923,10 @@
return multi
- ssvc = self.storageService(spawnerSvcCreator, None, uid, gid)
+ ssvc = self.storageService(
+ spawnerSvcCreator, None, uid, gid,
+ directory=DirectoryProxyClientService("FIXME")
+ )
ssvc.setServiceParent(s)
return s
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/test/test_util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/test/test_util.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/test/test_util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -14,13 +14,14 @@
# limitations under the License.
##
-from calendarserver.tap.util import directoryFromConfig, MemoryLimitService, Stepper
+from calendarserver.tap.util import MemoryLimitService, Stepper
from twistedcaldav.util import computeProcessCount
from twistedcaldav.test.util import TestCase
from twistedcaldav.config import config
from twistedcaldav.directory.augment import AugmentXMLDB
from twisted.internet.task import Clock
from twisted.internet.defer import succeed, inlineCallbacks
+from txdav.who.util import directoryFromConfig
class ProcessCountTestCase(TestCase):
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/util.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/tap/util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -53,10 +53,8 @@
from twistedcaldav.cache import CacheStoreNotifierFactory
from twistedcaldav.directory import calendaruserproxy
from twistedcaldav.directory.addressbook import DirectoryAddressBookHomeProvisioningResource
-from twistedcaldav.directory.aggregate import AggregateDirectoryService
from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
from twistedcaldav.directory.digest import QopDigestCredentialFactory
-from twistedcaldav.directory.directory import GroupMembershipCache
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
from twistedcaldav.directory.wiki import WikiDirectoryService
from calendarserver.push.notifier import NotifierFactory
@@ -100,7 +98,12 @@
from urllib import quote
from twisted.python.usage import UsageError
+from txdav.dps.client import DirectoryService as DirectoryProxyClientService
+from twext.who.checker import UsernamePasswordCredentialChecker
+from twext.who.checker import HTTPDigestCredentialChecker
+from twisted.cred.error import UnauthorizedLogin
+from txweb2.dav.auth import IPrincipalCredentials
log = Logger()
@@ -218,7 +221,7 @@
-def storeFromConfig(config, txnFactory, directoryService=None):
+def storeFromConfig(config, txnFactory, directoryService):
"""
Produce an L{IDataStore} from the given configuration, transaction factory,
and notifier factory.
@@ -236,9 +239,6 @@
if config.EnableResponseCache and config.Memcached.Pools.Default.ClientEnabled:
notifierFactories["cache"] = CacheStoreNotifierFactory()
- if directoryService is None:
- directoryService = directoryFromConfig(config)
-
quota = config.UserQuota
if quota == 0:
quota = None
@@ -281,10 +281,11 @@
-def directoryFromConfig(config):
+def REMOVEMEdirectoryFromConfig(config):
"""
Create an L{AggregateDirectoryService} from the given configuration.
"""
+
#
# Setup the Augment Service
#
@@ -370,7 +371,56 @@
return directory
+# MOVE2WHO -- should we move this class somewhere else?
+class PrincipalCredentialChecker(object):
+ credentialInterfaces = (IPrincipalCredentials,)
+ @inlineCallbacks
+ def requestAvatarId(self, credentials):
+ credentials = IPrincipalCredentials(credentials)
+
+ if credentials.authnPrincipal is None:
+ raise UnauthorizedLogin("No such user: %s" % (credentials.credentials.username,))
+
+ # See if record is enabledForLogin
+ if not credentials.authnPrincipal.record.isLoginEnabled():
+ raise UnauthorizedLogin(
+ "User not allowed to log in: {user}".format(
+ user=credentials.credentials.username
+ )
+ )
+
+ # Handle Kerberos as a separate behavior
+ try:
+ from twistedcaldav.authkerb import NegotiateCredentials
+ except ImportError:
+ NegotiateCredentials = None
+
+ if NegotiateCredentials and isinstance(credentials.credentials,
+ NegotiateCredentials):
+ # If we get here with Kerberos, then authentication has already succeeded
+ returnValue(
+ (
+ credentials.authnPrincipal.principalURL(),
+ credentials.authzPrincipal.principalURL(),
+ credentials.authnPrincipal,
+ credentials.authzPrincipal,
+ )
+ )
+ else:
+ if (yield credentials.authnPrincipal.record.verifyCredentials(credentials.credentials)):
+ returnValue(
+ (
+ credentials.authnPrincipal.principalURL(),
+ credentials.authzPrincipal.principalURL(),
+ credentials.authnPrincipal,
+ credentials.authzPrincipal,
+ )
+ )
+ else:
+ raise UnauthorizedLogin("Incorrect credentials for %s" % (credentials.credentials.username,))
+
+
def getRootResource(config, newStore, resources=None):
"""
Set up directory service and resource hierarchy based on config.
@@ -407,22 +457,26 @@
addressBookResourceClass = DirectoryAddressBookHomeProvisioningResource
directoryBackedAddressBookResourceClass = DirectoryBackedAddressBookResource
apnSubscriptionResourceClass = APNSubscriptionResource
+ principalResourceClass = DirectoryPrincipalProvisioningResource
directory = newStore.directoryService()
+ principalCollection = principalResourceClass("/principals/", directory)
#
# Setup the ProxyDB Service
#
- proxydbClass = namedClass(config.ProxyDBService.type)
- log.info("Configuring proxydb service of type: {cls}", cls=proxydbClass)
+ # MOVE2WHO
+ # proxydbClass = namedClass(config.ProxyDBService.type)
- try:
- calendaruserproxy.ProxyDBService = proxydbClass(**config.ProxyDBService.params)
- except IOError:
- log.error("Could not start proxydb service")
- raise
+ # log.info("Configuring proxydb service of type: {cls}", cls=proxydbClass)
+ # try:
+ # calendaruserproxy.ProxyDBService = proxydbClass(**config.ProxyDBService.params)
+ # except IOError:
+ # log.error("Could not start proxydb service")
+ # raise
+
#
# Configure the Site and Wrappers
#
@@ -431,9 +485,11 @@
portal = Portal(auth.DavRealm())
- portal.registerChecker(directory)
+ portal.registerChecker(UsernamePasswordCredentialChecker(directory))
+ portal.registerChecker(HTTPDigestCredentialChecker(directory))
+ portal.registerChecker(PrincipalCredentialChecker())
- realm = directory.realmName or ""
+ realm = directory.realmName.encode("utf-8") or ""
log.info("Configuring authentication for realm: {realm}", realm=realm)
@@ -491,7 +547,7 @@
#
log.info("Setting up document root at: {root}", root=config.DocumentRoot)
- principalCollection = directory.principalCollection
+ # principalCollection = directory.principalCollection
if config.EnableCalDAV:
log.info("Setting up calendar collection: {cls}", cls=calendarResourceClass)
@@ -712,6 +768,7 @@
#
# Configure ancillary data
#
+ # MOVE2WHO
log.info("Configuring authentication wrapper")
overrides = {}
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/calverify.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/calverify.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/calverify.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -431,7 +431,7 @@
configuration, creating one first if necessary.
"""
if self._directory is None:
- self._directory = getDirectory(self.config) #directoryFromConfig(self.config)
+ self._directory = getDirectory(self.config)
return self._directory
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/principals.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/principals.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/principals.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -26,17 +26,15 @@
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from txdav.xml import element as davxml
+from txdav.who.delegates import addDelegate, removeDelegate
-from txdav.xml.base import decodeXMLName, encodeXMLName
from twistedcaldav.config import config
-from twistedcaldav.directory.directory import UnknownRecordTypeError, DirectoryError
from txdav.who.groups import schedulePolledGroupCachingUpdate
from calendarserver.tools.util import (
- booleanArgument, proxySubprincipal, action_addProxyPrincipal,
- principalForPrincipalID, prettyPrincipal, ProxyError,
- action_removeProxyPrincipal
+ booleanArgument, proxySubprincipal,
+ recordForPrincipalID, prettyPrincipal, prettyRecord, ProxyError
)
from twistedcaldav.directory.augment import allowedAutoScheduleModes
@@ -45,11 +43,6 @@
def usage(e=None):
if e:
- if isinstance(e, UnknownRecordTypeError):
- print("Valid record types:")
- for recordType in config.directory.recordTypes():
- print(" %s" % (recordType,))
-
print(e)
print("")
@@ -74,10 +67,10 @@
print(" --search <search-string>: search for matching principals")
print(" --list-principal-types: list all of the known principal types")
print(" --list-principals type: list all principals of the given type")
- print(" --read-property=property: read DAV property (eg.: {DAV:}group-member-set)")
print(" --list-read-proxies: list proxies with read-only access")
print(" --list-write-proxies: list proxies with read-write access")
print(" --list-proxies: list all proxies")
+ print(" --list-proxy-for: principals this principal is a proxy for")
print(" --add-read-proxy=principal: add a read-only proxy")
print(" --add-write-proxy=principal: add a read-write proxy")
print(" --remove-proxy=principal: remove a proxy")
@@ -118,30 +111,29 @@
resource, directory, store, and whatever has been assigned to "params".
"""
if self.function is not None:
- rootResource = self.rootResource()
- directory = rootResource.getDirectory()
- yield self.function(rootResource, directory, self.store, *self.params)
+ yield self.function(self.store, *self.params)
+
attrMap = {
- 'GeneratedUID' : { 'attr' : 'guid', },
- 'RealName' : { 'attr' : 'fullName', },
- 'RecordName' : { 'attr' : 'shortNames', },
- 'AutoSchedule' : { 'attr' : 'autoSchedule', },
- 'AutoAcceptGroup' : { 'attr' : 'autoAcceptGroup', },
+ 'GeneratedUID': {'attr': 'guid', },
+ 'RealName': {'attr': 'fullName', },
+ 'RecordName': {'attr': 'shortNames', },
+ 'AutoSchedule': {'attr': 'autoSchedule', },
+ 'AutoAcceptGroup': {'attr': 'autoAcceptGroup', },
- 'Comment' : { 'extras' : True, 'attr' : 'comment', },
- 'Description' : { 'extras' : True, 'attr' : 'description', },
- 'Type' : { 'extras' : True, 'attr' : 'type', },
+ 'Comment': {'extras': True, 'attr': 'comment', },
+ 'Description': {'extras': True, 'attr': 'description', },
+ 'Type': {'extras': True, 'attr': 'type', },
# For "Locations", i.e. scheduled spaces
- 'Capacity' : { 'extras' : True, 'attr' : 'capacity', },
- 'Floor' : { 'extras' : True, 'attr' : 'floor', },
- 'AssociatedAddress' : { 'extras' : True, 'attr' : 'associatedAddress', },
+ 'Capacity': {'extras': True, 'attr': 'capacity', },
+ 'Floor': {'extras': True, 'attr': 'floor', },
+ 'AssociatedAddress': {'extras': True, 'attr': 'associatedAddress', },
# For "Addresses", i.e. nonscheduled areas containing Locations
- 'AbbreviatedName' : { 'extras' : True, 'attr' : 'abbreviatedName', },
- 'StreetAddress' : { 'extras' : True, 'attr' : 'streetAddress', },
- 'Geo' : { 'extras' : True, 'attr' : 'geo', },
+ 'AbbreviatedName': {'extras': True, 'attr': 'abbreviatedName', },
+ 'StreetAddress': {'extras': True, 'attr': 'streetAddress', },
+ 'Geo': {'extras': True, 'attr': 'geo', },
}
@@ -156,10 +148,10 @@
"search=",
"list-principal-types",
"list-principals=",
- "read-property=",
"list-read-proxies",
"list-write-proxies",
"list-proxies",
+ "list-proxy-for",
"add-read-proxy=",
"add-write-proxy=",
"remove-proxy=",
@@ -185,7 +177,7 @@
# Get configuration
#
configFileName = None
- addType = None
+ # addType = None
listPrincipalTypes = False
listPrincipals = None
searchPrincipals = None
@@ -193,6 +185,10 @@
verbose = False
for opt, arg in optargs:
+
+ # Args come in as encoded bytes
+ arg = arg.decode("utf-8")
+
if opt in ("-h", "--help"):
usage()
@@ -202,11 +198,11 @@
elif opt in ("-f", "--config"):
configFileName = arg
- elif opt in ("-a", "--add"):
- addType = arg
+ # elif opt in ("-a", "--add"):
+ # addType = arg
- elif opt in ("-r", "--remove"):
- principalActions.append((action_removePrincipal,))
+ # elif opt in ("-r", "--remove"):
+ # principalActions.append((action_removePrincipal,))
elif opt in ("", "--list-principal-types"):
listPrincipalTypes = True
@@ -217,13 +213,6 @@
elif opt in ("", "--search"):
searchPrincipals = arg
- elif opt in ("", "--read-property"):
- try:
- qname = decodeXMLName(arg)
- except ValueError, e:
- abort(e)
- principalActions.append((action_readProperty, qname))
-
elif opt in ("", "--list-read-proxies"):
principalActions.append((action_listProxies, "read"))
@@ -233,6 +222,9 @@
elif opt in ("-L", "--list-proxies"):
principalActions.append((action_listProxies, "read", "write"))
+ elif opt in ("--list-proxy-for"):
+ principalActions.append((action_listProxyFor, "read", "write"))
+
elif opt in ("--add-read-proxy", "--add-write-proxy"):
if "read" in opt:
proxyType = "read"
@@ -240,74 +232,63 @@
proxyType = "write"
else:
raise AssertionError("Unknown proxy type")
-
- try:
- principalForPrincipalID(arg, checkOnly=True)
- except ValueError, e:
- abort(e)
-
principalActions.append((action_addProxy, proxyType, arg))
elif opt in ("", "--remove-proxy"):
- try:
- principalForPrincipalID(arg, checkOnly=True)
- except ValueError, e:
- abort(e)
-
principalActions.append((action_removeProxy, arg))
- elif opt in ("", "--set-auto-schedule"):
- try:
- autoSchedule = booleanArgument(arg)
- except ValueError, e:
- abort(e)
+ # elif opt in ("", "--set-auto-schedule"):
+ # try:
+ # autoSchedule = booleanArgument(arg)
+ # except ValueError, e:
+ # abort(e)
- principalActions.append((action_setAutoSchedule, autoSchedule))
+ # principalActions.append((action_setAutoSchedule, autoSchedule))
- elif opt in ("", "--get-auto-schedule"):
- principalActions.append((action_getAutoSchedule,))
+ # elif opt in ("", "--get-auto-schedule"):
+ # principalActions.append((action_getAutoSchedule,))
- elif opt in ("", "--set-auto-schedule-mode"):
- try:
- if arg not in allowedAutoScheduleModes:
- raise ValueError("Unknown auto-schedule mode: %s" % (arg,))
- autoScheduleMode = arg
- except ValueError, e:
- abort(e)
+ # elif opt in ("", "--set-auto-schedule-mode"):
+ # try:
+ # if arg not in allowedAutoScheduleModes:
+ # raise ValueError("Unknown auto-schedule mode: %s" % (arg,))
+ # autoScheduleMode = arg
+ # except ValueError, e:
+ # abort(e)
- principalActions.append((action_setAutoScheduleMode, autoScheduleMode))
+ # principalActions.append((action_setAutoScheduleMode, autoScheduleMode))
- elif opt in ("", "--get-auto-schedule-mode"):
- principalActions.append((action_getAutoScheduleMode,))
+ # elif opt in ("", "--get-auto-schedule-mode"):
+ # principalActions.append((action_getAutoScheduleMode,))
- elif opt in ("", "--set-auto-accept-group"):
- try:
- principalForPrincipalID(arg, checkOnly=True)
- except ValueError, e:
- abort(e)
+ # elif opt in ("", "--set-auto-accept-group"):
+ # try:
+ # yield recordForPrincipalID(arg, checkOnly=True)
+ # except ValueError, e:
+ # abort(e)
- principalActions.append((action_setAutoAcceptGroup, arg))
+ # principalActions.append((action_setAutoAcceptGroup, arg))
- elif opt in ("", "--get-auto-accept-group"):
- principalActions.append((action_getAutoAcceptGroup,))
+ # elif opt in ("", "--get-auto-accept-group"):
+ # principalActions.append((action_getAutoAcceptGroup,))
- elif opt in ("", "--set-geo"):
- principalActions.append((action_setValue, "Geo", arg))
+ # elif opt in ("", "--set-geo"):
+ # principalActions.append((action_setValue, "Geo", arg))
- elif opt in ("", "--get-geo"):
- principalActions.append((action_getValue, "Geo"))
+ # elif opt in ("", "--get-geo"):
+ # principalActions.append((action_getValue, "Geo"))
- elif opt in ("", "--set-street-address"):
- principalActions.append((action_setValue, "StreetAddress", arg))
+ # elif opt in ("", "--set-street-address"):
+ # principalActions.append((action_setValue, "StreetAddress", arg))
- elif opt in ("", "--get-street-address"):
- principalActions.append((action_getValue, "StreetAddress"))
+ # elif opt in ("", "--get-street-address"):
+ # principalActions.append((action_getValue, "StreetAddress"))
- elif opt in ("", "--set-address"):
- principalActions.append((action_setValue, "AssociatedAddress", arg))
+ # elif opt in ("", "--set-address"):
+ # principalActions.append((action_setValue, "AssociatedAddress", arg))
- elif opt in ("", "--get-address"):
- principalActions.append((action_getValue, "AssociatedAddress"))
+ # elif opt in ("", "--get-address"):
+ # principalActions.append((action_getValue, "AssociatedAddress"))
else:
raise NotImplementedError(opt)
@@ -322,32 +303,34 @@
function = runListPrincipalTypes
params = ()
- elif addType:
+ # elif addType:
- try:
- addType = matchStrings(addType, ["locations", "resources", "addresses"])
- except ValueError, e:
- print(e)
- return
+ # try:
+ # addType = matchStrings(addType, ["locations", "resources", "addresses"])
+ # except ValueError, e:
+ # print(e)
+ # return
- try:
- fullName, shortName, guid = parseCreationArgs(args)
- except ValueError, e:
- print(e)
- return
+ # try:
+ # fullName, shortName, guid = parseCreationArgs(args)
+ # except ValueError, e:
+ # print(e)
+ # return
- if shortName is not None:
- shortNames = [shortName]
- else:
- shortNames = ()
+ # if shortName is not None:
+ # shortNames = [shortName]
+ # else:
+ # shortNames = ()
- function = runAddPrincipal
- params = (addType, guid, shortNames, fullName)
+ # function = runAddPrincipal
+ # params = (addType, guid, shortNames, fullName)
elif listPrincipals:
try:
- listPrincipals = matchStrings(listPrincipals, ["users", "groups",
- "locations", "resources", "addresses"])
+ listPrincipals = matchStrings(
+ listPrincipals,
+ ["users", "groups", "locations", "resources", "addresses"]
+ )
except ValueError, e:
print(e)
return
@@ -363,21 +346,19 @@
params = (searchPrincipals,)
else:
- #
- # Do a quick sanity check that arguments look like principal
- # identifiers.
- #
if not args:
usage("No principals specified.")
- for arg in args:
- try:
- principalForPrincipalID(arg, checkOnly=True)
- except ValueError, e:
- abort(e)
+ # We don't have a directory yet
+ # for arg in args:
+ # try:
+ # yield recordForPrincipalID(arg, checkOnly=True)
+ # except ValueError, e:
+ # abort(e)
+ unicodeArgs = [a.decode("utf-8") for a in args]
function = runPrincipalActions
- params = (args, principalActions)
+ params = (unicodeArgs, principalActions)
PrincipalService.function = function
PrincipalService.params = params
@@ -385,74 +366,86 @@
-def runListPrincipalTypes(service, rootResource, directory, store):
+def runListPrincipalTypes(service, store):
+ directory = store.directoryService()
for recordType in directory.recordTypes():
- print(recordType)
+ print(directory.recordTypeToOldString(recordType))
return succeed(None)
-def runListPrincipals(service, rootResource, directory, store, listPrincipals):
+@inlineCallbacks
+def runListPrincipals(service, store, listPrincipals):
+ directory = store.directoryService()
+ recordType = directory.oldNameToRecordType(listPrincipals)
try:
- records = list(directory.listRecords(listPrincipals))
+ records = list((yield directory.recordsWithRecordType(recordType)))
if records:
printRecordList(records)
else:
print("No records of type %s" % (listPrincipals,))
except UnknownRecordTypeError, e:
usage(e)
- return succeed(None)
+ returnValue(None)
@inlineCallbacks
-def runPrincipalActions(service, rootResource, directory, store, principalIDs,
- actions):
+def runPrincipalActions(service, store, principalIDs, actions):
+ directory = store.directoryService()
for principalID in principalIDs:
- # Resolve the given principal IDs to principals
+ # Resolve the given principal IDs to records
try:
- principal = principalForPrincipalID(principalID, directory=directory)
+ record = yield recordForPrincipalID(directory, principalID)
except ValueError:
- principal = None
+ record = None
- if principal is None:
+ if record is None:
sys.stderr.write("Invalid principal ID: %s\n" % (principalID,))
continue
# Performs requested actions
for action in actions:
- (yield action[0](rootResource, directory, store, principal,
- *action[1:]))
+ (yield action[0](store, record, *action[1:]))
print("")
@inlineCallbacks
-def runSearch(service, rootResource, directory, store, searchTerm):
-
+def runSearch(service, store, searchTerm):
+ directory = store.directoryService()
fields = []
- for fieldName in ("fullName", "firstName", "lastName", "emailAddresses"):
+ for fieldName in ("fullNames", "emailAddresses"):
fields.append((fieldName, searchTerm, True, "contains"))
records = list((yield directory.recordsMatchingTokens(searchTerm.strip().split())))
if records:
- records.sort(key=operator.attrgetter('fullName'))
- print("%d matches found:" % (len(records),))
+ records.sort(key=operator.attrgetter('fullNames'))
+ print("{n} matches found:".format(n=len(records)))
for record in records:
- print("\n%s (%s)" % (record.fullName,
- {"users" : "User",
- "groups" : "Group",
- "locations" : "Place",
- "resources" : "Resource",
- "addresses" : "Address",
- }.get(record.recordType),
- ))
- print(" GUID: %s" % (record.guid,))
- print(" Record name(s): %s" % (", ".join(record.shortNames),))
- if record.authIDs:
- print(" Auth ID(s): %s" % (", ".join(record.authIDs),))
- if record.emailAddresses:
- print(" Email(s): %s" % (", ".join(record.emailAddresses),))
+ print(
+ "\n{d} ({rt})".format(
+ d=record.displayName,
+ rt=record.recordType.name
+ )
+ )
+ print(" UID: {u}".format(u=record.uid,))
+ print(
+ " Record name{plural}: {names}".format(
+ plural=("s" if len(record.shortNames) > 1 else ""),
+ names=(", ".join(record.shortNames))
+ )
+ )
+ try:
+ if record.emailAddresses:
+ print(
+ " Email{plural}: {emails}".format(
+ plural=("s" if len(record.emailAddresses) > 1 else ""),
+ emails=(", ".join(record.emailAddresses))
+ )
+ )
+ except AttributeError:
+ pass
else:
print("No matches found")
@@ -460,292 +453,318 @@
-@inlineCallbacks
-def runAddPrincipal(service, rootResource, directory, store, addType, guid,
- shortNames, fullName):
- try:
- yield updateRecord(True, directory, addType, guid=guid,
- shortNames=shortNames, fullName=fullName)
- print("Added '%s'" % (fullName,))
- except DirectoryError, e:
- print(e)
+# @inlineCallbacks
+# def runAddPrincipal(service, store, addType, guid, shortNames, fullName):
+# directory = store.directoryService()
+# try:
+# # FIXME STOP USING GUID
+# yield updateRecord(
+# True, directory, addType, guid=guid,
+# shortNames=shortNames, fullName=fullName
+# )
+# print("Added '%s'" % (fullName,))
+# except DirectoryError, e:
+# print(e)
-def action_removePrincipal(rootResource, directory, store, principal):
- record = principal.record
- fullName = record.fullName
- shortName = record.shortNames[0]
- guid = record.guid
+# def action_removePrincipal(store, record):
+# directory = store.directoryService()
+# fullName = record.displayName
+# shortName = record.shortNames[0]
- directory.destroyRecord(record.recordType, guid=guid)
- print("Removed '%s' %s %s" % (fullName, shortName, guid))
+# yield directory.destroyRecord(record.recordType, uid=record.uid)
+# print("Removed '%s' %s %s" % (fullName, shortName, record.uid))
+
@inlineCallbacks
-def action_readProperty(rootResource, directory, store, resource, qname):
- property = (yield resource.readProperty(qname, None))
- print("%r on %s:" % (encodeXMLName(*qname), resource))
- print("")
- print(property.toxml())
+def action_listProxies(store, record, *proxyTypes):
+ directory = store.directoryService()
+ for proxyType in proxyTypes:
+ groupRecordType = {
+ "read": directory.recordType.readDelegateGroup,
+ "write": directory.recordType.writeDelegateGroup,
+ }.get(proxyType)
+ pseudoGroup = yield directory.recordWithShortName(
+ groupRecordType,
+ record.uid
+ )
+ proxies = yield pseudoGroup.members()
+ if proxies:
+ print("%s proxies for %s:" % (
+ {"read": "Read-only", "write": "Read/write"}[proxyType],
+ prettyRecord(record)
+ ))
+ printRecordList(proxies)
+ print("")
+ else:
+ print("No %s proxies for %s" % (proxyType, prettyRecord(record)))
+
@inlineCallbacks
-def action_listProxies(rootResource, directory, store, principal, *proxyTypes):
+def action_listProxyFor(store, record, *proxyTypes):
+ directory = store.directoryService()
for proxyType in proxyTypes:
- subPrincipal = proxySubprincipal(principal, proxyType)
- if subPrincipal is None:
- print("No %s proxies for %s" % (proxyType,
- prettyPrincipal(principal)))
- continue
- membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
+ groupRecordType = {
+ "read": directory.recordType.readDelegatorGroup,
+ "write": directory.recordType.writeDelegatorGroup,
+ }.get(proxyType)
- if membersProperty.children:
- print("%s proxies for %s:" % (
- {"read": "Read-only", "write": "Read/write"}[proxyType],
- prettyPrincipal(principal)
+ pseudoGroup = yield directory.recordWithShortName(
+ groupRecordType,
+ record.uid
+ )
+ proxies = yield pseudoGroup.members()
+ if proxies:
+ print("%s is a %s proxy for:" % (
+ prettyRecord(record),
+ {"read": "Read-only", "write": "Read/write"}[proxyType]
))
- records = []
- for member in membersProperty.children:
- proxyPrincipal = principalForPrincipalID(str(member),
- directory=directory)
- records.append(proxyPrincipal.record)
-
- printRecordList(records)
- print
+ printRecordList(proxies)
+ print("")
else:
- print("No %s proxies for %s" % (proxyType,
- prettyPrincipal(principal)))
+ print(
+ "{r} is not a {t} proxy for anyone".format(
+ r=prettyRecord(record),
+ t={"read": "Read-only", "write": "Read/write"}[proxyType]
+ )
+ )
-
@inlineCallbacks
-def action_addProxy(rootResource, directory, store, principal, proxyType, *proxyIDs):
+def _addRemoveProxy(fn, store, record, proxyType, *proxyIDs):
+ directory = store.directoryService()
+ readWrite = (proxyType == "write")
for proxyID in proxyIDs:
- proxyPrincipal = principalForPrincipalID(proxyID, directory=directory)
- if proxyPrincipal is None:
+ proxyRecord = yield recordForPrincipalID(directory, proxyID)
+ if proxyRecord is None:
print("Invalid principal ID: %s" % (proxyID,))
else:
- (yield action_addProxyPrincipal(rootResource, directory, store,
- principal, proxyType, proxyPrincipal))
+ txn = store.newTransaction()
+ yield fn(txn, record, proxyRecord, readWrite)
+ yield txn.commit()
+def action_addProxy(store, record, proxyType, *proxyIDs):
+ return _addRemoveProxy(addDelegate, store, record, proxyType, *proxyIDs)
+
@inlineCallbacks
-def setProxies(store, principal, readProxyPrincipals, writeProxyPrincipals, directory=None):
- """
- Set read/write proxies en masse for a principal
- @param principal: DirectoryPrincipalResource
- @param readProxyPrincipals: a list of principal IDs (see principalForPrincipalID)
- @param writeProxyPrincipals: a list of principal IDs (see principalForPrincipalID)
- """
+def action_removeProxy(store, record, *proxyIDs):
+ # Write
+ yield _addRemoveProxy(removeDelegate, store, record, "write", *proxyIDs)
+ # Read
+ yield _addRemoveProxy(removeDelegate, store, record, "read", *proxyIDs)
- proxyTypes = [
- ("read", readProxyPrincipals),
- ("write", writeProxyPrincipals),
- ]
- for proxyType, proxyIDs in proxyTypes:
- if proxyIDs is None:
- continue
- subPrincipal = proxySubprincipal(principal, proxyType)
- if subPrincipal is None:
- raise ProxyError("Unable to edit %s proxies for %s\n" % (proxyType,
- prettyPrincipal(principal)))
- memberURLs = []
- for proxyID in proxyIDs:
- proxyPrincipal = principalForPrincipalID(proxyID, directory=directory)
- proxyURL = proxyPrincipal.url()
- memberURLs.append(davxml.HRef(proxyURL))
- membersProperty = davxml.GroupMemberSet(*memberURLs)
- yield subPrincipal.writeProperty(membersProperty, None)
- if store is not None:
- # Schedule work the PeerConnectionPool will pick up as overdue
- yield schedulePolledGroupCachingUpdate(store)
+# @inlineCallbacks
+# def setProxies(store, principal, readProxyPrincipals, writeProxyPrincipals, directory=None):
+# """
+# Set read/write proxies en masse for a principal
+# @param principal: DirectoryPrincipalResource
+# @param readProxyPrincipals: a list of principal IDs (see principalForPrincipalID)
+# @param writeProxyPrincipals: a list of principal IDs (see principalForPrincipalID)
+# """
-@inlineCallbacks
-def getProxies(principal, directory=None):
- """
- Returns a tuple containing the GUIDs for read proxies and write proxies
- of the given principal
- """
+# proxyTypes = [
+# ("read", readProxyPrincipals),
+# ("write", writeProxyPrincipals),
+# ]
+# for proxyType, proxyIDs in proxyTypes:
+# if proxyIDs is None:
+# continue
+# subPrincipal = proxySubprincipal(principal, proxyType)
+# if subPrincipal is None:
+# raise ProxyError("Unable to edit %s proxies for %s\n" % (proxyType,
+# prettyPrincipal(principal)))
+# memberURLs = []
+# for proxyID in proxyIDs:
+# proxyPrincipal = yield principalForPrincipalID(proxyID, directory=directory)
+# proxyURL = proxyPrincipal.url()
+# memberURLs.append(davxml.HRef(proxyURL))
+# membersProperty = davxml.GroupMemberSet(*memberURLs)
+# yield subPrincipal.writeProperty(membersProperty, None)
+# if store is not None:
+# # Schedule work the PeerConnectionPool will pick up as overdue
+# yield schedulePolledGroupCachingUpdate(store)
- proxies = {
- "read" : [],
- "write" : [],
- }
- for proxyType in proxies.iterkeys():
- subPrincipal = proxySubprincipal(principal, proxyType)
- if subPrincipal is not None:
- membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
- if membersProperty.children:
- for member in membersProperty.children:
- proxyPrincipal = principalForPrincipalID(str(member), directory=directory)
- proxies[proxyType].append(proxyPrincipal.record.guid)
- returnValue((proxies['read'], proxies['write']))
+# @inlineCallbacks
+# def getProxies(principal, directory=None):
+# """
+# Returns a tuple containing the GUIDs for read proxies and write proxies
+# of the given principal
+# """
+# proxies = {
+# "read": [],
+# "write": [],
+# }
+# for proxyType in proxies.iterkeys():
+# subPrincipal = proxySubprincipal(principal, proxyType)
+# if subPrincipal is not None:
+# membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
+# if membersProperty.children:
+# for member in membersProperty.children:
+# proxyPrincipal = yield principalForPrincipalID(str(member), directory=directory)
+# proxies[proxyType].append(proxyPrincipal.record.guid)
-@inlineCallbacks
-def action_removeProxy(rootResource, directory, store, principal, *proxyIDs, **kwargs):
- for proxyID in proxyIDs:
- proxyPrincipal = principalForPrincipalID(proxyID, directory=directory)
- if proxyPrincipal is None:
- print("Invalid principal ID: %s" % (proxyID,))
- else:
- (yield action_removeProxyPrincipal(rootResource, directory, store,
- principal, proxyPrincipal, **kwargs))
+# returnValue((proxies['read'], proxies['write']))
-@inlineCallbacks
-def action_setAutoSchedule(rootResource, directory, store, principal, autoSchedule):
- if principal.record.recordType == "groups":
- print("Enabling auto-schedule for %s is not allowed." % (principal,))
- elif principal.record.recordType == "users" and not config.Scheduling.Options.AutoSchedule.AllowUsers:
- print("Enabling auto-schedule for %s is not allowed." % (principal,))
- else:
- print("Setting auto-schedule to %s for %s" % (
- {True: "true", False: "false"}[autoSchedule],
- prettyPrincipal(principal),
- ))
+# @inlineCallbacks
+# def action_setAutoSchedule(rootResource, directory, store, principal, autoSchedule):
+# if principal.record.recordType == "groups":
+# print("Enabling auto-schedule for %s is not allowed." % (principal,))
- (yield updateRecord(False, directory,
- principal.record.recordType,
- guid=principal.record.guid,
- shortNames=principal.record.shortNames,
- fullName=principal.record.fullName,
- autoSchedule=autoSchedule,
- **principal.record.extras
- ))
+# elif principal.record.recordType == "users" and not config.Scheduling.Options.AutoSchedule.AllowUsers:
+# print("Enabling auto-schedule for %s is not allowed." % (principal,))
+# else:
+# print("Setting auto-schedule to %s for %s" % (
+# {True: "true", False: "false"}[autoSchedule],
+# prettyPrincipal(principal),
+# ))
+# (yield updateRecord(False, directory,
+# principal.record.recordType,
+# guid=principal.record.guid,
+# shortNames=principal.record.shortNames,
+# fullName=principal.record.fullName,
+# autoSchedule=autoSchedule,
+# **principal.record.extras
+# ))
-def action_getAutoSchedule(rootResource, directory, store, principal):
- autoSchedule = principal.getAutoSchedule()
- print("Auto-schedule for %s is %s" % (
- prettyPrincipal(principal),
- {True: "true", False: "false"}[autoSchedule],
- ))
+# def action_getAutoSchedule(rootResource, directory, store, principal):
+# autoSchedule = principal.getAutoSchedule()
+# print("Auto-schedule for %s is %s" % (
+# prettyPrincipal(principal),
+# {True: "true", False: "false"}[autoSchedule],
+# ))
-@inlineCallbacks
-def action_setAutoScheduleMode(rootResource, directory, store, principal, autoScheduleMode):
- if principal.record.recordType == "groups":
- print("Setting auto-schedule mode for %s is not allowed." % (principal,))
- elif principal.record.recordType == "users" and not config.Scheduling.Options.AutoSchedule.AllowUsers:
- print("Setting auto-schedule mode for %s is not allowed." % (principal,))
- else:
- print("Setting auto-schedule mode to %s for %s" % (
- autoScheduleMode,
- prettyPrincipal(principal),
- ))
+# @inlineCallbacks
+# def action_setAutoScheduleMode(rootResource, directory, store, principal, autoScheduleMode):
+# if principal.record.recordType == "groups":
+# print("Setting auto-schedule mode for %s is not allowed." % (principal,))
- (yield updateRecord(False, directory,
- principal.record.recordType,
- guid=principal.record.guid,
- shortNames=principal.record.shortNames,
- fullName=principal.record.fullName,
- autoScheduleMode=autoScheduleMode,
- **principal.record.extras
- ))
+# elif principal.record.recordType == "users" and not config.Scheduling.Options.AutoSchedule.AllowUsers:
+# print("Setting auto-schedule mode for %s is not allowed." % (principal,))
+# else:
+# print("Setting auto-schedule mode to %s for %s" % (
+# autoScheduleMode,
+# prettyPrincipal(principal),
+# ))
+# (yield updateRecord(False, directory,
+# principal.record.recordType,
+# guid=principal.record.guid,
+# shortNames=principal.record.shortNames,
+# fullName=principal.record.fullName,
+# autoScheduleMode=autoScheduleMode,
+# **principal.record.extras
+# ))
-def action_getAutoScheduleMode(rootResource, directory, store, principal):
- autoScheduleMode = principal.getAutoScheduleMode()
- if not autoScheduleMode:
- autoScheduleMode = "automatic"
- print("Auto-schedule mode for %s is %s" % (
- prettyPrincipal(principal),
- autoScheduleMode,
- ))
+# def action_getAutoScheduleMode(rootResource, directory, store, principal):
+# autoScheduleMode = principal.getAutoScheduleMode()
+# if not autoScheduleMode:
+# autoScheduleMode = "automatic"
+# print("Auto-schedule mode for %s is %s" % (
+# prettyPrincipal(principal),
+# autoScheduleMode,
+# ))
-@inlineCallbacks
-def action_setAutoAcceptGroup(rootResource, directory, store, principal, autoAcceptGroup):
- if principal.record.recordType == "groups":
- print("Setting auto-accept-group for %s is not allowed." % (principal,))
- elif principal.record.recordType == "users" and not config.Scheduling.Options.AutoSchedule.AllowUsers:
- print("Setting auto-accept-group for %s is not allowed." % (principal,))
- else:
- groupPrincipal = principalForPrincipalID(autoAcceptGroup, directory=directory)
- if groupPrincipal is None or groupPrincipal.record.recordType != "groups":
- print("Invalid principal ID: %s" % (autoAcceptGroup,))
- else:
- print("Setting auto-accept-group to %s for %s" % (
- prettyPrincipal(groupPrincipal),
- prettyPrincipal(principal),
- ))
+# @inlineCallbacks
+# def action_setAutoAcceptGroup(rootResource, directory, store, principal, autoAcceptGroup):
+# if principal.record.recordType == "groups":
+# print("Setting auto-accept-group for %s is not allowed." % (principal,))
- (yield updateRecord(False, directory,
- principal.record.recordType,
- guid=principal.record.guid,
- shortNames=principal.record.shortNames,
- fullName=principal.record.fullName,
- autoAcceptGroup=groupPrincipal.record.guid,
- **principal.record.extras
- ))
+# elif principal.record.recordType == "users" and not config.Scheduling.Options.AutoSchedule.AllowUsers:
+# print("Setting auto-accept-group for %s is not allowed." % (principal,))
+# else:
+# groupPrincipal = yield principalForPrincipalID(autoAcceptGroup, directory=directory)
+# if groupPrincipal is None or groupPrincipal.record.recordType != "groups":
+# print("Invalid principal ID: %s" % (autoAcceptGroup,))
+# else:
+# print("Setting auto-accept-group to %s for %s" % (
+# prettyPrincipal(groupPrincipal),
+# prettyPrincipal(principal),
+# ))
+# (yield updateRecord(False, directory,
+# principal.record.recordType,
+# guid=principal.record.guid,
+# shortNames=principal.record.shortNames,
+# fullName=principal.record.fullName,
+# autoAcceptGroup=groupPrincipal.record.guid,
+# **principal.record.extras
+# ))
-def action_getAutoAcceptGroup(rootResource, directory, store, principal):
- autoAcceptGroup = principal.getAutoAcceptGroup()
- if autoAcceptGroup:
- record = directory.recordWithGUID(autoAcceptGroup)
- if record is not None:
- groupPrincipal = directory.principalCollection.principalForUID(record.uid)
- if groupPrincipal is not None:
- print("Auto-accept-group for %s is %s" % (
- prettyPrincipal(principal),
- prettyPrincipal(groupPrincipal),
- ))
- return
- print("Invalid auto-accept-group assigned: %s" % (autoAcceptGroup,))
- else:
- print("No auto-accept-group assigned to %s" % (prettyPrincipal(principal),))
+# def action_getAutoAcceptGroup(rootResource, directory, store, principal):
+# autoAcceptGroup = principal.getAutoAcceptGroup()
+# if autoAcceptGroup:
+# record = yield directory.recordWithGUID(autoAcceptGroup)
+# if record is not None:
+# groupPrincipal = yield directory.principalCollection.principalForUID(record.uid)
+# if groupPrincipal is not None:
+# print("Auto-accept-group for %s is %s" % (
+# prettyPrincipal(principal),
+# prettyPrincipal(groupPrincipal),
+# ))
+# return
+# print("Invalid auto-accept-group assigned: %s" % (autoAcceptGroup,))
+# else:
+# print("No auto-accept-group assigned to %s" % (prettyPrincipal(principal),))
-@inlineCallbacks
-def action_setValue(rootResource, directory, store, principal, name, value):
- print("Setting %s to %s for %s" % (
- name, value, prettyPrincipal(principal),
- ))
- principal.record.extras[attrMap[name]["attr"]] = value
- (yield updateRecord(False, directory,
- principal.record.recordType,
- guid=principal.record.guid,
- shortNames=principal.record.shortNames,
- fullName=principal.record.fullName,
- **principal.record.extras
- ))
+# @inlineCallbacks
+# def action_setValue(rootResource, directory, store, principal, name, value):
+# print("Setting %s to %s for %s" % (
+# name, value, prettyPrincipal(principal),
+# ))
+# principal.record.extras[attrMap[name]["attr"]] = value
+# (yield updateRecord(False, directory,
+# principal.record.recordType,
+# guid=principal.record.guid,
+# shortNames=principal.record.shortNames,
+# fullName=principal.record.fullName,
+# **principal.record.extras
+# ))
-def action_getValue(rootResource, directory, store, principal, name):
- print("%s for %s is %s" % (
- name,
- prettyPrincipal(principal),
- principal.record.extras[attrMap[name]["attr"]]
- ))
+# def action_getValue(rootResource, directory, store, principal, name):
+# print("%s for %s is %s" % (
+# name,
+# prettyPrincipal(principal),
+# principal.record.extras[attrMap[name]["attr"]]
+# ))
+
+
def abort(msg, status=1):
sys.stdout.write("%s\n" % (msg,))
try:
@@ -803,14 +822,16 @@
def printRecordList(records):
- results = [(record.fullName, record.shortNames[0], record.guid)
- for record in records]
+ results = [
+ (record.displayName, record.recordType.name, record.uid, record.shortNames)
+ for record in records
+ ]
results.sort()
- format = "%-22s %-17s %s"
- print(format % ("Full name", "Record name", "UUID"))
- print(format % ("---------", "-----------", "----"))
- for fullName, shortName, guid in results:
- print(format % (fullName, shortName, guid))
+ format = "%-22s %-10s %-20s %s"
+ print(format % ("Full name", "Type", "UID", "Short names"))
+ print(format % ("---------", "----", "---", "-----------"))
+ for fullName, recordType, uid, shortNames in results:
+ print(format % (fullName, recordType, uid, u", ".join(shortNames)))
@@ -859,16 +880,16 @@
kwargs[key] = newValue
if create:
- record = directory.createRecord(recordType, **kwargs)
+ record = yield directory.createRecord(recordType, **kwargs)
kwargs['guid'] = record.guid
else:
try:
- record = directory.updateRecord(recordType, **kwargs)
+ record = yield directory.updateRecord(recordType, **kwargs)
except NotImplementedError:
# Updating of directory information is not supported by underlying
# directory implementation, but allow augment information to be
# updated
- record = directory.recordWithGUID(kwargs["guid"])
+ record = yield directory.recordWithGUID(kwargs["guid"])
pass
augmentService = directory.serviceForRecordType(recordType).augmentService
@@ -882,7 +903,7 @@
augmentRecord.autoAcceptGroup = autoAcceptGroup
(yield augmentService.addAugmentRecords([augmentRecord]))
try:
- directory.updateRecord(recordType, **kwargs)
+ yield directory.updateRecord(recordType, **kwargs)
except NotImplementedError:
# Updating of directory information is not supported by underlying
# directory implementation, but allow augment information to be
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/shell/directory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/shell/directory.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/shell/directory.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -83,11 +83,17 @@
add("First Name", record.firstName)
add("Last Name" , record.lastName )
- for email in record.emailAddresses:
- add("Email Address", email)
+ try:
+ for email in record.emailAddresses:
+ add("Email Address", email)
+ except AttributeError:
+ pass
- for cua in record.calendarUserAddresses:
- add("Calendar User Address", cua)
+ try:
+ for cua in record.calendarUserAddresses:
+ add("Calendar User Address", cua)
+ except AttributeError:
+ pass
add("Server ID" , record.serverID)
add("Enabled" , record.enabled)
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/test/test_principals.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/test/test_principals.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/test/test_principals.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -17,22 +17,23 @@
import os
import sys
+from calendarserver.tools.principals import (
+ parseCreationArgs, matchStrings,
+ updateRecord, principalForPrincipalID, getProxies, setProxies
+)
from twext.python.filepath import CachingFilePath as FilePath
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
-
from twistedcaldav.config import config
+from twistedcaldav.directory import calendaruserproxy
from twistedcaldav.directory.directory import DirectoryError
-from twistedcaldav.directory import calendaruserproxy
+from twistedcaldav.test.util import (
+ TestCase, CapturingProcessProtocol, ErrorOutput
+)
+from txdav.who.util import directoryFromConfig
-from twistedcaldav.test.util import TestCase, CapturingProcessProtocol, \
- ErrorOutput
-from calendarserver.tap.util import directoryFromConfig
-from calendarserver.tools.principals import (parseCreationArgs, matchStrings,
- updateRecord, principalForPrincipalID, getProxies, setProxies)
-
class ManagePrincipalsTestCase(TestCase):
def setUp(self):
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/util.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/tools/util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -20,8 +20,6 @@
__all__ = [
"loadConfig",
- "getDirectory",
- "dummyDirectoryRecord",
"UsageError",
"booleanArgument",
]
@@ -48,8 +46,7 @@
from twistedcaldav import memcachepool
from twistedcaldav.directory import calendaruserproxy
-from twistedcaldav.directory.aggregate import AggregateDirectoryService
-from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
+# from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
from txdav.who.groups import schedulePolledGroupCachingUpdate
from calendarserver.push.notifier import NotifierFactory
@@ -78,145 +75,145 @@
-def getDirectory(config=config):
+# def getDirectory(config=config):
- class MyDirectoryService (AggregateDirectoryService):
- def getPrincipalCollection(self):
- if not hasattr(self, "_principalCollection"):
+# class MyDirectoryService (AggregateDirectoryService):
+# def getPrincipalCollection(self):
+# if not hasattr(self, "_principalCollection"):
- if config.Notifications.Enabled:
- # FIXME: NotifierFactory needs reference to the store in order
- # to get a txn in order to create a Work item
- notifierFactory = NotifierFactory(
- None, config.ServerHostName,
- config.Notifications.CoalesceSeconds,
- )
- else:
- notifierFactory = None
+# if config.Notifications.Enabled:
+# # FIXME: NotifierFactory needs reference to the store in order
+# # to get a txn in order to create a Work item
+# notifierFactory = NotifierFactory(
+# None, config.ServerHostName,
+# config.Notifications.CoalesceSeconds,
+# )
+# else:
+# notifierFactory = None
- # Need a data store
- _newStore = CommonDataStore(FilePath(config.DocumentRoot),
- notifierFactory, self, True, False)
- if notifierFactory is not None:
- notifierFactory.store = _newStore
+# # Need a data store
+# _newStore = CommonDataStore(FilePath(config.DocumentRoot),
+# notifierFactory, self, True, False)
+# if notifierFactory is not None:
+# notifierFactory.store = _newStore
- #
- # Instantiating a DirectoryCalendarHomeProvisioningResource with a directory
- # will register it with the directory (still smells like a hack).
- #
- # We need that in order to locate calendar homes via the directory.
- #
- from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
- DirectoryCalendarHomeProvisioningResource(self, "/calendars/", _newStore)
+# #
+# # Instantiating a DirectoryCalendarHomeProvisioningResource with a directory
+# # will register it with the directory (still smells like a hack).
+# #
+# # We need that in order to locate calendar homes via the directory.
+# #
+# from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
+# DirectoryCalendarHomeProvisioningResource(self, "/calendars/", _newStore)
- from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
- self._principalCollection = DirectoryPrincipalProvisioningResource("/principals/", self)
+# from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
+# self._principalCollection = DirectoryPrincipalProvisioningResource("/principals/", self)
- return self._principalCollection
+# return self._principalCollection
- def setPrincipalCollection(self, coll):
- # See principal.py line 237: self.directory.principalCollection = self
- pass
+# def setPrincipalCollection(self, coll):
+# # See principal.py line 237: self.directory.principalCollection = self
+# pass
- principalCollection = property(getPrincipalCollection, setPrincipalCollection)
+# principalCollection = property(getPrincipalCollection, setPrincipalCollection)
- def calendarHomeForRecord(self, record):
- principal = self.principalCollection.principalForRecord(record)
- if principal:
- try:
- return principal.calendarHome()
- except AttributeError:
- pass
- return None
+# def calendarHomeForRecord(self, record):
+# principal = self.principalCollection.principalForRecord(record)
+# if principal:
+# try:
+# return principal.calendarHome()
+# except AttributeError:
+# pass
+# return None
- def calendarHomeForShortName(self, recordType, shortName):
- principal = self.principalCollection.principalForShortName(recordType, shortName)
- if principal:
- return principal.calendarHome()
- return None
+# def calendarHomeForShortName(self, recordType, shortName):
+# principal = self.principalCollection.principalForShortName(recordType, shortName)
+# if principal:
+# return principal.calendarHome()
+# return None
- def principalForCalendarUserAddress(self, cua):
- return self.principalCollection.principalForCalendarUserAddress(cua)
+# def principalForCalendarUserAddress(self, cua):
+# return self.principalCollection.principalForCalendarUserAddress(cua)
- def principalForUID(self, uid):
- return self.principalCollection.principalForUID(uid)
+# def principalForUID(self, uid):
+# return self.principalCollection.principalForUID(uid)
- # Load augment/proxy db classes now
- if config.AugmentService.type:
- augmentClass = namedClass(config.AugmentService.type)
- augmentService = augmentClass(**config.AugmentService.params)
- else:
- augmentService = None
+# # Load augment/proxy db classes now
+# if config.AugmentService.type:
+# augmentClass = namedClass(config.AugmentService.type)
+# augmentService = augmentClass(**config.AugmentService.params)
+# else:
+# augmentService = None
- proxydbClass = namedClass(config.ProxyDBService.type)
- calendaruserproxy.ProxyDBService = proxydbClass(**config.ProxyDBService.params)
+# proxydbClass = namedClass(config.ProxyDBService.type)
+# calendaruserproxy.ProxyDBService = proxydbClass(**config.ProxyDBService.params)
- # Wait for directory service to become available
- BaseDirectoryService = namedClass(config.DirectoryService.type)
- config.DirectoryService.params.augmentService = augmentService
- directory = BaseDirectoryService(config.DirectoryService.params)
- while not directory.isAvailable():
- sleep(5)
+# # Wait for directory service to become available
+# BaseDirectoryService = namedClass(config.DirectoryService.type)
+# config.DirectoryService.params.augmentService = augmentService
+# directory = BaseDirectoryService(config.DirectoryService.params)
+# while not directory.isAvailable():
+# sleep(5)
- directories = [directory]
+# directories = [directory]
- if config.ResourceService.Enabled:
- resourceClass = namedClass(config.ResourceService.type)
- config.ResourceService.params.augmentService = augmentService
- resourceDirectory = resourceClass(config.ResourceService.params)
- resourceDirectory.realmName = directory.realmName
- directories.append(resourceDirectory)
+# if config.ResourceService.Enabled:
+# resourceClass = namedClass(config.ResourceService.type)
+# config.ResourceService.params.augmentService = augmentService
+# resourceDirectory = resourceClass(config.ResourceService.params)
+# resourceDirectory.realmName = directory.realmName
+# directories.append(resourceDirectory)
- aggregate = MyDirectoryService(directories, None)
- aggregate.augmentService = augmentService
+# aggregate = MyDirectoryService(directories, None)
+# aggregate.augmentService = augmentService
- #
- # Wire up the resource hierarchy
- #
- principalCollection = aggregate.getPrincipalCollection()
- root = RootResource(
- config.DocumentRoot,
- principalCollections=(principalCollection,),
- )
- root.putChild("principals", principalCollection)
+# #
+# # Wire up the resource hierarchy
+# #
+# principalCollection = aggregate.getPrincipalCollection()
+# root = RootResource(
+# config.DocumentRoot,
+# principalCollections=(principalCollection,),
+# )
+# root.putChild("principals", principalCollection)
- # Need a data store
- _newStore = CommonDataStore(FilePath(config.DocumentRoot), None, aggregate, True, False)
+# # Need a data store
+# _newStore = CommonDataStore(FilePath(config.DocumentRoot), None, aggregate, True, False)
- from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
- calendarCollection = DirectoryCalendarHomeProvisioningResource(
- aggregate, "/calendars/",
- _newStore,
- )
- root.putChild("calendars", calendarCollection)
+# from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
+# calendarCollection = DirectoryCalendarHomeProvisioningResource(
+# aggregate, "/calendars/",
+# _newStore,
+# )
+# root.putChild("calendars", calendarCollection)
- return aggregate
+# return aggregate
-class DummyDirectoryService (DirectoryService):
- realmName = ""
- baseGUID = "51856FD4-5023-4890-94FE-4356C4AAC3E4"
- def recordTypes(self):
- return ()
+# class DummyDirectoryService (DirectoryService):
+# realmName = ""
+# baseGUID = "51856FD4-5023-4890-94FE-4356C4AAC3E4"
+# def recordTypes(self):
+# return ()
- def listRecords(self):
- return ()
+# def listRecords(self):
+# return ()
- def recordWithShortName(self):
- return None
+# def recordWithShortName(self):
+# return None
-dummyDirectoryRecord = DirectoryRecord(
- service=DummyDirectoryService(),
- recordType="dummy",
- guid="8EF0892F-7CB6-4B8E-B294-7C5A5321136A",
- shortNames=("dummy",),
- fullName="Dummy McDummerson",
- firstName="Dummy",
- lastName="McDummerson",
-)
+# dummyDirectoryRecord = DirectoryRecord(
+# service=DummyDirectoryService(),
+# recordType="dummy",
+# guid="8EF0892F-7CB6-4B8E-B294-7C5A5321136A",
+# shortNames=("dummy",),
+# fullName="Dummy McDummerson",
+# firstName="Dummy",
+# lastName="McDummerson",
+# )
class UsageError (StandardError):
pass
@@ -334,6 +331,7 @@
+@inlineCallbacks
def principalForPrincipalID(principalID, checkOnly=False, directory=None):
# Allow a directory parameter to be passed in, but default to config.directory
@@ -351,16 +349,16 @@
raise ValueError("Can't resolve all paths yet")
if checkOnly:
- return None
+ returnValue(None)
- return directory.principalCollection.principalForUID(uid)
+ returnValue((yield directory.principalCollection.principalForUID(uid)))
if principalID.startswith("("):
try:
i = principalID.index(")")
if checkOnly:
- return None
+ returnValue(None)
recordType = principalID[1:i]
shortName = principalID[i + 1:]
@@ -368,34 +366,87 @@
if not recordType or not shortName or "(" in recordType:
raise ValueError()
- return directory.principalCollection.principalForShortName(recordType, shortName)
+ returnValue((yield directory.principalCollection.principalForShortName(recordType, shortName)))
except ValueError:
pass
if ":" in principalID:
if checkOnly:
- return None
+ returnValue(None)
recordType, shortName = principalID.split(":", 1)
- return directory.principalCollection.principalForShortName(recordType, shortName)
+ returnValue((yield directory.principalCollection.principalForShortName(recordType, shortName)))
try:
UUID(principalID)
if checkOnly:
- return None
+ returnValue(None)
- x = directory.principalCollection.principalForUID(principalID)
- return x
+ returnValue((yield directory.principalCollection.principalForUID(principalID)))
except ValueError:
pass
raise ValueError("Invalid principal identifier: %s" % (principalID,))
+ at inlineCallbacks
+def recordForPrincipalID(directory, principalID, checkOnly=False):
+ if principalID.startswith("/"):
+ segments = principalID.strip("/").split("/")
+ if (len(segments) == 3 and
+ segments[0] == "principals" and segments[1] == "__uids__"):
+ uid = segments[2]
+ else:
+ raise ValueError("Can't resolve all paths yet")
+
+ if checkOnly:
+ returnValue(None)
+
+ returnValue((yield directory.recordWithUID(uid)))
+
+ if principalID.startswith("("):
+ try:
+ i = principalID.index(")")
+
+ if checkOnly:
+ returnValue(None)
+
+ recordType = directory.oldNameToRecordType(principalID[1:i])
+ shortName = principalID[i + 1:]
+
+ if not recordType or not shortName or "(" in recordType:
+ raise ValueError()
+
+ returnValue((yield directory.recordWithShortName(recordType, shortName)))
+
+ except ValueError:
+ pass
+
+ if ":" in principalID:
+ if checkOnly:
+ returnValue(None)
+
+ recordType, shortName = principalID.split(":", 1)
+ recordType = directory.oldNameToRecordType(recordType)
+
+ returnValue((yield directory.recordWithShortName(recordType, shortName)))
+
+ try:
+ if checkOnly:
+ returnValue(None)
+
+ returnValue((yield directory.recordWithUID(principalID)))
+ except ValueError:
+ pass
+
+ raise ValueError("Invalid principal identifier: %s" % (principalID,))
+
+
+
def proxySubprincipal(principal, proxyType):
return principal.getChild("calendar-proxy-" + proxyType)
@@ -501,12 +552,19 @@
def prettyPrincipal(principal):
- record = principal.record
- return "\"%s\" (%s:%s)" % (record.fullName, record.recordType,
- record.shortNames[0])
+ prettyRecord(principal.record)
+def prettyRecord(record):
+ return "\"{d}\" {uid} ({rt}) {sn}".format(
+ d=record.displayName,
+ rt=record.recordType.name,
+ uid=record.uid,
+ sn=(", ".join(record.shortNames))
+ )
+
+
class ProxyError(Exception):
"""
Raised when proxy assignments cannot be performed
Modified: CalendarServer/branches/users/sagen/move2who-3/calendarserver/webcal/resource.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/calendarserver/webcal/resource.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/calendarserver/webcal/resource.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -48,15 +48,17 @@
class WebCalendarResource (ReadOnlyResourceMixIn, DAVFile):
def defaultAccessControlList(self):
- return davxml.ACL(
- davxml.ACE(
- davxml.Principal(davxml.Authenticated()),
- davxml.Grant(
- davxml.Privilege(davxml.Read()),
+ return succeed(
+ davxml.ACL(
+ davxml.ACE(
+ davxml.Principal(davxml.Authenticated()),
+ davxml.Grant(
+ davxml.Privilege(davxml.Read()),
+ ),
+ davxml.Protected(),
+ TwistedACLInheritable(),
),
- davxml.Protected(),
- TwistedACLInheritable(),
- ),
+ )
)
Modified: CalendarServer/branches/users/sagen/move2who-3/conf/auth/accounts-test.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/conf/auth/accounts-test.xml 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/conf/auth/accounts-test.xml 2014-03-13 20:40:10 UTC (rev 12899)
@@ -18,172 +18,339 @@
<!DOCTYPE accounts SYSTEM "accounts.dtd">
-<accounts realm="Test Realm">
- <user>
+<directory realm="Test Realm">
+ <record type="user">
<uid>admin</uid>
- <guid>admin</guid>
+ <short-name>admin</short-name>
<password>admin</password>
- <name>Super User</name>
- <first-name>Super</first-name>
- <last-name>User</last-name>
- </user>
- <user>
+ <full-name>Super User</full-name>
+ </record>
+ <record type="user">
<uid>apprentice</uid>
- <guid>apprentice</guid>
+ <short-name>apprentice</short-name>
<password>apprentice</password>
- <name>Apprentice Super User</name>
- <first-name>Apprentice</first-name>
- <last-name>Super User</last-name>
- </user>
- <user>
+ <full-name>Apprentice Super User</full-name>
+ </record>
+ <record type="user">
<uid>wsanchez</uid>
- <guid>wsanchez</guid>
- <email-address>wsanchez at example.com</email-address>
+ <short-name>wsanchez</short-name>
+ <email>wsanchez at example.com</email>
<password>test</password>
- <name>Wilfredo Sanchez Vega</name>
- <first-name>Wilfredo</first-name>
- <last-name>Sanchez Vega</last-name>
- </user>
- <user>
+ <full-name>Wilfredo Sanchez Vega</full-name>
+ </record>
+ <record type="user">
<uid>cdaboo</uid>
- <guid>cdaboo</guid>
- <email-address>cdaboo at example.com</email-address>
+ <short-name>cdaboo</short-name>
+ <email>cdaboo at example.com</email>
<password>test</password>
- <name>Cyrus Daboo</name>
- <first-name>Cyrus</first-name>
- <last-name>Daboo</last-name>
- </user>
- <user>
+    <full-name>Cyrus Daboo</full-name>
+ </record>
+ <record type="user">
<uid>sagen</uid>
- <guid>sagen</guid>
- <email-address>sagen at example.com</email-address>
+ <short-name>sagen</short-name>
+ <email>sagen at example.com</email>
<password>test</password>
- <name>Morgen Sagen</name>
- <first-name>Morgen</first-name>
- <last-name>Sagen</last-name>
- </user>
- <user>
+ <full-name>Morgen Sagen</full-name>
+ </record>
+ <record type="user">
<uid>dre</uid>
- <guid>andre</guid>
- <email-address>dre at example.com</email-address>
+ <short-name>andre</short-name>
+ <email>dre at example.com</email>
<password>test</password>
- <name>Andre LaBranche</name>
- <first-name>Andre</first-name>
- <last-name>LaBranche</last-name>
- </user>
- <user>
+ <full-name>Andre LaBranche</full-name>
+ </record>
+ <record type="user">
<uid>glyph</uid>
- <guid>glyph</guid>
- <email-address>glyph at example.com</email-address>
+ <short-name>glyph</short-name>
+ <email>glyph at example.com</email>
<password>test</password>
- <name>Glyph Lefkowitz</name>
- <first-name>Glyph</first-name>
- <last-name>Lefkowitz</last-name>
- </user>
- <user>
+ <full-name>Glyph Lefkowitz</full-name>
+ </record>
+ <record type="user">
<uid>i18nuser</uid>
- <guid>i18nuser</guid>
- <email-address>i18nuser at example.com</email-address>
+ <short-name>i18nuser</short-name>
+ <email>i18nuser at example.com</email>
<password>i18nuser</password>
- <name>まだ</name>
- <first-name>ま</first-name>
- <last-name>だ</last-name>
- </user>
+ <full-name>まだ</full-name>
+ </record>
+
+ <!-- twext.who xml doesn't (yet) support repeat
<user repeat="101">
<uid>user%02d</uid>
<uid>User %02d</uid>
- <guid>user%02d</guid>
+ <short-name>user%02d</short-name>
<password>user%02d</password>
- <name>User %02d</name>
- <first-name>User</first-name>
- <last-name>%02d</last-name>
- <email-address>user%02d at example.com</email-address>
- </user>
+ <full-name>User %02d</full-name>
+ <email>user%02d at example.com</email>
+ </record>
<user repeat="10">
<uid>public%02d</uid>
- <guid>public%02d</guid>
+ <short-name>public%02d</short-name>
<password>public%02d</password>
- <name>Public %02d</name>
- <first-name>Public</first-name>
- <last-name>%02d</last-name>
- </user>
- <group>
+ <full-name>Public %02d</full-name>
+ </record>
+ -->
+ <record type="user">
+ <short-name>user01</short-name>
+ <uid>user01</uid>
+ <password>user01</password>
+ <full-name>User 01</full-name>
+ <email>user01 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user02</short-name>
+ <uid>user02</uid>
+ <password>user02</password>
+ <full-name>User 02</full-name>
+ <email>user02 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user03</short-name>
+ <uid>user03</uid>
+ <password>user03</password>
+ <full-name>User 03</full-name>
+ <email>user03 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user04</short-name>
+ <uid>user04</uid>
+ <password>user04</password>
+ <full-name>User 04</full-name>
+ <email>user04 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user05</short-name>
+ <uid>user05</uid>
+ <password>user05</password>
+ <full-name>User 05</full-name>
+ <email>user05 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user06</short-name>
+ <uid>user06</uid>
+ <password>user06</password>
+ <full-name>User 06</full-name>
+ <email>user06 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user07</short-name>
+ <uid>user07</uid>
+ <password>user07</password>
+ <full-name>User 07</full-name>
+ <email>user07 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user08</short-name>
+ <uid>user08</uid>
+ <password>user08</password>
+ <full-name>User 08</full-name>
+ <email>user08 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user09</short-name>
+ <uid>user09</uid>
+ <password>user09</password>
+ <full-name>User 09</full-name>
+ <email>user09 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user10</short-name>
+ <uid>user10</uid>
+ <password>user10</password>
+ <full-name>User 10</full-name>
+ <email>user10 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user11</short-name>
+ <uid>user11</uid>
+ <password>user11</password>
+ <full-name>User 11</full-name>
+ <email>user11 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user12</short-name>
+ <uid>user12</uid>
+ <password>user12</password>
+ <full-name>User 12</full-name>
+ <email>user12 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user13</short-name>
+ <uid>user13</uid>
+ <password>user13</password>
+ <full-name>User 13</full-name>
+ <email>user13 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user14</short-name>
+ <uid>user14</uid>
+ <password>user14</password>
+ <full-name>User 14</full-name>
+ <email>user14 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user15</short-name>
+ <uid>user15</uid>
+ <password>user15</password>
+ <full-name>User 15</full-name>
+ <email>user15 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user16</short-name>
+ <uid>user16</uid>
+ <password>user16</password>
+ <full-name>User 16</full-name>
+ <email>user16 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user17</short-name>
+ <uid>user17</uid>
+ <password>user17</password>
+ <full-name>User 17</full-name>
+ <email>user17 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user18</short-name>
+ <uid>user18</uid>
+ <password>user18</password>
+ <full-name>User 18</full-name>
+ <email>user18 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user19</short-name>
+ <uid>user19</uid>
+ <password>user19</password>
+ <full-name>User 19</full-name>
+ <email>user19 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user20</short-name>
+ <uid>user20</uid>
+ <password>user20</password>
+ <full-name>User 20</full-name>
+ <email>user20 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user21</short-name>
+ <uid>user21</uid>
+ <password>user21</password>
+ <full-name>User 21</full-name>
+ <email>user21 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user22</short-name>
+ <uid>user22</uid>
+ <password>user22</password>
+ <full-name>User 22</full-name>
+ <email>user22 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user23</short-name>
+ <uid>user23</uid>
+ <password>user23</password>
+ <full-name>User 23</full-name>
+ <email>user23 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user24</short-name>
+ <uid>user24</uid>
+ <password>user24</password>
+ <full-name>User 24</full-name>
+ <email>user24 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user25</short-name>
+ <uid>user25</uid>
+ <password>user25</password>
+ <full-name>User 25</full-name>
+ <email>user25 at example.com</email>
+ </record>
+
+ <record type="group">
<uid>group01</uid>
- <guid>group01</guid>
+ <short-name>group01</short-name>
<password>group01</password>
- <name>Group 01</name>
- <members>
- <member type="users">user01</member>
- </members>
- </group>
- <group>
+ <full-name>Group 01</full-name>
+ <member-uid type="users">user01</member-uid>
+ </record>
+ <record type="group">
<uid>group02</uid>
- <guid>group02</guid>
+ <short-name>group02</short-name>
<password>group02</password>
- <name>Group 02</name>
- <members>
- <member type="users">user06</member>
- <member type="users">user07</member>
- </members>
- </group>
- <group>
+ <full-name>Group 02</full-name>
+ <member-uid type="users">user06</member-uid>
+ <member-uid type="users">user07</member-uid>
+ </record>
+ <record type="group">
<uid>group03</uid>
- <guid>group03</guid>
+ <short-name>group03</short-name>
<password>group03</password>
- <name>Group 03</name>
- <members>
- <member type="users">user08</member>
- <member type="users">user09</member>
- </members>
- </group>
- <group>
+ <full-name>Group 03</full-name>
+ <member-uid type="users">user08</member-uid>
+ <member-uid type="users">user09</member-uid>
+ </record>
+ <record type="group">
<uid>group04</uid>
- <guid>group04</guid>
+ <short-name>group04</short-name>
<password>group04</password>
- <name>Group 04</name>
- <members>
- <member type="groups">group02</member>
- <member type="groups">group03</member>
- <member type="users">user10</member>
- </members>
- </group>
- <group> <!-- delegategroup -->
+ <full-name>Group 04</full-name>
+ <member-uid type="groups">group02</member-uid>
+ <member-uid type="groups">group03</member-uid>
+ <member-uid type="users">user10</member-uid>
+ </record>
+ <record type="group"> <!-- delegategroup -->
<uid>group05</uid>
- <guid>group05</guid>
+ <short-name>group05</short-name>
<password>group05</password>
- <name>Group 05</name>
- <members>
- <member type="groups">group06</member>
- <member type="users">user20</member>
- </members>
- </group>
- <group> <!-- delegatesubgroup -->
+ <full-name>Group 05</full-name>
+ <member-uid type="groups">group06</member-uid>
+ <member-uid type="users">user20</member-uid>
+ </record>
+ <record type="group"> <!-- delegatesubgroup -->
<uid>group06</uid>
- <guid>group06</guid>
+ <short-name>group06</short-name>
<password>group06</password>
- <name>Group 06</name>
- <members>
- <member type="users">user21</member>
- </members>
- </group>
- <group> <!-- readonlydelegategroup -->
+ <full-name>Group 06</full-name>
+ <member-uid type="users">user21</member-uid>
+ </record>
+ <record type="group"> <!-- readonlydelegategroup -->
<uid>group07</uid>
- <guid>group07</guid>
+ <short-name>group07</short-name>
<password>group07</password>
- <name>Group 07</name>
- <members>
- <member type="users">user22</member>
- <member type="users">user23</member>
- <member type="users">user24</member>
- </members>
- </group>
- <group>
+ <full-name>Group 07</full-name>
+ <member-uid type="users">user22</member-uid>
+ <member-uid type="users">user23</member-uid>
+ <member-uid type="users">user24</member-uid>
+ </record>
+ <record type="group">
<uid>disabledgroup</uid>
- <guid>disabledgroup</guid>
+ <short-name>disabledgroup</short-name>
<password>disabledgroup</password>
- <name>Disabled Group</name>
- <members>
- <member type="users">user01</member>
- </members>
- </group>
-</accounts>
+ <full-name>Disabled Group</full-name>
+ <member-uid type="users">user01</member-uid>
+ </record>
+</directory>
Modified: CalendarServer/branches/users/sagen/move2who-3/conf/auth/resources-test.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/conf/auth/resources-test.xml 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/conf/auth/resources-test.xml 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,135 +1,273 @@
-<accounts realm="Test Realm">
- <location repeat="10">
- <uid>location%02d</uid>
- <guid>location%02d</guid>
- <password>location%02d</password>
- <name>Room %02d</name>
- </location>
- <resource repeat="20">
- <uid>resource%02d</uid>
- <guid>resource%02d</guid>
- <password>resource%02d</password>
- <name>Resource %02d</name>
- </resource>
- <location>
- <uid>fantastic</uid>
- <guid>4D66A20A-1437-437D-8069-2F14E8322234</guid>
- <name>Fantastic Conference Room</name>
+<directory realm="Test Realm">
+ <record type="location">
+ <short-name>fantastic</short-name>
+ <uid>4D66A20A-1437-437D-8069-2F14E8322234</uid>
+ <full-name>Fantastic Conference Room</full-name>
<extras>
<associatedAddress>63A2F949-2D8D-4C8D-B8A5-DCF2A94610F3</associatedAddress>
</extras>
- </location>
- <location>
+ </record>
+ <record type="location">
+ <short-name>jupiter</short-name>
<uid>jupiter</uid>
- <guid>jupiter</guid>
- <name>Jupiter Conference Room, Building 2, 1st Floor</name>
- </location>
- <location>
+ <full-name>Jupiter Conference Room, Building 2, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>uranus</short-name>
<uid>uranus</uid>
- <guid>uranus</guid>
- <name>Uranus Conference Room, Building 3, 1st Floor</name>
- </location>
- <location>
- <uid>morgensroom</uid>
- <guid>03DFF660-8BCC-4198-8588-DD77F776F518</guid>
- <name>Morgen's Room</name>
- </location>
- <location>
+ <full-name>Uranus Conference Room, Building 3, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>morgensroom</short-name>
+ <uid>03DFF660-8BCC-4198-8588-DD77F776F518</uid>
+ <full-name>Morgen's Room</full-name>
+ </record>
+ <record type="location">
+ <short-name>mercury</short-name>
<uid>mercury</uid>
- <guid>mercury</guid>
- <name>Mercury Conference Room, Building 1, 2nd Floor</name>
- </location>
- <location>
+ <full-name>Mercury Conference Room, Building 1, 2nd Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>location09</short-name>
+ <uid>location09</uid>
+ <full-name>Room 09</full-name>
+ </record>
+ <record type="location">
+ <short-name>location08</short-name>
+ <uid>location08</uid>
+ <full-name>Room 08</full-name>
+ </record>
+ <record type="location">
+ <short-name>location07</short-name>
+ <uid>location07</uid>
+ <full-name>Room 07</full-name>
+ </record>
+ <record type="location">
+ <short-name>location06</short-name>
+ <uid>location06</uid>
+ <full-name>Room 06</full-name>
+ </record>
+ <record type="location">
+ <short-name>location05</short-name>
+ <uid>location05</uid>
+ <full-name>Room 05</full-name>
+ </record>
+ <record type="location">
+ <short-name>location04</short-name>
+ <uid>location04</uid>
+ <full-name>Room 04</full-name>
+ </record>
+ <record type="location">
+ <short-name>location03</short-name>
+ <uid>location03</uid>
+ <full-name>Room 03</full-name>
+ </record>
+ <record type="location">
+ <short-name>location02</short-name>
+ <uid>location02</uid>
+ <full-name>Room 02</full-name>
+ </record>
+ <record type="location">
+ <short-name>location01</short-name>
+ <uid>location01</uid>
+ <full-name>Room 01</full-name>
+ </record>
+ <record type="location">
+ <short-name>delegatedroom</short-name>
<uid>delegatedroom</uid>
- <guid>delegatedroom</guid>
- <name>Delegated Conference Room</name>
- </location>
- <location>
- <uid>mars</uid>
- <guid>redplanet</guid>
- <name>Mars Conference Room, Building 1, 1st Floor</name>
- </location>
- <location>
- <uid>sharissroom</uid>
- <guid>80689D41-DAF8-4189-909C-DB017B271892</guid>
- <name>Shari's Room</name>
+ <full-name>Delegated Conference Room</full-name>
+ </record>
+ <record type="location">
+ <short-name>mars</short-name>
+ <uid>redplanet</uid>
+ <full-name>Mars Conference Room, Building 1, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>sharissroom</short-name>
+ <uid>80689D41-DAF8-4189-909C-DB017B271892</uid>
+ <full-name>Shari's Room</full-name>
<extras>
<associatedAddress>6F9EE33B-78F6-481B-9289-3D0812FF0D64</associatedAddress>
</extras>
- </location>
- <location>
+ </record>
+ <record type="location">
+ <short-name>pluto</short-name>
<uid>pluto</uid>
- <guid>pluto</guid>
- <name>Pluto Conference Room, Building 2, 1st Floor</name>
- </location>
- <location>
+ <full-name>Pluto Conference Room, Building 2, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>saturn</short-name>
<uid>saturn</uid>
- <guid>saturn</guid>
- <name>Saturn Conference Room, Building 2, 1st Floor</name>
- </location>
- <location>
- <uid>pretend</uid>
- <guid>06E3BDCB-9C19-485A-B14E-F146A80ADDC6</guid>
- <name>Pretend Conference Room</name>
+ <full-name>Saturn Conference Room, Building 2, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>location10</short-name>
+ <uid>location10</uid>
+ <full-name>Room 10</full-name>
+ </record>
+ <record type="location">
+ <short-name>pretend</short-name>
+ <uid>06E3BDCB-9C19-485A-B14E-F146A80ADDC6</uid>
+ <full-name>Pretend Conference Room</full-name>
<extras>
<associatedAddress>76E7ECA6-08BC-4AE7-930D-F2E7453993A5</associatedAddress>
</extras>
- </location>
- <location>
+ </record>
+ <record type="location">
+ <short-name>neptune</short-name>
<uid>neptune</uid>
- <guid>neptune</guid>
- <name>Neptune Conference Room, Building 2, 1st Floor</name>
- </location>
- <location>
+ <full-name>Neptune Conference Room, Building 2, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>Earth</short-name>
<uid>Earth</uid>
- <guid>Earth</guid>
- <name>Earth Conference Room, Building 1, 1st Floor</name>
- </location>
- <location>
+ <full-name>Earth Conference Room, Building 1, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>venus</short-name>
<uid>venus</uid>
- <guid>venus</guid>
- <name>Venus Conference Room, Building 1, 2nd Floor</name>
- </location>
- <resource>
- <uid>sharisotherresource</uid>
- <guid>CCE95217-A57B-481A-AC3D-FEC9AB6CE3A9</guid>
- <name>Shari's Other Resource</name>
- </resource>
- <resource>
- <uid>sharisresource</uid>
- <guid>C38BEE7A-36EE-478C-9DCB-CBF4612AFE65</guid>
- <name>Shari's Resource</name>
- </resource>
- <resource>
- <uid>sharisotherresource1</uid>
- <guid>0CE0BF31-5F9E-4801-A489-8C70CF287F5F</guid>
- <name>Shari's Other Resource1</name>
- </resource>
- <address>
- <uid>testaddress1</uid>
- <guid>6F9EE33B-78F6-481B-9289-3D0812FF0D64</guid>
- <name>Test Address One</name>
+ <full-name>Venus Conference Room, Building 1, 2nd Floor</full-name>
+ </record>
+ <record type="resource">
+ <short-name>sharisotherresource</short-name>
+ <uid>CCE95217-A57B-481A-AC3D-FEC9AB6CE3A9</uid>
+ <full-name>Shari's Other Resource</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource15</short-name>
+ <uid>resource15</uid>
+ <full-name>Resource 15</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource14</short-name>
+ <uid>resource14</uid>
+ <full-name>Resource 14</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource17</short-name>
+ <uid>resource17</uid>
+ <full-name>Resource 17</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource16</short-name>
+ <uid>resource16</uid>
+ <full-name>Resource 16</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource11</short-name>
+ <uid>resource11</uid>
+ <full-name>Resource 11</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource10</short-name>
+ <uid>resource10</uid>
+ <full-name>Resource 10</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource13</short-name>
+ <uid>resource13</uid>
+ <full-name>Resource 13</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource12</short-name>
+ <uid>resource12</uid>
+ <full-name>Resource 12</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource19</short-name>
+ <uid>resource19</uid>
+ <full-name>Resource 19</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource18</short-name>
+ <uid>resource18</uid>
+ <full-name>Resource 18</full-name>
+ </record>
+ <record type="resource">
+ <short-name>sharisresource</short-name>
+ <uid>C38BEE7A-36EE-478C-9DCB-CBF4612AFE65</uid>
+ <full-name>Shari's Resource</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource20</short-name>
+ <uid>resource20</uid>
+ <full-name>Resource 20</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource06</short-name>
+ <uid>resource06</uid>
+ <full-name>Resource 06</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource07</short-name>
+ <uid>resource07</uid>
+ <full-name>Resource 07</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource04</short-name>
+ <uid>resource04</uid>
+ <full-name>Resource 04</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource05</short-name>
+ <uid>resource05</uid>
+ <full-name>Resource 05</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource02</short-name>
+ <uid>resource02</uid>
+ <full-name>Resource 02</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource03</short-name>
+ <uid>resource03</uid>
+ <full-name>Resource 03</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource01</short-name>
+ <uid>resource01</uid>
+ <full-name>Resource 01</full-name>
+ </record>
+ <record type="resource">
+ <short-name>sharisotherresource1</short-name>
+ <uid>0CE0BF31-5F9E-4801-A489-8C70CF287F5F</uid>
+ <full-name>Shari's Other Resource1</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource08</short-name>
+ <uid>resource08</uid>
+ <full-name>Resource 08</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource09</short-name>
+ <uid>resource09</uid>
+ <full-name>Resource 09</full-name>
+ </record>
+ <record type="address">
+ <short-name>testaddress1</short-name>
+ <uid>6F9EE33B-78F6-481B-9289-3D0812FF0D64</uid>
+ <full-name>Test Address One</full-name>
<extras>
<streetAddress>20300 Stevens Creek Blvd, Cupertino, CA 95014</streetAddress>
<geo>37.322281,-122.028345</geo>
</extras>
- </address>
- <address>
- <uid>il2</uid>
- <guid>63A2F949-2D8D-4C8D-B8A5-DCF2A94610F3</guid>
- <name>IL2</name>
+ </record>
+ <record type="address">
+ <short-name>il2</short-name>
+ <uid>63A2F949-2D8D-4C8D-B8A5-DCF2A94610F3</uid>
+ <full-name>IL2</full-name>
<extras>
<streetAddress>2 Infinite Loop, Cupertino, CA 95014</streetAddress>
<geo>37.332633,-122.030502</geo>
</extras>
- </address>
- <address>
- <uid>il1</uid>
- <guid>76E7ECA6-08BC-4AE7-930D-F2E7453993A5</guid>
- <name>IL1</name>
+ </record>
+ <record type="address">
+ <short-name>il1</short-name>
+ <uid>76E7ECA6-08BC-4AE7-930D-F2E7453993A5</uid>
+ <full-name>IL1</full-name>
<extras>
<streetAddress>1 Infinite Loop, Cupertino, CA 95014</streetAddress>
<geo>37.331741,-122.030333</geo>
</extras>
- </address>
-</accounts>
+ </record>
+</directory>
Modified: CalendarServer/branches/users/sagen/move2who-3/contrib/performance/loadtest/test_sim.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/contrib/performance/loadtest/test_sim.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/contrib/performance/loadtest/test_sim.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -24,32 +24,34 @@
from twisted.internet.defer import Deferred, succeed
from twisted.trial.unittest import TestCase
-from twistedcaldav.directory.directory import DirectoryRecord
-
from contrib.performance.stats import NormalDistribution
from contrib.performance.loadtest.ical import OS_X_10_6
from contrib.performance.loadtest.profiles import Eventer, Inviter, Accepter
from contrib.performance.loadtest.population import (
SmoothRampUp, ClientType, PopulationParameters, Populator, CalendarClientSimulator,
- ProfileType, SimpleStatistics)
+ ProfileType, SimpleStatistics
+)
from contrib.performance.loadtest.sim import (
- Arrival, SimOptions, LoadSimulator, LagTrackingReactor)
+ Arrival, SimOptions, LoadSimulator, LagTrackingReactor,
+ _DirectoryRecord
+)
+
VALID_CONFIG = {
'server': 'tcp:127.0.0.1:8008',
'webadmin': {
'enabled': True,
'HTTPPort': 8080,
- },
+ },
'arrival': {
'factory': 'contrib.performance.loadtest.population.SmoothRampUp',
'params': {
'groups': 10,
'groupSize': 1,
'interval': 3,
- },
},
- }
+ },
+}
VALID_CONFIG_PLIST = writePlistToString(VALID_CONFIG)
@@ -104,8 +106,9 @@
realmName = 'stub'
def _user(self, name):
- record = DirectoryRecord(self, 'user', name, (name,))
- record.password = 'password-' + name
+ password = 'password-' + name
+ email = name + "@example.com"
+ record = _DirectoryRecord(name, password, name, email)
return record
@@ -119,10 +122,10 @@
[self._user('alice'), self._user('bob'), self._user('carol')],
Populator(None), None, None, 'http://example.org:1234/', None, None)
users = sorted([
- calsim._createUser(0)[0],
- calsim._createUser(1)[0],
- calsim._createUser(2)[0],
- ])
+ calsim._createUser(0)[0],
+ calsim._createUser(1)[0],
+ calsim._createUser(2)[0],
+ ])
self.assertEqual(['alice', 'bob', 'carol'], users)
@@ -171,8 +174,9 @@
params = PopulationParameters()
params.addClient(1, ClientType(
- BrokenClient, {'runResult': clientRunResult},
- [ProfileType(BrokenProfile, {'runResult': profileRunResult})]))
+ BrokenClient, {'runResult': clientRunResult},
+ [ProfileType(BrokenProfile, {'runResult': profileRunResult})])
+ )
sim = CalendarClientSimulator(
[self._user('alice')], Populator(None), params, None, 'http://example.com:1234/', None, None)
sim.add(1, 1)
@@ -284,8 +288,9 @@
config["accounts"] = {
"loader": "contrib.performance.loadtest.sim.recordsFromCSVFile",
"params": {
- "path": accounts.path},
- }
+ "path": accounts.path
+ },
+ }
configpath = FilePath(self.mktemp())
configpath.setContent(writePlistToString(config))
io = StringIO()
@@ -312,8 +317,9 @@
config["accounts"] = {
"loader": "contrib.performance.loadtest.sim.recordsFromCSVFile",
"params": {
- "path": ""},
- }
+ "path": ""
+ },
+ }
configpath = FilePath(self.mktemp())
configpath.setContent(writePlistToString(config))
sim = LoadSimulator.fromCommandLine(['--config', configpath.path],
@@ -406,8 +412,9 @@
section of the configuration file specified.
"""
config = FilePath(self.mktemp())
- config.setContent(writePlistToString({
- "server": "https://127.0.0.3:8432/"}))
+ config.setContent(
+ writePlistToString({"server": "https://127.0.0.3:8432/"})
+ )
sim = LoadSimulator.fromCommandLine(['--config', config.path])
self.assertEquals(sim.server, "https://127.0.0.3:8432/")
@@ -418,16 +425,18 @@
[arrival] section of the configuration file specified.
"""
config = FilePath(self.mktemp())
- config.setContent(writePlistToString({
- "arrival": {
- "factory": "contrib.performance.loadtest.population.SmoothRampUp",
- "params": {
- "groups": 10,
- "groupSize": 1,
- "interval": 3,
- },
- },
- }))
+ config.setContent(
+ writePlistToString({
+ "arrival": {
+ "factory": "contrib.performance.loadtest.population.SmoothRampUp",
+ "params": {
+ "groups": 10,
+ "groupSize": 1,
+ "interval": 3,
+ },
+ },
+ })
+ )
sim = LoadSimulator.fromCommandLine(['--config', config.path])
self.assertEquals(
sim.arrival,
@@ -461,11 +470,17 @@
section of the configuration file specified.
"""
config = FilePath(self.mktemp())
- config.setContent(writePlistToString({
- "clients": [{
+ config.setContent(
+ writePlistToString(
+ {
+ "clients": [
+ {
"software": "contrib.performance.loadtest.ical.OS_X_10_6",
- "params": {"foo": "bar"},
- "profiles": [{
+ "params": {
+ "foo": "bar"
+ },
+ "profiles": [
+ {
"params": {
"interval": 25,
"eventStartDistribution": {
@@ -473,19 +488,38 @@
"params": {
"mu": 123,
"sigma": 456,
- }}},
- "class": "contrib.performance.loadtest.profiles.Eventer"}],
+ }
+ }
+ },
+ "class": "contrib.performance.loadtest.profiles.Eventer"
+ }
+ ],
"weight": 3,
- }]}))
+ }
+ ]
+ }
+ )
+ )
sim = LoadSimulator.fromCommandLine(
['--config', config.path, '--clients', config.path]
)
expectedParameters = PopulationParameters()
expectedParameters.addClient(
- 3, ClientType(OS_X_10_6, {"foo": "bar"}, [ProfileType(Eventer, {
+ 3,
+ ClientType(
+ OS_X_10_6,
+ {"foo": "bar"},
+ [
+ ProfileType(
+ Eventer, {
"interval": 25,
- "eventStartDistribution": NormalDistribution(123, 456)})]))
+ "eventStartDistribution": NormalDistribution(123, 456)
+ }
+ )
+ ]
+ )
+ )
self.assertEquals(sim.parameters, expectedParameters)
@@ -512,9 +546,18 @@
configuration file are added to the logging system.
"""
config = FilePath(self.mktemp())
- config.setContent(writePlistToString({
- "observers": [{"type":"contrib.performance.loadtest.population.SimpleStatistics", "params":{}, }, ]
- }))
+ config.setContent(
+ writePlistToString(
+ {
+ "observers": [
+ {
+ "type": "contrib.performance.loadtest.population.SimpleStatistics",
+ "params": {},
+ },
+ ]
+ }
+ )
+ )
sim = LoadSimulator.fromCommandLine(['--config', config.path])
self.assertEquals(len(sim.observers), 1)
self.assertIsInstance(sim.observers[0], SimpleStatistics)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/customxml.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/customxml.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/customxml.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1456,6 +1456,8 @@
ResourceType.calendarproxyread = ResourceType(Principal(), Collection(), CalendarProxyRead())
ResourceType.calendarproxywrite = ResourceType(Principal(), Collection(), CalendarProxyWrite())
+ResourceType.calendarproxyreadfor = ResourceType(Principal(), Collection(), CalendarProxyReadFor())
+ResourceType.calendarproxywritefor = ResourceType(Principal(), Collection(), CalendarProxyWriteFor())
ResourceType.timezones = ResourceType(Timezones())
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/addressbook.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/addressbook.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/addressbook.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -34,7 +34,6 @@
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twistedcaldav.config import config
-from twistedcaldav.directory.idirectory import IDirectoryService
from twistedcaldav.directory.common import CommonUIDProvisioningResource,\
uidsResourceName, CommonHomeTypeProvisioningResource
@@ -58,14 +57,14 @@
-class DirectoryAddressBookProvisioningResource (
+class DirectoryAddressBookProvisioningResource(
ReadOnlyResourceMixIn,
CalDAVComplianceMixIn,
DAVResourceWithChildrenMixin,
DAVResource,
):
def defaultAccessControlList(self):
- return config.ProvisioningResourceACL
+ return succeed(config.ProvisioningResourceACL)
def etag(self):
@@ -77,9 +76,9 @@
-class DirectoryAddressBookHomeProvisioningResource (
- DirectoryAddressBookProvisioningResource
- ):
+class DirectoryAddressBookHomeProvisioningResource(
+ DirectoryAddressBookProvisioningResource
+):
"""
Resource which provisions address book home collections as needed.
"""
@@ -93,7 +92,8 @@
super(DirectoryAddressBookHomeProvisioningResource, self).__init__()
- self.directory = IDirectoryService(directory)
+ # MOVE2WHO
+ self.directory = directory # IDirectoryService(directory)
self._url = url
self._newStore = store
@@ -103,8 +103,14 @@
#
# Create children
#
- for recordType in self.directory.recordTypes():
- self.putChild(recordType, DirectoryAddressBookHomeTypeProvisioningResource(self, recordType))
+ # ...just "users" though. If we iterate all of the directory's
+ # recordTypes, we also get the proxy sub principal types.
+ for recordTypeName in [
+ self.directory.recordTypeToOldName(r) for r in [
+ self.directory.recordType.user
+ ]
+ ]:
+ self.putChild(recordTypeName, DirectoryAddressBookHomeTypeProvisioningResource(self, r))
self.putChild(uidsResourceName, DirectoryAddressBookHomeUIDProvisioningResource(self))
@@ -114,7 +120,7 @@
def listChildren(self):
- return self.directory.recordTypes()
+ return [self.directory.recordTypeToOldName(r) for r in self.directory.recordTypes()]
def principalCollections(self):
@@ -129,12 +135,13 @@
return self.directory.principalCollection.principalForRecord(record)
+ @inlineCallbacks
def homeForDirectoryRecord(self, record, request):
- uidResource = self.getChild(uidsResourceName)
+ uidResource = yield self.getChild(uidsResourceName)
if uidResource is None:
- return None
+ returnValue(None)
else:
- return uidResource.homeResourceForRecord(record, request)
+ returnValue((yield uidResource.homeResourceForRecord(record, request)))
##
@@ -151,9 +158,9 @@
class DirectoryAddressBookHomeTypeProvisioningResource (
- CommonHomeTypeProvisioningResource,
- DirectoryAddressBookProvisioningResource
- ):
+ CommonHomeTypeProvisioningResource,
+ DirectoryAddressBookProvisioningResource
+):
"""
Resource which provisions address book home collections of a specific
record type as needed.
@@ -174,19 +181,19 @@
def url(self):
- return joinURL(self._parent.url(), self.recordType)
+ return joinURL(self._parent.url(), self.directory.recordTypeToOldName(self.recordType))
+ @inlineCallbacks
def listChildren(self):
if config.EnablePrincipalListings:
+ children = []
+ for record in (yield self.directory.listRecords(self.recordType)):
+ if record.enabledForAddressBooks:
+ for shortName in record.shortNames:
+ children.append(shortName)
- def _recordShortnameExpand():
- for record in self.directory.listRecords(self.recordType):
- if record.enabledForAddressBooks:
- for shortName in record.shortNames:
- yield shortName
-
- return _recordShortnameExpand()
+ returnValue(children)
else:
# Not a listable collection
raise HTTPError(responsecode.FORBIDDEN)
@@ -222,9 +229,9 @@
class DirectoryAddressBookHomeUIDProvisioningResource (
- CommonUIDProvisioningResource,
- DirectoryAddressBookProvisioningResource
- ):
+ CommonUIDProvisioningResource,
+ DirectoryAddressBookProvisioningResource
+):
homeResourceTypeName = 'addressbooks'
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/aggregate.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/aggregate.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/aggregate.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,385 +0,0 @@
-##
-# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-"""
-Directory service implementation which aggregates multiple directory
-services.
-"""
-
-__all__ = [
- "AggregateDirectoryService",
- "DuplicateRecordTypeError",
-]
-
-import itertools
-from twisted.cred.error import UnauthorizedLogin
-
-from twistedcaldav.directory.idirectory import IDirectoryService
-from twistedcaldav.directory.directory import DirectoryService, DirectoryError
-from twistedcaldav.directory.directory import UnknownRecordTypeError
-from twisted.internet.defer import inlineCallbacks, returnValue
-
-class AggregateDirectoryService(DirectoryService):
- """
- L{IDirectoryService} implementation which aggregates multiple directory
- services.
-
- @ivar _recordTypes: A map of record types to L{IDirectoryService}s.
- @type _recordTypes: L{dict} mapping L{bytes} to L{IDirectoryService}
- provider.
- """
- baseGUID = "06FB225F-39E7-4D34-B1D1-29925F5E619B"
-
- def __init__(self, services, groupMembershipCache):
- super(AggregateDirectoryService, self).__init__()
-
- realmName = None
- recordTypes = {}
- self.groupMembershipCache = groupMembershipCache
-
- for service in services:
- service = IDirectoryService(service)
-
- if service.realmName != realmName:
- assert realmName is None, (
- "Aggregated directory services must have the same realm name: %r != %r\nServices: %r"
- % (service.realmName, realmName, services)
- )
- realmName = service.realmName
-
- if not hasattr(service, "recordTypePrefix"):
- service.recordTypePrefix = ""
- prefix = service.recordTypePrefix
-
- for recordType in (prefix + r for r in service.recordTypes()):
- if recordType in recordTypes:
- raise DuplicateRecordTypeError(
- "%r is in multiple services: %s, %s"
- % (recordType, recordTypes[recordType], service)
- )
- recordTypes[recordType] = service
-
- service.aggregateService = self
-
- self.realmName = realmName
- self._recordTypes = recordTypes
-
- # FIXME: This is a temporary workaround until new data store is in
- # place. During the purging of deprovisioned users' data, we need
- # to be able to look up records by uid and shortName. The purge
- # tool sticks temporary fake records in here.
- self._tmpRecords = {
- "uids" : { },
- "shortNames" : { },
- }
-
-
- def __repr__(self):
- return "<%s (%s): %r>" % (self.__class__.__name__, self.realmName, self._recordTypes)
-
-
- #
- # Define calendarHomesCollection as a property so we can set it on contained services
- #
- def _getCalendarHomesCollection(self):
- return self._calendarHomesCollection
-
-
- def _setCalendarHomesCollection(self, value):
- for service in self._recordTypes.values():
- service.calendarHomesCollection = value
- self._calendarHomesCollection = value
-
- calendarHomesCollection = property(_getCalendarHomesCollection, _setCalendarHomesCollection)
-
- #
- # Define addressBookHomesCollection as a property so we can set it on contained services
- #
- def _getAddressBookHomesCollection(self):
- return self._addressBookHomesCollection
-
-
- def _setAddressBookHomesCollection(self, value):
- for service in self._recordTypes.values():
- service.addressBookHomesCollection = value
- self._addressBookHomesCollection = value
-
- addressBookHomesCollection = property(_getAddressBookHomesCollection, _setAddressBookHomesCollection)
-
-
- def addService(self, service):
- """
- Add another service to this aggregate.
-
- @param service: the service to add
- @type service: L{IDirectoryService}
- """
- service = IDirectoryService(service)
-
- if service.realmName != self.realmName:
- assert self.realmName is None, (
- "Aggregated directory services must have the same realm name: %r != %r\nServices: %r"
- % (service.realmName, self.realmName, service)
- )
-
- if not hasattr(service, "recordTypePrefix"):
- service.recordTypePrefix = ""
- prefix = service.recordTypePrefix
-
- for recordType in (prefix + r for r in service.recordTypes()):
- if recordType in self._recordTypes:
- raise DuplicateRecordTypeError(
- "%r is in multiple services: %s, %s"
- % (recordType, self.recordTypes[recordType], service)
- )
- self._recordTypes[recordType] = service
-
- service.aggregateService = self
-
-
- def recordTypes(self):
- return set(self._recordTypes)
-
-
- def listRecords(self, recordType):
- records = self._query("listRecords", recordType)
- if records is None:
- return ()
- else:
- return records
-
-
- def recordWithShortName(self, recordType, shortName):
-
- # FIXME: These temporary records shouldn't be needed when we move
- # to the new data store API. They're currently needed when purging
- # deprovisioned users' data.
- record = self._tmpRecords["shortNames"].get(shortName, None)
- if record:
- return record
-
- return self._query("recordWithShortName", recordType, shortName)
-
-
- def recordWithUID(self, uid):
-
- # FIXME: These temporary records shouldn't be needed when we move
- # to the new data store API. They're currently needed when purging
- # deprovisioned users' data.
- record = self._tmpRecords["uids"].get(uid, None)
- if record:
- return record
-
- return self._queryAll("recordWithUID", uid)
-
- recordWithGUID = recordWithUID
-
- def recordWithAuthID(self, authID):
- return self._queryAll("recordWithAuthID", authID)
-
-
- def recordWithCalendarUserAddress(self, address):
- return self._queryAll("recordWithCalendarUserAddress", address)
-
-
- def recordWithCachedGroupsAlias(self, recordType, alias):
- """
- @param recordType: the type of the record to look up.
- @param alias: the cached-groups alias of the record to look up.
- @type alias: C{str}
-
- @return: a deferred L{IDirectoryRecord} with the given cached-groups
- alias, or C{None} if no such record is found.
- """
- service = self.serviceForRecordType(recordType)
- return service.recordWithCachedGroupsAlias(recordType, alias)
-
-
- @inlineCallbacks
- def recordsMatchingFields(self, fields, operand="or", recordType=None):
-
- if recordType:
- services = (self.serviceForRecordType(recordType),)
- else:
- services = set(self._recordTypes.values())
-
- generators = []
- for service in services:
- generator = (yield service.recordsMatchingFields(fields,
- operand=operand, recordType=recordType))
- generators.append(generator)
-
- returnValue(itertools.chain(*generators))
-
-
- @inlineCallbacks
- def recordsMatchingTokens(self, tokens, context=None):
- """
- Combine the results from the sub-services.
-
- Each token is searched for within each record's full name and email
- address; if each token is found within a record that record is returned
- in the results.
-
- If context is None, all record types are considered. If context is
- "location", only locations are considered. If context is "attendee",
- only users, groups, and resources are considered.
-
- @param tokens: The tokens to search on
- @type tokens: C{list} of C{str} (utf-8 bytes)
-
- @param context: An indication of what the end user is searching for;
- "attendee", "location", or None
- @type context: C{str}
-
- @return: a deferred sequence of L{IDirectoryRecord}s which match the
- given tokens and optional context.
- """
-
- services = set(self._recordTypes.values())
-
- generators = []
- for service in services:
- generator = (yield service.recordsMatchingTokens(tokens,
- context=context))
- generators.append(generator)
-
- returnValue(itertools.chain(*generators))
-
-
- def getGroups(self, guids):
- """
- Returns a set of group records for the list of guids passed in. For
- any group that also contains subgroups, those subgroups' records are
- also returned, and so on.
- """
- recordType = self.recordType_groups
- service = self.serviceForRecordType(recordType)
- return service.getGroups(guids)
-
-
- def serviceForRecordType(self, recordType):
- try:
- return self._recordTypes[recordType]
- except KeyError:
- raise UnknownRecordTypeError(recordType)
-
-
- def _query(self, query, recordType, *args):
- try:
- service = self.serviceForRecordType(recordType)
- except UnknownRecordTypeError:
- return None
-
- return getattr(service, query)(
- recordType[len(service.recordTypePrefix):],
- *[a[len(service.recordTypePrefix):] for a in args]
- )
-
-
- def _queryAll(self, query, *args):
- for service in self._recordTypes.values():
- try:
- record = getattr(service, query)(*args)
- except UnknownRecordTypeError:
- record = None
- if record is not None:
- return record
- else:
- return None
-
-
- def flushCaches(self):
- for service in self._recordTypes.values():
- if hasattr(service, "_initCaches"):
- service._initCaches()
-
- userRecordTypes = [DirectoryService.recordType_users]
-
- def requestAvatarId(self, credentials):
-
- if credentials.authnPrincipal:
- return credentials.authnPrincipal.record.service.requestAvatarId(credentials)
-
- raise UnauthorizedLogin("No such user: %s" % (credentials.credentials.username,))
-
-
- def getResourceInfo(self):
- results = []
- for service in self._recordTypes.values():
- for result in service.getResourceInfo():
- if result:
- results.append(result)
- return results
-
-
- def getExternalProxyAssignments(self):
- service = self.serviceForRecordType(self.recordType_locations)
- return service.getExternalProxyAssignments()
-
-
- def createRecord(self, recordType, guid=None, shortNames=(), authIDs=set(),
- fullName=None, firstName=None, lastName=None, emailAddresses=set(),
- uid=None, password=None, **kwargs):
- service = self.serviceForRecordType(recordType)
- return service.createRecord(recordType, guid=guid,
- shortNames=shortNames, authIDs=authIDs, fullName=fullName,
- firstName=firstName, lastName=lastName,
- emailAddresses=emailAddresses, uid=uid, password=password, **kwargs)
-
-
- def updateRecord(self, recordType, guid=None, shortNames=(), authIDs=set(),
- fullName=None, firstName=None, lastName=None, emailAddresses=set(),
- uid=None, password=None, **kwargs):
- service = self.serviceForRecordType(recordType)
- return service.updateRecord(recordType, guid=guid,
- shortNames=shortNames,
- authIDs=authIDs, fullName=fullName, firstName=firstName,
- lastName=lastName, emailAddresses=emailAddresses, uid=uid,
- password=password, **kwargs)
-
-
- def destroyRecord(self, recordType, guid=None):
- service = self.serviceForRecordType(recordType)
- return service.destroyRecord(recordType, guid=guid)
-
-
- def setRealm(self, realmName):
- """
- Set a new realm name for this and nested services
- """
- self.realmName = realmName
- for service in self._recordTypes.values():
- service.setRealm(realmName)
-
-
- def setPrincipalCollection(self, principalCollection):
- """
- Set the principal service that the directory relies on for doing proxy tests.
-
- @param principalService: the principal service.
- @type principalService: L{DirectoryProvisioningResource}
- """
- self.principalCollection = principalCollection
- for service in self._recordTypes.values():
- service.setPrincipalCollection(principalCollection)
-
-
-
-class DuplicateRecordTypeError(DirectoryError):
- """
- Duplicate record type.
- """
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/appleopendirectory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/appleopendirectory.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/appleopendirectory.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,1584 +0,0 @@
-# -*- test-case-name: twistedcaldav.directory.test.test_opendirectory -*-
-##
-# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-"""
-Apple OpenDirectory directory service implementation.
-"""
-
-__all__ = [
- "OpenDirectoryService",
- "OpenDirectoryInitError",
-]
-
-import sys
-import time
-from uuid import UUID
-
-from twisted.internet.defer import succeed, inlineCallbacks, returnValue
-from twisted.cred.credentials import UsernamePassword
-from txweb2.auth.digest import DigestedCredentials
-from twext.python.log import Logger
-
-from twistedcaldav.directory.cachingdirectory import CachingDirectoryService, \
- CachingDirectoryRecord
-from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
-from twistedcaldav.directory.directory import DirectoryError, UnknownRecordTypeError
-from twistedcaldav.directory.util import splitIntoBatches
-from twistedcaldav.directory.principal import cuAddressConverter
-
-from calendarserver.platform.darwin.od import opendirectory, dsattributes, dsquery
-
-
-
-class OpenDirectoryService(CachingDirectoryService):
- """
- OpenDirectory implementation of L{IDirectoryService}.
- """
- log = Logger()
-
- baseGUID = "891F8321-ED02-424C-BA72-89C32F215C1E"
-
- def __repr__(self):
- return "<%s %r: %r>" % (self.__class__.__name__, self.realmName, self.node)
-
-
- def __init__(self, params, odModule=None):
- """
- @param params: a dictionary containing the following keys:
-
- - node: an OpenDirectory node name to bind to.
-
- - restrictEnabledRecords: C{True} if a group in the directory is to
- be used to determine which calendar users are enabled.
-
- - restrictToGroup: C{str} guid or name of group used to restrict
- enabled users.
-
- - cacheTimeout: C{int} number of minutes before cache is
- invalidated.
-
- - negativeCache: C{False} cache the fact that a record wasn't found
- """
- defaults = {
- 'node' : '/Search',
- 'restrictEnabledRecords' : False,
- 'restrictToGroup' : '',
- 'cacheTimeout' : 1, # Minutes
- 'batchSize' : 100, # for splitting up large queries
- 'negativeCaching' : False,
- 'recordTypes' : (
- self.recordType_users,
- self.recordType_groups,
- ),
- 'augmentService' : None,
- 'groupMembershipCache' : None,
- }
- ignored = ('requireComputerRecord',)
- params = self.getParams(params, defaults, ignored)
-
- self._recordTypes = params['recordTypes']
-
- super(OpenDirectoryService, self).__init__(params['cacheTimeout'],
- params['negativeCaching'])
-
- if odModule is None:
- odModule = opendirectory
- self.odModule = odModule
-
- try:
- directory = self.odModule.odInit(params['node'])
- except self.odModule.ODError, e:
- self.log.error("OpenDirectory (node=%s) Initialization error: %s" % (params['node'], e))
- raise
-
- self.augmentService = params['augmentService']
- self.groupMembershipCache = params['groupMembershipCache']
- self.realmName = params['node']
- self.directory = directory
- self.node = params['node']
- self.restrictEnabledRecords = params['restrictEnabledRecords']
- self.restrictToGroup = params['restrictToGroup']
- self.batchSize = params['batchSize']
- try:
- UUID(self.restrictToGroup)
- except:
- self.restrictToGUID = False
- else:
- self.restrictToGUID = True
- self.restrictedTimestamp = 0
-
- # Set up the /Local/Default node if it's in the search path so we can
- # send custom queries to it
- self.localNode = None
- try:
- if self.node == "/Search":
- result = self.odModule.getNodeAttributes(self.directory, "/Search",
- (dsattributes.kDS1AttrSearchPath,))
- if "/Local/Default" in result[dsattributes.kDS1AttrSearchPath]:
- try:
- self.localNode = self.odModule.odInit("/Local/Default")
- except self.odModule.ODError, e:
- self.log.error("Failed to open /Local/Default): %s" % (e,))
- except AttributeError:
- pass
-
-
- @property
- def restrictedGUIDs(self):
- """
- Look up (and cache) the set of guids that are members of the
- restrictToGroup. If restrictToGroup is not set, return None to
- indicate there are no group restrictions.
- """
- if self.restrictEnabledRecords:
- if time.time() - self.restrictedTimestamp > self.cacheTimeout:
- attributeToMatch = dsattributes.kDS1AttrGeneratedUID if self.restrictToGUID else dsattributes.kDSNAttrRecordName
- valueToMatch = self.restrictToGroup
- self.log.debug("Doing restricted group membership check")
- self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
- self.directory,
- attributeToMatch,
- valueToMatch,
- dsattributes.eDSExact,
- False,
- dsattributes.kDSStdRecordTypeGroups,
- [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups, ],
- ))
- results = self.odModule.queryRecordsWithAttribute_list(
- self.directory,
- attributeToMatch,
- valueToMatch,
- dsattributes.eDSExact,
- False,
- dsattributes.kDSStdRecordTypeGroups,
- [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups, ],
- )
-
- if len(results) == 1:
- members = results[0][1].get(dsattributes.kDSNAttrGroupMembers, [])
- nestedGroups = results[0][1].get(dsattributes.kDSNAttrNestedGroups, [])
- else:
- members = []
- nestedGroups = []
- self._cachedRestrictedGUIDs = set(self._expandGroupMembership(members, nestedGroups, returnGroups=True))
- self.log.debug("Got %d restricted group members" % (len(self._cachedRestrictedGUIDs),))
- self.restrictedTimestamp = time.time()
- return self._cachedRestrictedGUIDs
- else:
- # No restrictions
- return None
-
-
- def __cmp__(self, other):
- if not isinstance(other, DirectoryRecord):
- return super(DirectoryRecord, self).__eq__(other)
-
- for attr in ("directory", "node"):
- diff = cmp(getattr(self, attr), getattr(other, attr))
- if diff != 0:
- return diff
- return 0
-
-
- def __hash__(self):
- h = hash(self.__class__.__name__)
- for attr in ("node",):
- h = (h + hash(getattr(self, attr))) & sys.maxint
- return h
-
-
- def _expandGroupMembership(self, members, nestedGroups, processedGUIDs=None, returnGroups=False):
-
- if processedGUIDs is None:
- processedGUIDs = set()
-
- if isinstance(members, str):
- members = [members]
-
- if isinstance(nestedGroups, str):
- nestedGroups = [nestedGroups]
-
- for memberGUID in members:
- if memberGUID not in processedGUIDs:
- processedGUIDs.add(memberGUID)
- yield memberGUID
-
- for groupGUID in nestedGroups:
- if groupGUID in processedGUIDs:
- continue
-
- self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
- self.directory,
- dsattributes.kDS1AttrGeneratedUID,
- groupGUID,
- dsattributes.eDSExact,
- False,
- dsattributes.kDSStdRecordTypeGroups,
- [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups]
- ))
- result = self.odModule.queryRecordsWithAttribute_list(
- self.directory,
- dsattributes.kDS1AttrGeneratedUID,
- groupGUID,
- dsattributes.eDSExact,
- False,
- dsattributes.kDSStdRecordTypeGroups,
- [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups]
- )
-
- if not result:
- self.log.error("Couldn't find group %s when trying to expand nested groups."
- % (groupGUID,))
- continue
-
- group = result[0][1]
-
- processedGUIDs.add(groupGUID)
- if returnGroups:
- yield groupGUID
-
- for GUID in self._expandGroupMembership(
- group.get(dsattributes.kDSNAttrGroupMembers, []),
- group.get(dsattributes.kDSNAttrNestedGroups, []),
- processedGUIDs,
- returnGroups,
- ):
- yield GUID
-
-
- def recordTypes(self):
- return self._recordTypes
-
-
- def listRecords(self, recordType):
- """
- Retrieve all the records of recordType from the directory, but for
- expediency don't index them or cache them locally, nor in memcached.
- """
-
- records = []
-
- attrs = [
- dsattributes.kDS1AttrGeneratedUID,
- dsattributes.kDSNAttrRecordName,
- dsattributes.kDS1AttrDistinguishedName,
- ]
-
- if recordType == DirectoryService.recordType_users:
- ODRecordType = self._toODRecordTypes[recordType]
-
- elif recordType in (
- DirectoryService.recordType_resources,
- DirectoryService.recordType_locations,
- ):
- attrs.append(dsattributes.kDSNAttrResourceInfo)
- ODRecordType = self._toODRecordTypes[recordType]
-
- elif recordType == DirectoryService.recordType_groups:
- attrs.append(dsattributes.kDSNAttrGroupMembers)
- attrs.append(dsattributes.kDSNAttrNestedGroups)
- ODRecordType = dsattributes.kDSStdRecordTypeGroups
-
- self.log.debug("Querying OD for all %s records" % (recordType,))
- results = self.odModule.listAllRecordsWithAttributes_list(
- self.directory, ODRecordType, attrs)
- self.log.debug("Retrieved %d %s records" % (len(results), recordType,))
-
- for key, value in results:
- recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
- if not recordGUID:
- self.log.warn("Ignoring record missing GUID: %s %s" %
- (key, value,))
- continue
-
- # Skip if group restriction is in place and guid is not
- # a member (but don't skip any groups)
- if (recordType != self.recordType_groups and
- self.restrictedGUIDs is not None):
- if str(recordGUID) not in self.restrictedGUIDs:
- continue
-
- recordShortNames = self._uniqueTupleFromAttribute(
- value.get(dsattributes.kDSNAttrRecordName))
- recordFullName = value.get(
- dsattributes.kDS1AttrDistinguishedName)
-
- proxyGUIDs = ()
- readOnlyProxyGUIDs = ()
-
- if recordType in (
- DirectoryService.recordType_resources,
- DirectoryService.recordType_locations,
- ):
- resourceInfo = value.get(dsattributes.kDSNAttrResourceInfo)
- if resourceInfo is not None:
- if type(resourceInfo) is not str:
- resourceInfo = resourceInfo[0]
- try:
- (
- _ignore_autoSchedule,
- proxy,
- readOnlyProxy
- ) = self.parseResourceInfo(
- resourceInfo,
- recordGUID,
- recordType,
- recordShortNames[0]
- )
- except ValueError:
- continue
- if proxy:
- proxyGUIDs = (proxy,)
- if readOnlyProxy:
- readOnlyProxyGUIDs = (readOnlyProxy,)
-
- # Special case for groups, which have members.
- if recordType == self.recordType_groups:
- memberGUIDs = value.get(dsattributes.kDSNAttrGroupMembers)
- if memberGUIDs is None:
- memberGUIDs = ()
- elif type(memberGUIDs) is str:
- memberGUIDs = (memberGUIDs,)
- nestedGUIDs = value.get(dsattributes.kDSNAttrNestedGroups)
- if nestedGUIDs:
- if type(nestedGUIDs) is str:
- nestedGUIDs = (nestedGUIDs,)
- memberGUIDs += tuple(nestedGUIDs)
- else:
- nestedGUIDs = ()
- else:
- memberGUIDs = ()
- nestedGUIDs = ()
-
- record = OpenDirectoryRecord(
- service=self,
- recordType=recordType,
- guid=recordGUID,
- nodeName="",
- shortNames=recordShortNames,
- authIDs=(),
- fullName=recordFullName,
- firstName="",
- lastName="",
- emailAddresses="",
- memberGUIDs=memberGUIDs,
- nestedGUIDs=nestedGUIDs,
- extProxies=proxyGUIDs,
- extReadOnlyProxies=readOnlyProxyGUIDs,
- )
-
- # (Copied from below)
- # Look up augment information
- # TODO: this needs to be deferred but for now we hard code
- # the deferred result because we know it is completing
- # immediately.
- if self.augmentService is not None:
- d = self.augmentService.getAugmentRecord(record.guid,
- recordType)
- d.addCallback(lambda x: record.addAugmentInformation(x))
- records.append(record)
-
- self.log.debug("ListRecords returning %d %s records" % (len(records),
- recordType))
-
- return records
-
-
- def groupsForGUID(self, guid):
-
- attrs = [
- dsattributes.kDS1AttrGeneratedUID,
- ]
-
- recordType = dsattributes.kDSStdRecordTypeGroups
-
- guids = set()
-
- self.log.debug("Looking up which groups %s is a member of" % (guid,))
- try:
- self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
- self.directory,
- dsattributes.kDSNAttrGroupMembers,
- guid,
- dsattributes.eDSExact,
- False,
- recordType,
- attrs,
- ))
- results = self.odModule.queryRecordsWithAttribute_list(
- self.directory,
- dsattributes.kDSNAttrGroupMembers,
- guid,
- dsattributes.eDSExact,
- False,
- recordType,
- attrs,
- )
- except self.odModule.ODError, ex:
- self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
- raise
-
- for (_ignore_recordShortName, value) in results:
-
- # Now get useful record info.
- recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
- if recordGUID:
- guids.add(recordGUID)
-
- try:
- self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
- self.directory,
- dsattributes.kDSNAttrNestedGroups,
- guid,
- dsattributes.eDSExact,
- False,
- recordType,
- attrs,
- ))
- results = self.odModule.queryRecordsWithAttribute_list(
- self.directory,
- dsattributes.kDSNAttrNestedGroups,
- guid,
- dsattributes.eDSExact,
- False,
- recordType,
- attrs,
- )
- except self.odModule.ODError, ex:
- self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
- raise
-
- for (_ignore_recordShortName, value) in results:
-
- # Now get useful record info.
- recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
- if recordGUID:
- guids.add(recordGUID)
-
- self.log.debug("%s is a member of %d groups" % (guid, len(guids)))
-
- return guids
-
- _ODFields = {
- 'fullName' : {
- 'odField' : dsattributes.kDS1AttrDistinguishedName,
- 'appliesTo' : set([
- dsattributes.kDSStdRecordTypeUsers,
- dsattributes.kDSStdRecordTypeGroups,
- dsattributes.kDSStdRecordTypeResources,
- dsattributes.kDSStdRecordTypePlaces,
- ]),
- },
- 'firstName' : {
- 'odField' : dsattributes.kDS1AttrFirstName,
- 'appliesTo' : set([
- dsattributes.kDSStdRecordTypeUsers,
- ]),
- },
- 'lastName' : {
- 'odField' : dsattributes.kDS1AttrLastName,
- 'appliesTo' : set([
- dsattributes.kDSStdRecordTypeUsers,
- ]),
- },
- 'emailAddresses' : {
- 'odField' : dsattributes.kDSNAttrEMailAddress,
- 'appliesTo' : set([
- dsattributes.kDSStdRecordTypeUsers,
- dsattributes.kDSStdRecordTypeGroups,
- ]),
- },
- 'recordName' : {
- 'odField' : dsattributes.kDSNAttrRecordName,
- 'appliesTo' : set([
- dsattributes.kDSStdRecordTypeUsers,
- dsattributes.kDSStdRecordTypeGroups,
- dsattributes.kDSStdRecordTypeResources,
- dsattributes.kDSStdRecordTypePlaces,
- ]),
- },
- 'guid' : {
- 'odField' : dsattributes.kDS1AttrGeneratedUID,
- 'appliesTo' : set([
- dsattributes.kDSStdRecordTypeUsers,
- dsattributes.kDSStdRecordTypeGroups,
- dsattributes.kDSStdRecordTypeResources,
- dsattributes.kDSStdRecordTypePlaces,
- ]),
- },
- }
-
- _toODRecordTypes = {
- DirectoryService.recordType_users :
- dsattributes.kDSStdRecordTypeUsers,
- DirectoryService.recordType_groups :
- dsattributes.kDSStdRecordTypeGroups,
- DirectoryService.recordType_resources :
- dsattributes.kDSStdRecordTypeResources,
- DirectoryService.recordType_locations :
- dsattributes.kDSStdRecordTypePlaces,
- }
-
- _fromODRecordTypes = dict([(b, a) for a, b in _toODRecordTypes.iteritems()])
-
- def _uniqueTupleFromAttribute(self, attribute):
- if attribute:
- if isinstance(attribute, str):
- return (attribute,)
- else:
- s = set()
- return tuple([(s.add(x), x)[1] for x in attribute if x not in s])
- else:
- return ()
-
-
- def _setFromAttribute(self, attribute, lower=False):
- if attribute:
- if isinstance(attribute, str):
- return set((attribute.lower() if lower else attribute,))
- else:
- return set([item.lower() if lower else item for item in attribute])
- else:
- return ()
-
-
- def recordsMatchingTokens(self, tokens, context=None, lookupMethod=None):
- """
- @param tokens: The tokens to search on
- @type tokens: C{list} of C{str} (utf-8 bytes)
- @param context: An indication of what the end user is searching
- for; "attendee", "location", or None
- @type context: C{str}
- @return: a deferred sequence of L{IDirectoryRecord}s which
- match the given tokens and optional context.
-
- Each token is searched for within each record's full name and
- email address; if each token is found within a record that
- record is returned in the results.
-
- If context is None, all record types are considered. If
- context is "location", only locations are considered. If
- context is "attendee", only users, groups, and resources
- are considered.
- """
-
- if lookupMethod is None:
- lookupMethod = self.odModule.queryRecordsWithAttributes_list
-
- def collectResults(results):
- self.log.debug("Got back %d records from OD" % (len(results),))
- for _ignore_key, value in results:
- # self.log.debug("OD result: {key} {value}", key=key, value=value)
- try:
- recordNodeName = value.get(
- dsattributes.kDSNAttrMetaNodeLocation)
- recordShortNames = self._uniqueTupleFromAttribute(
- value.get(dsattributes.kDSNAttrRecordName))
-
- recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
-
- recordType = value.get(dsattributes.kDSNAttrRecordType)
- if isinstance(recordType, list):
- recordType = recordType[0]
- if not recordType:
- continue
- recordType = self._fromODRecordTypes[recordType]
-
- # Skip if group restriction is in place and guid is not
- # a member (but don't skip any groups)
- if (recordType != self.recordType_groups and
- self.restrictedGUIDs is not None):
- if str(recordGUID) not in self.restrictedGUIDs:
- continue
-
- recordAuthIDs = self._setFromAttribute(
- value.get(dsattributes.kDSNAttrAltSecurityIdentities))
- recordFullName = value.get(
- dsattributes.kDS1AttrDistinguishedName)
- recordFirstName = value.get(dsattributes.kDS1AttrFirstName)
- recordLastName = value.get(dsattributes.kDS1AttrLastName)
- recordEmailAddresses = self._setFromAttribute(
- value.get(dsattributes.kDSNAttrEMailAddress),
- lower=True)
-
- # Special case for groups, which have members.
- if recordType == self.recordType_groups:
- memberGUIDs = value.get(dsattributes.kDSNAttrGroupMembers)
- if memberGUIDs is None:
- memberGUIDs = ()
- elif type(memberGUIDs) is str:
- memberGUIDs = (memberGUIDs,)
- nestedGUIDs = value.get(dsattributes.kDSNAttrNestedGroups)
- if nestedGUIDs:
- if type(nestedGUIDs) is str:
- nestedGUIDs = (nestedGUIDs,)
- memberGUIDs += tuple(nestedGUIDs)
- else:
- nestedGUIDs = ()
- else:
- nestedGUIDs = ()
- memberGUIDs = ()
-
- # Create records but don't store them in our index or
- # send them to memcached, because these are transient,
- # existing only so we can create principal resource
- # objects that are used to generate the REPORT result.
-
- record = OpenDirectoryRecord(
- service=self,
- recordType=recordType,
- guid=recordGUID,
- nodeName=recordNodeName,
- shortNames=recordShortNames,
- authIDs=recordAuthIDs,
- fullName=recordFullName,
- firstName=recordFirstName,
- lastName=recordLastName,
- emailAddresses=recordEmailAddresses,
- memberGUIDs=memberGUIDs,
- nestedGUIDs=nestedGUIDs,
- extProxies=(),
- extReadOnlyProxies=(),
- )
-
- # (Copied from below)
- # Look up augment information
- # TODO: this needs to be deferred but for now we hard code
- # the deferred result because we know it is completing
- # immediately.
- if self.augmentService is not None:
- d = self.augmentService.getAugmentRecord(record.guid,
- recordType)
- d.addCallback(lambda x: record.addAugmentInformation(x))
-
- yield record
-
- except KeyError:
- pass
-
-
- def multiQuery(directory, queries, recordTypes, attrs):
- byGUID = {}
- sets = []
-
- caseInsensitive = True
- for compound in queries:
- compound = compound.generate()
-
- try:
- startTime = time.time()
- queryResults = lookupMethod(
- directory,
- compound,
- caseInsensitive,
- recordTypes,
- attrs,
- )
- totalTime = time.time() - startTime
-
- newSet = set()
- for recordName, data in queryResults:
- guid = data.get(dsattributes.kDS1AttrGeneratedUID, None)
- if guid:
- byGUID[guid] = (recordName, data)
- newSet.add(guid)
-
- self.log.debug("Attendee OD query: Types %s, Query %s, %.2f sec, %d results" %
- (recordTypes, compound, totalTime, len(queryResults)))
- sets.append(newSet)
-
- except self.odModule.ODError, e:
- self.log.error("Ignoring OD Error: %d %s" %
- (e.message[1], e.message[0]))
- continue
-
- results = []
- for guid in set.intersection(*sets):
- recordName, data = byGUID.get(guid, None)
- if data is not None:
- results.append((data[dsattributes.kDSNAttrRecordName], data))
- return results
-
- localQueries = buildLocalQueriesFromTokens(tokens, self._ODFields)
- nestedQuery = buildNestedQueryFromTokens(tokens, self._ODFields)
-
- # Starting with the record types corresponding to the context...
- recordTypes = self.recordTypesForSearchContext(context)
- # ...limit to the types this service supports...
- recordTypes = [r for r in recordTypes if r in self.recordTypes()]
- # ...and map those to OD representations...
- recordTypes = [self._toODRecordTypes[r] for r in recordTypes]
-
- if recordTypes:
- # Perform the complex/nested query. If there was more than one
- # token, this won't match anything in /Local, therefore we run
- # the un-nested queries below and AND the results ourselves in
- # multiQuery.
- results = multiQuery(
- self.directory,
- [nestedQuery],
- recordTypes,
- [
- dsattributes.kDS1AttrGeneratedUID,
- dsattributes.kDSNAttrRecordName,
- dsattributes.kDSNAttrAltSecurityIdentities,
- dsattributes.kDSNAttrRecordType,
- dsattributes.kDS1AttrDistinguishedName,
- dsattributes.kDS1AttrFirstName,
- dsattributes.kDS1AttrLastName,
- dsattributes.kDSNAttrEMailAddress,
- dsattributes.kDSNAttrMetaNodeLocation,
- dsattributes.kDSNAttrGroupMembers,
- dsattributes.kDSNAttrNestedGroups,
- ]
- )
- if self.localNode is not None and len(tokens) > 1:
- # /Local is in our search path and the complex query above
- # would not have matched anything in /Local. So now run
- # the un-nested queries.
- results.extend(
- multiQuery(
- self.localNode,
- localQueries,
- recordTypes,
- [
- dsattributes.kDS1AttrGeneratedUID,
- dsattributes.kDSNAttrRecordName,
- dsattributes.kDSNAttrAltSecurityIdentities,
- dsattributes.kDSNAttrRecordType,
- dsattributes.kDS1AttrDistinguishedName,
- dsattributes.kDS1AttrFirstName,
- dsattributes.kDS1AttrLastName,
- dsattributes.kDSNAttrEMailAddress,
- dsattributes.kDSNAttrMetaNodeLocation,
- dsattributes.kDSNAttrGroupMembers,
- dsattributes.kDSNAttrNestedGroups,
- ]
- )
- )
- return succeed(collectResults(results))
- else:
- return succeed([])
-
-
- def recordsMatchingFields(self, fields, operand="or", recordType=None,
- lookupMethod=None):
-
- if lookupMethod is None:
- lookupMethod = self.odModule.queryRecordsWithAttribute_list
-
- # Note that OD applies case-sensitivity globally across the entire
- # query, not per expression, so the current code uses whatever is
- # specified in the last field in the fields list
-
- def collectResults(results):
- self.log.debug("Got back %d records from OD" % (len(results),))
- for _ignore_key, value in results:
- # self.log.debug("OD result: {key} {value}", key=key, value=value)
- try:
- recordNodeName = value.get(
- dsattributes.kDSNAttrMetaNodeLocation)
- recordShortNames = self._uniqueTupleFromAttribute(
- value.get(dsattributes.kDSNAttrRecordName))
-
- recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
-
- recordType = value.get(dsattributes.kDSNAttrRecordType)
- if isinstance(recordType, list):
- recordType = recordType[0]
- if not recordType:
- continue
- recordType = self._fromODRecordTypes[recordType]
-
- # Skip if group restriction is in place and guid is not
- # a member (but don't skip any groups)
- if (recordType != self.recordType_groups and
- self.restrictedGUIDs is not None):
- if str(recordGUID) not in self.restrictedGUIDs:
- continue
-
- recordAuthIDs = self._setFromAttribute(
- value.get(dsattributes.kDSNAttrAltSecurityIdentities))
- recordFullName = value.get(
- dsattributes.kDS1AttrDistinguishedName)
- recordFirstName = value.get(dsattributes.kDS1AttrFirstName)
- recordLastName = value.get(dsattributes.kDS1AttrLastName)
- recordEmailAddresses = self._setFromAttribute(
- value.get(dsattributes.kDSNAttrEMailAddress),
- lower=True)
-
- # Special case for groups, which have members.
- if recordType == self.recordType_groups:
- memberGUIDs = value.get(dsattributes.kDSNAttrGroupMembers)
- if memberGUIDs is None:
- memberGUIDs = ()
- elif type(memberGUIDs) is str:
- memberGUIDs = (memberGUIDs,)
- nestedGUIDs = value.get(dsattributes.kDSNAttrNestedGroups)
- if nestedGUIDs:
- if type(nestedGUIDs) is str:
- nestedGUIDs = (nestedGUIDs,)
- memberGUIDs += tuple(nestedGUIDs)
- else:
- nestedGUIDs = ()
- else:
- nestedGUIDs = ()
- memberGUIDs = ()
-
- # Create records but don't store them in our index or
- # send them to memcached, because these are transient,
- # existing only so we can create principal resource
- # objects that are used to generate the REPORT result.
-
- record = OpenDirectoryRecord(
- service=self,
- recordType=recordType,
- guid=recordGUID,
- nodeName=recordNodeName,
- shortNames=recordShortNames,
- authIDs=recordAuthIDs,
- fullName=recordFullName,
- firstName=recordFirstName,
- lastName=recordLastName,
- emailAddresses=recordEmailAddresses,
- memberGUIDs=memberGUIDs,
- nestedGUIDs=nestedGUIDs,
- extProxies=(),
- extReadOnlyProxies=(),
- )
-
- # (Copied from below)
- # Look up augment information
- # TODO: this needs to be deferred but for now we hard code
- # the deferred result because we know it is completing
- # immediately.
- if self.augmentService is not None:
- d = self.augmentService.getAugmentRecord(record.guid,
- recordType)
- d.addCallback(lambda x: record.addAugmentInformation(x))
-
- yield record
-
- except KeyError:
- pass
-
-
- def multiQuery(directory, queries, attrs, operand):
- byGUID = {}
- sets = []
-
- for query, recordTypes in queries.iteritems():
- ODField, value, caseless, matchType = query
- if matchType == "starts-with":
- comparison = dsattributes.eDSStartsWith
- elif matchType == "contains":
- comparison = dsattributes.eDSContains
- else:
- comparison = dsattributes.eDSExact
-
- self.log.debug("Calling OD: Types %s, Field %s, Value %s, Match %s, Caseless %s" %
- (recordTypes, ODField, value, matchType, caseless))
-
- try:
- queryResults = lookupMethod(
- directory,
- ODField,
- value,
- comparison,
- caseless,
- recordTypes,
- attrs,
- )
-
- if operand == dsquery.expression.OR:
- for recordName, data in queryResults:
- guid = data.get(dsattributes.kDS1AttrGeneratedUID, None)
- if guid:
- byGUID[guid] = (recordName, data)
- else: # AND
- newSet = set()
- for recordName, data in queryResults:
- guid = data.get(dsattributes.kDS1AttrGeneratedUID, None)
- if guid:
- byGUID[guid] = (recordName, data)
- newSet.add(guid)
-
- sets.append(newSet)
-
- except self.odModule.ODError, e:
- self.log.error("Ignoring OD Error: %d %s" %
- (e.message[1], e.message[0]))
- continue
-
- if operand == dsquery.expression.OR:
- return byGUID.values()
-
- else:
- results = []
- for guid in set.intersection(*sets):
- recordName, data = byGUID.get(guid, None)
- if data is not None:
- results.append((data[dsattributes.kDSNAttrRecordName], data))
- return results
-
- operand = (dsquery.expression.OR if operand == "or"
- else dsquery.expression.AND)
-
- if recordType is None:
- # The client is looking for records in any of the four types
- recordTypes = set(self._toODRecordTypes.values())
- else:
- # The client is after only one recordType
- recordTypes = [self._toODRecordTypes[recordType]]
-
- queries = buildQueries(recordTypes, fields, self._ODFields)
-
- results = multiQuery(
- self.directory,
- queries,
- [
- dsattributes.kDS1AttrGeneratedUID,
- dsattributes.kDSNAttrRecordName,
- dsattributes.kDSNAttrAltSecurityIdentities,
- dsattributes.kDSNAttrRecordType,
- dsattributes.kDS1AttrDistinguishedName,
- dsattributes.kDS1AttrFirstName,
- dsattributes.kDS1AttrLastName,
- dsattributes.kDSNAttrEMailAddress,
- dsattributes.kDSNAttrMetaNodeLocation,
- dsattributes.kDSNAttrGroupMembers,
- dsattributes.kDSNAttrNestedGroups,
- ],
- operand
- )
- return succeed(collectResults(results))
-
-
- def queryDirectory(self, recordTypes, indexType, indexKey,
- lookupMethod=None):
-
- if lookupMethod is None:
- lookupMethod = self.odModule.queryRecordsWithAttribute_list
-
- origIndexKey = indexKey
- if indexType == self.INDEX_TYPE_CUA:
- # The directory doesn't contain CUAs, so we need to convert
- # the CUA to the appropriate field name and value:
- queryattr, indexKey = cuAddressConverter(indexKey)
- # queryattr will be one of:
- # guid, emailAddresses, or recordName
- # ...which will need to be mapped to DS
- queryattr = self._ODFields[queryattr]['odField']
-
- else:
- queryattr = {
- self.INDEX_TYPE_SHORTNAME : dsattributes.kDSNAttrRecordName,
- self.INDEX_TYPE_GUID : dsattributes.kDS1AttrGeneratedUID,
- self.INDEX_TYPE_AUTHID : dsattributes.kDSNAttrAltSecurityIdentities,
- }.get(indexType)
- assert queryattr is not None, "Invalid type for record faulting query"
- # Make all OD queries case insensitive
- caseInsensitive = True
-
- results = []
- for recordType in recordTypes:
-
- attrs = [
- dsattributes.kDS1AttrGeneratedUID,
- dsattributes.kDSNAttrRecordName,
- dsattributes.kDSNAttrAltSecurityIdentities,
- dsattributes.kDSNAttrRecordType,
- dsattributes.kDS1AttrDistinguishedName,
- dsattributes.kDS1AttrFirstName,
- dsattributes.kDS1AttrLastName,
- dsattributes.kDSNAttrEMailAddress,
- dsattributes.kDSNAttrMetaNodeLocation,
- ]
-
- if recordType == DirectoryService.recordType_users:
- listRecordTypes = [self._toODRecordTypes[recordType]]
-
- elif recordType in (
- DirectoryService.recordType_resources,
- DirectoryService.recordType_locations,
- ):
- if queryattr == dsattributes.kDSNAttrEMailAddress:
- continue
-
- listRecordTypes = [self._toODRecordTypes[recordType]]
-
- elif recordType == DirectoryService.recordType_groups:
-
- if queryattr == dsattributes.kDSNAttrEMailAddress:
- continue
-
- listRecordTypes = [dsattributes.kDSStdRecordTypeGroups]
- attrs.append(dsattributes.kDSNAttrGroupMembers)
- attrs.append(dsattributes.kDSNAttrNestedGroups)
-
- else:
- raise UnknownRecordTypeError("Unknown OpenDirectory record type: %s" % (recordType))
-
- # Because we're getting transient OD error -14987, try 3 times:
- for _ignore in xrange(3):
- try:
- self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
- self.directory,
- queryattr,
- indexKey,
- dsattributes.eDSExact,
- caseInsensitive,
- listRecordTypes,
- attrs,
- ))
- lookedUp = lookupMethod(
- self.directory,
- queryattr,
- indexKey,
- dsattributes.eDSExact,
- caseInsensitive,
- listRecordTypes,
- attrs,
- )
- results.extend(lookedUp)
-
- except self.odModule.ODError, ex:
- if ex.message[1] == -14987:
- # Fall through and retry
- self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
- elif ex.message[1] == -14140 or ex.message[1] == -14200:
- # Unsupported attribute on record - don't fail
- return
- else:
- self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
- raise
- else:
- # Success, so break the retry loop
- break
-
- self.log.debug("opendirectory.queryRecordsWithAttribute_list matched records: %s" % (len(results),))
-
- enabledRecords = []
- disabledRecords = []
-
- for (recordShortName, value) in results:
-
- # Now get useful record info.
- recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
- recordShortNames = self._uniqueTupleFromAttribute(value.get(dsattributes.kDSNAttrRecordName))
- recordType = value.get(dsattributes.kDSNAttrRecordType)
- if isinstance(recordType, list):
- recordType = recordType[0]
- recordAuthIDs = self._setFromAttribute(value.get(dsattributes.kDSNAttrAltSecurityIdentities))
- recordFullName = value.get(dsattributes.kDS1AttrDistinguishedName)
- recordFirstName = value.get(dsattributes.kDS1AttrFirstName)
- recordLastName = value.get(dsattributes.kDS1AttrLastName)
- recordEmailAddresses = self._setFromAttribute(value.get(dsattributes.kDSNAttrEMailAddress), lower=True)
- recordNodeName = value.get(dsattributes.kDSNAttrMetaNodeLocation)
-
- if not recordType:
- self.log.debug("Record (unknown)%s in node %s has no recordType; ignoring."
- % (recordShortName, recordNodeName))
- continue
-
- recordType = self._fromODRecordTypes[recordType]
-
- if not recordGUID:
- self.log.debug("Record (%s)%s in node %s has no GUID; ignoring."
- % (recordType, recordShortName, recordNodeName))
- continue
-
- if recordGUID.lower().startswith("ffffeeee-dddd-cccc-bbbb-aaaa"):
- self.log.debug("Ignoring system record (%s)%s in node %s."
- % (recordType, recordShortName, recordNodeName))
- continue
-
- # If restrictToGroup is in effect, all guids which are not a member
- # of that group are disabled (overriding the augments db).
- if (self.restrictedGUIDs is not None):
- unrestricted = recordGUID in self.restrictedGUIDs
- else:
- unrestricted = True
-
- # Special case for groups, which have members.
- if recordType == self.recordType_groups:
- memberGUIDs = value.get(dsattributes.kDSNAttrGroupMembers)
- if memberGUIDs is None:
- memberGUIDs = ()
- elif type(memberGUIDs) is str:
- memberGUIDs = (memberGUIDs,)
- nestedGUIDs = value.get(dsattributes.kDSNAttrNestedGroups)
- if nestedGUIDs:
- if type(nestedGUIDs) is str:
- nestedGUIDs = (nestedGUIDs,)
- memberGUIDs += tuple(nestedGUIDs)
- else:
- nestedGUIDs = ()
- else:
- memberGUIDs = ()
- nestedGUIDs = ()
-
- # Special case for resources and locations
- autoSchedule = False
- proxyGUIDs = ()
- readOnlyProxyGUIDs = ()
- if recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
- resourceInfo = value.get(dsattributes.kDSNAttrResourceInfo)
- if resourceInfo is not None:
- if type(resourceInfo) is not str:
- resourceInfo = resourceInfo[0]
- try:
- autoSchedule, proxy, read_only_proxy = self.parseResourceInfo(resourceInfo, recordGUID, recordType, recordShortName)
- except ValueError:
- continue
- if proxy:
- proxyGUIDs = (proxy,)
- if read_only_proxy:
- readOnlyProxyGUIDs = (read_only_proxy,)
-
- record = OpenDirectoryRecord(
- service=self,
- recordType=recordType,
- guid=recordGUID,
- nodeName=recordNodeName,
- shortNames=recordShortNames,
- authIDs=recordAuthIDs,
- fullName=recordFullName,
- firstName=recordFirstName,
- lastName=recordLastName,
- emailAddresses=recordEmailAddresses,
- memberGUIDs=memberGUIDs,
- nestedGUIDs=nestedGUIDs,
- extProxies=proxyGUIDs,
- extReadOnlyProxies=readOnlyProxyGUIDs,
- )
-
- # Look up augment information
- # TODO: this needs to be deferred but for now we hard code the deferred result because
- # we know it is completing immediately.
- if self.augmentService is not None:
- d = self.augmentService.getAugmentRecord(record.guid,
- recordType)
- d.addCallback(lambda x: record.addAugmentInformation(x))
-
- # Override based on ResourceInfo
- if autoSchedule:
- record.autoSchedule = True
-
- if not unrestricted:
- self.log.debug("%s is not enabled because it's not a member of group: %s" % (recordGUID, self.restrictToGroup))
- record.enabledForCalendaring = False
- record.enabledForAddressBooks = False
-
- record.applySACLs()
-
- if record.enabledForCalendaring:
- enabledRecords.append(record)
- else:
- disabledRecords.append(record)
-
- record = None
- if len(enabledRecords) == 1:
- record = enabledRecords[0]
- elif len(enabledRecords) == 0 and len(disabledRecords) == 1:
- record = disabledRecords[0]
- elif indexType == self.INDEX_TYPE_GUID and len(enabledRecords) > 1:
- self.log.error("Duplicate records found for GUID %s:" % (indexKey,))
- for duplicateRecord in enabledRecords:
- self.log.error("Duplicate: %s" % (", ".join(duplicateRecord.shortNames)))
-
- if record:
- if isinstance(origIndexKey, unicode):
- origIndexKey = origIndexKey.encode("utf-8")
- self.log.debug("Storing (%s %s) %s in internal cache" % (indexType, origIndexKey, record))
-
- self.recordCacheForType(recordType).addRecord(record, indexType, origIndexKey)
-
-
- def getResourceInfo(self):
- """
- Resource information including proxy assignments for resource and
- locations, as well as auto-schedule settings, used to live in the
- directory. This method fetches old resource info for migration
- purposes.
- """
- attrs = [
- dsattributes.kDS1AttrGeneratedUID,
- dsattributes.kDSNAttrResourceInfo,
- ]
-
- for recordType in (dsattributes.kDSStdRecordTypePlaces, dsattributes.kDSStdRecordTypeResources):
- try:
- self.log.debug("opendirectory.listAllRecordsWithAttributes_list(%r,%r,%r)" % (
- self.directory,
- recordType,
- attrs,
- ))
- results = self.odModule.listAllRecordsWithAttributes_list(
- self.directory,
- recordType,
- attrs,
- )
- except self.odModule.ODError, ex:
- self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
- raise
-
- for (recordShortName, value) in results:
- recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
- resourceInfo = value.get(dsattributes.kDSNAttrResourceInfo)
- if resourceInfo is not None:
- if type(resourceInfo) is not str:
- resourceInfo = resourceInfo[0]
- try:
- autoSchedule, proxy, readOnlyProxy = self.parseResourceInfo(resourceInfo,
- recordGUID, recordType, recordShortName)
- except ValueError:
- continue
- yield recordGUID, autoSchedule, proxy, readOnlyProxy
-
-
- def isAvailable(self):
- """
- Returns True if all configured directory nodes are accessible, False otherwise
- """
-
- if self.node == "/Search":
- result = self.odModule.getNodeAttributes(self.directory, "/Search",
- (dsattributes.kDS1AttrSearchPath,))
- nodes = result[dsattributes.kDS1AttrSearchPath]
- else:
- nodes = [self.node]
-
- try:
- for node in nodes:
- self.odModule.getNodeAttributes(self.directory, node, [dsattributes.kDSNAttrNodePath])
- except self.odModule.ODError:
- self.log.warn("OpenDirectory Node %s not available" % (node,))
- return False
-
- return True
-
-
- @inlineCallbacks
- def getGroups(self, guids):
- """
- Returns a set of group records for the list of guids passed in. For
- any group that also contains subgroups, those subgroups' records are
- also returned, and so on.
- """
-
- recordsByGUID = {}
- valuesToFetch = guids
-
- loop = 1
- while valuesToFetch:
- self.log.debug("getGroups loop %d" % (loop,))
-
- results = []
-
- for batch in splitIntoBatches(valuesToFetch, self.batchSize):
- fields = []
- for value in batch:
- fields.append(["guid", value, False, "equals"])
- self.log.debug("getGroups fetching batch of %d" %
- (len(fields),))
- result = list((yield self.recordsMatchingFields(fields,
- recordType=self.recordType_groups)))
- results.extend(result)
- self.log.debug("getGroups got back batch of %d for subtotal of %d" %
- (len(result), len(results)))
-
- # Reset values for next iteration
- valuesToFetch = set()
-
- for record in results:
- guid = record.guid
- if guid not in recordsByGUID:
- recordsByGUID[guid] = record
-
- # record.nestedGUIDs() contains the sub groups of this group
- for memberGUID in record.nestedGUIDs():
- if memberGUID not in recordsByGUID:
- self.log.debug("getGroups group %s contains group %s" %
- (record.guid, memberGUID))
- valuesToFetch.add(memberGUID)
-
- loop += 1
-
- returnValue(recordsByGUID.values())
-
-
-
-def buildQueries(recordTypes, fields, mapping):
- """
- Determine how many queries need to be performed in order to work around opendirectory
- quirks, where searching on fields that don't apply to a given recordType returns incorrect
- results (either none, or all records).
- """
-
- queries = {}
- for recordType in recordTypes:
- for field, value, caseless, matchType in fields:
- if field in mapping:
- if recordType in mapping[field]['appliesTo']:
- ODField = mapping[field]['odField']
- key = (ODField, value, caseless, matchType)
- queries.setdefault(key, []).append(recordType)
-
- return queries
-
-
-
-def buildLocalQueriesFromTokens(tokens, mapping):
- """
- OD /Local doesn't support nested complex queries, so create a list of
- complex queries that will be ANDed together in recordsMatchingTokens()
-
- @param tokens: The tokens to search on
- @type tokens: C{list} of C{str}
- @param mapping: The mapping of DirectoryRecord attributes to OD attributes
- @type mapping: C{dict}
- @return: A list of expression objects
- @type: C{list}
- """
-
- if len(tokens) == 0:
- return None
-
- fields = [
- ("fullName", dsattributes.eDSContains),
- ("emailAddresses", dsattributes.eDSStartsWith),
- ]
-
- results = []
- for token in tokens:
- queries = []
- for field, comparison in fields:
- ODField = mapping[field]['odField']
- query = dsquery.match(ODField, token, comparison)
- queries.append(query)
- results.append(dsquery.expression(dsquery.expression.OR, queries))
- return results
-
-
-
-def buildNestedQueryFromTokens(tokens, mapping):
- """
- Build a DS query espression such that all the tokens must appear in either
- the fullName (anywhere), emailAddresses (at the beginning) or record name
- (at the beginning).
-
- @param tokens: The tokens to search on
- @type tokens: C{list} of C{str}
- @param mapping: The mapping of DirectoryRecord attributes to OD attributes
- @type mapping: C{dict}
- @return: The nested expression object
- @type: dsquery.expression
- """
-
- if len(tokens) == 0:
- return None
-
- fields = [
- ("fullName", dsattributes.eDSContains),
- ("emailAddresses", dsattributes.eDSStartsWith),
- ("recordName", dsattributes.eDSStartsWith),
- ]
-
- outer = []
- for token in tokens:
- inner = []
- for field, comparison in fields:
- ODField = mapping[field]['odField']
- query = dsquery.match(ODField, token, comparison)
- inner.append(query)
- outer.append(dsquery.expression(dsquery.expression.OR, inner))
- return dsquery.expression(dsquery.expression.AND, outer)
-
-
-
-class OpenDirectoryRecord(CachingDirectoryRecord):
- """
- OpenDirectory implementation of L{IDirectoryRecord}.
- """
- def __init__(
- self, service, recordType, guid, nodeName, shortNames, authIDs,
- fullName, firstName, lastName, emailAddresses, memberGUIDs, nestedGUIDs,
- extProxies, extReadOnlyProxies,
- ):
- super(OpenDirectoryRecord, self).__init__(
- service=service,
- recordType=recordType,
- guid=guid,
- shortNames=shortNames,
- authIDs=authIDs,
- fullName=fullName,
- firstName=firstName,
- lastName=lastName,
- emailAddresses=emailAddresses,
- extProxies=extProxies,
- extReadOnlyProxies=extReadOnlyProxies,
- )
- self.nodeName = nodeName
-
- self._memberGUIDs = tuple(memberGUIDs)
- self._nestedGUIDs = tuple(nestedGUIDs)
- self._groupMembershipGUIDs = None
-
-
- def __repr__(self):
- if self.service.realmName == self.nodeName:
- location = self.nodeName
- else:
- location = "%s->%s" % (self.service.realmName, self.nodeName)
-
- return "<%s[%s@%s(%s)] %s(%s) %r>" % (
- self.__class__.__name__,
- self.recordType,
- self.service.guid,
- location,
- self.guid,
- ",".join(self.shortNames),
- self.fullName
- )
-
-
- def members(self):
- if self.recordType != self.service.recordType_groups:
- return
-
- for guid in self._memberGUIDs:
- userRecord = self.service.recordWithGUID(guid)
- if userRecord is not None:
- yield userRecord
-
-
- def groups(self):
- if self._groupMembershipGUIDs is None:
- self._groupMembershipGUIDs = self.service.groupsForGUID(self.guid)
-
- for guid in self._groupMembershipGUIDs:
- record = self.service.recordWithGUID(guid)
- if record:
- yield record
-
-
- def memberGUIDs(self):
- return set(self._memberGUIDs)
-
-
- def nestedGUIDs(self):
- return set(self._nestedGUIDs)
-
-
- def verifyCredentials(self, credentials):
- if isinstance(credentials, UsernamePassword):
- # Check cached password
- try:
- if credentials.password == self.password:
- return True
- except AttributeError:
- pass
-
- # Check with directory services
- try:
- if self.service.odModule.authenticateUserBasic(self.service.directory, self.nodeName, self.shortNames[0], credentials.password):
- # Cache the password to avoid future DS queries
- self.password = credentials.password
- return True
- except self.service.odModule.ODError, e:
- self.log.error("OpenDirectory (node=%s) error while performing basic authentication for user %s: %s"
- % (self.service.realmName, self.shortNames[0], e))
-
- return False
-
- elif isinstance(credentials, DigestedCredentials):
- #
- # We need a special format for the "challenge" and "response" strings passed into OpenDirectory, as it is
- # picky about exactly what it receives.
- #
- try:
- if "algorithm" not in credentials.fields:
- credentials.fields["algorithm"] = "md5"
- challenge = 'Digest realm="%(realm)s", nonce="%(nonce)s", algorithm=%(algorithm)s' % credentials.fields
- response = (
- 'Digest username="%(username)s", '
- 'realm="%(realm)s", '
- 'nonce="%(nonce)s", '
- 'uri="%(uri)s", '
- 'response="%(response)s",'
- 'algorithm=%(algorithm)s'
- ) % credentials.fields
- except KeyError, e:
- self.log.error(
- "OpenDirectory (node=%s) error while performing digest authentication for user %s: "
- "missing digest response field: %s in: %s"
- % (self.service.realmName, self.shortNames[0], e, credentials.fields)
- )
- return False
-
- try:
- if self.digestcache[credentials.fields["uri"]] == response:
- return True
- except (AttributeError, KeyError):
- pass
-
- try:
- if self.service.odModule.authenticateUserDigest(
- self.service.directory,
- self.nodeName,
- self.shortNames[0],
- challenge,
- response,
- credentials.method
- ):
- try:
- cache = self.digestcache
- except AttributeError:
- cache = self.digestcache = {}
-
- cache[credentials.fields["uri"]] = response
-
- return True
- else:
- self.log.debug(
-"""OpenDirectory digest authentication failed with:
- Nodename: %s
- Username: %s
- Challenge: %s
- Response: %s
- Method: %s
-""" % (self.nodeName, self.shortNames[0], challenge, response,
- credentials.method))
-
- except self.service.odModule.ODError, e:
- self.log.error(
- "OpenDirectory (node=%s) error while performing digest authentication for user %s: %s"
- % (self.service.realmName, self.shortNames[0], e)
- )
- return False
-
- return False
-
- return super(OpenDirectoryRecord, self).verifyCredentials(credentials)
-
-
-
-class OpenDirectoryInitError(DirectoryError):
- """
- OpenDirectory initialization error.
- """
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/augment.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/augment.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/augment.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -46,6 +46,7 @@
"automatic",
))
+
class AugmentRecord(object):
"""
Augmented directory record information
@@ -75,13 +76,14 @@
self.clonedFromDefault = False
recordTypesMap = {
- "users" : "User",
- "groups" : "Group",
- "locations" : "Location",
- "resources" : "Resource",
- "addresses" : "Address",
+ "users": "User",
+ "groups": "Group",
+ "locations": "Location",
+ "resources": "Resource",
+ "addresses": "Address",
}
+
class AugmentDB(object):
"""
Abstract base class for an augment record database.
@@ -128,7 +130,6 @@
@return: L{Deferred}
"""
-
recordType = recordTypesMap[recordType]
result = (yield self._lookupAugmentRecord(uid))
@@ -266,9 +267,9 @@
self.xmlFiles = [fullServerPath(config.DataRoot, path) for path in xmlFiles]
self.xmlFileStats = {}
for path in self.xmlFiles:
- self.xmlFileStats[path] = (0, 0) # mtime, size
+ self.xmlFileStats[path] = (0, 0) # mtime, size
- self.statSeconds = statSeconds # Don't stat more often than this value
+ self.statSeconds = statSeconds # Don't stat more often than this value
self.lastCached = 0
self.db = {}
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/cachingdirectory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/cachingdirectory.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/cachingdirectory.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,473 +0,0 @@
-# -*- test-case-name: twistedcaldav.directory.test.test_cachedirectory -*-
-##
-# Copyright (c) 2009-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-"""
-Caching directory service implementation.
-"""
-
-__all__ = [
- "CachingDirectoryService",
- "CachingDirectoryRecord",
- "DictRecordTypeCache",
-]
-
-
-import time
-
-import base64
-
-from twext.python.log import Logger
-
-from twistedcaldav.config import config
-from twistedcaldav.memcacheclient import ClientFactory, MemcacheError
-from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord, DirectoryError, UnknownRecordTypeError
-from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
-from twistedcaldav.directory.util import normalizeUUID
-
-
-class RecordTypeCache(object):
- """
- Abstract class for a record type cache. We will likely have dict and memcache implementations of this.
- """
-
- def __init__(self, directoryService, recordType):
-
- self.directoryService = directoryService
- self.recordType = recordType
-
-
- def addRecord(self, record, indexType, indexKey, useMemcache=True,
- neverExpire=False):
- raise NotImplementedError()
-
-
- def removeRecord(self, record):
- raise NotImplementedError()
-
-
- def findRecord(self, indexType, indexKey):
- raise NotImplementedError()
-
-
-
-class DictRecordTypeCache(RecordTypeCache):
- """
- Cache implementation using a dict, and uses memcached to share records
- with other instances.
- """
- log = Logger()
-
- def __init__(self, directoryService, recordType):
-
- super(DictRecordTypeCache, self).__init__(directoryService, recordType)
- self.records = set()
- self.recordsIndexedBy = {
- CachingDirectoryService.INDEX_TYPE_GUID : {},
- CachingDirectoryService.INDEX_TYPE_SHORTNAME: {},
- CachingDirectoryService.INDEX_TYPE_CUA : {},
- CachingDirectoryService.INDEX_TYPE_AUTHID : {},
- }
- self.directoryService = directoryService
- self.lastPurgedTime = time.time()
-
-
- def addRecord(self, record, indexType, indexKey, useMemcache=True,
- neverExpire=False):
-
- useMemcache = useMemcache and config.Memcached.Pools.Default.ClientEnabled
- if neverExpire:
- record.neverExpire()
-
- self.records.add(record)
-
- # Also index/cache on guid
- indexTypes = [(indexType, indexKey)]
- if indexType != CachingDirectoryService.INDEX_TYPE_GUID:
- indexTypes.append((CachingDirectoryService.INDEX_TYPE_GUID,
- record.guid))
-
- for indexType, indexKey in indexTypes:
- self.recordsIndexedBy[indexType][indexKey] = record
- if useMemcache:
- key = self.directoryService.generateMemcacheKey(indexType, indexKey,
- record.recordType)
- self.log.debug("Memcache: storing %s" % (key,))
- try:
- self.directoryService.memcacheSet(key, record)
- except DirectoryMemcacheError:
- self.log.error("Memcache: failed to store %s" % (key,))
- pass
-
-
- def removeRecord(self, record):
- if record in self.records:
- self.records.remove(record)
- self.log.debug("Removed record %s" % (record.guid,))
- for indexType in self.directoryService.indexTypes():
- try:
- indexData = getattr(record, CachingDirectoryService.indexTypeToRecordAttribute[indexType])
- except AttributeError:
- continue
- if isinstance(indexData, basestring):
- indexData = [indexData]
- for item in indexData:
- try:
- del self.recordsIndexedBy[indexType][item]
- except KeyError:
- pass
-
-
- def findRecord(self, indexType, indexKey):
- self.purgeExpiredRecords()
- return self.recordsIndexedBy[indexType].get(indexKey)
-
-
- def purgeExpiredRecords(self):
- """
- Scan the cached records and remove any that have expired.
- Does nothing if we've scanned within the past cacheTimeout seconds.
- """
- if time.time() - self.lastPurgedTime > self.directoryService.cacheTimeout:
- for record in list(self.records):
- if record.isExpired():
- self.removeRecord(record)
- self.lastPurgedTime = time.time()
-
-
-
-class CachingDirectoryService(DirectoryService):
- """
- Caching Directory implementation of L{IDirectoryService}.
-
- This is class must be overridden to provide a concrete implementation.
- """
- log = Logger()
-
- INDEX_TYPE_GUID = "guid"
- INDEX_TYPE_SHORTNAME = "shortname"
- INDEX_TYPE_CUA = "cua"
- INDEX_TYPE_AUTHID = "authid"
-
- indexTypeToRecordAttribute = {
- "guid" : "guid",
- "shortname": "shortNames",
- "cua" : "calendarUserAddresses",
- "authid" : "authIDs",
- }
-
- def __init__(
- self,
- cacheTimeout=1,
- negativeCaching=False,
- cacheClass=DictRecordTypeCache,
- ):
- """
- @param cacheTimeout: C{int} number of minutes before cache is invalidated.
- """
-
- self.cacheTimeout = cacheTimeout * 60
- self.negativeCaching = negativeCaching
-
- self.cacheClass = cacheClass
- self._initCaches()
-
- super(CachingDirectoryService, self).__init__()
-
-
- def _getMemcacheClient(self, refresh=False):
- if refresh or not hasattr(self, "memcacheClient"):
- self.memcacheClient = ClientFactory.getClient(['%s:%s' %
- (config.Memcached.Pools.Default.BindAddress, config.Memcached.Pools.Default.Port)],
- debug=0, pickleProtocol=2)
- return self.memcacheClient
-
-
- def memcacheSet(self, key, record):
-
- hideService = isinstance(record, DirectoryRecord)
-
- try:
- if hideService:
- record.service = None # so we don't pickle service
-
- key = base64.b64encode(key)
- if not self._getMemcacheClient().set(key, record, time=self.cacheTimeout):
- self.log.error("Could not write to memcache, retrying")
- if not self._getMemcacheClient(refresh=True).set(
- key, record,
- time=self.cacheTimeout
- ):
- self.log.error("Could not write to memcache again, giving up")
- del self.memcacheClient
- raise DirectoryMemcacheError("Failed to write to memcache")
- finally:
- if hideService:
- record.service = self
-
-
- def memcacheGet(self, key):
-
- key = base64.b64encode(key)
- try:
- record = self._getMemcacheClient().get(key)
- if record is not None and isinstance(record, DirectoryRecord):
- record.service = self
- except MemcacheError:
- self.log.error("Could not read from memcache, retrying")
- try:
- record = self._getMemcacheClient(refresh=True).get(key)
- if record is not None and isinstance(record, DirectoryRecord):
- record.service = self
- except MemcacheError:
- self.log.error("Could not read from memcache again, giving up")
- del self.memcacheClient
- raise DirectoryMemcacheError("Failed to read from memcache")
- return record
-
-
- def generateMemcacheKey(self, indexType, indexKey, recordType):
- """
- Return a key that can be used to store/retrieve a record in memcache.
- if short-name is the indexType the recordType be encoded into the key.
-
- @param indexType: one of the indexTypes( ) values
- @type indexType: C{str}
- @param indexKey: the value being indexed
- @type indexKey: C{str}
- @param recordType: the type of record being cached
- @type recordType: C{str}
- @return: a memcache key comprised of the passed-in values and the directory
- service's baseGUID
- @rtype: C{str}
- """
- keyVersion = 2
- if indexType == CachingDirectoryService.INDEX_TYPE_SHORTNAME:
- return "dir|v%d|%s|%s|%s|%s" % (keyVersion, self.baseGUID, recordType,
- indexType, indexKey)
- else:
- return "dir|v%d|%s|%s|%s" % (keyVersion, self.baseGUID, indexType,
- indexKey)
-
-
- def _initCaches(self):
- self._recordCaches = dict([
- (recordType, self.cacheClass(self, recordType))
- for recordType in self.recordTypes()
- ])
-
- self._disabledKeys = dict([(indexType, dict()) for indexType in self.indexTypes()])
-
-
- def indexTypes(self):
-
- return (
- CachingDirectoryService.INDEX_TYPE_GUID,
- CachingDirectoryService.INDEX_TYPE_SHORTNAME,
- CachingDirectoryService.INDEX_TYPE_CUA,
- CachingDirectoryService.INDEX_TYPE_AUTHID,
- )
-
-
- def recordCacheForType(self, recordType):
- try:
- return self._recordCaches[recordType]
- except KeyError:
- raise UnknownRecordTypeError(recordType)
-
-
- def listRecords(self, recordType):
- return self.recordCacheForType(recordType).records
-
-
- def recordWithShortName(self, recordType, shortName):
- return self._lookupRecord((recordType,), CachingDirectoryService.INDEX_TYPE_SHORTNAME, shortName)
-
-
- def recordWithCalendarUserAddress(self, address):
- address = normalizeCUAddr(address)
- record = None
- if address.startswith("mailto:"):
- record = self._lookupRecord(None, CachingDirectoryService.INDEX_TYPE_CUA, address)
- return record if record and record.enabledForCalendaring else None
- else:
- return DirectoryService.recordWithCalendarUserAddress(self, address)
-
-
- def recordWithAuthID(self, authID):
- return self._lookupRecord(None, CachingDirectoryService.INDEX_TYPE_AUTHID, authID)
-
-
- def recordWithGUID(self, guid):
- guid = normalizeUUID(guid)
- return self._lookupRecord(None, CachingDirectoryService.INDEX_TYPE_GUID, guid)
-
- recordWithUID = recordWithGUID
-
- def _lookupRecord(self, recordTypes, indexType, indexKey):
-
- if recordTypes is None:
- recordTypes = self.recordTypes()
- else:
- # Only use recordTypes this service supports:
- supportedRecordTypes = self.recordTypes()
- recordTypes = [t for t in recordTypes if t in supportedRecordTypes]
- if not recordTypes:
- return None
-
- def lookup():
- for recordType in recordTypes:
- record = self.recordCacheForType(recordType).findRecord(indexType, indexKey)
-
- if record:
- if record.isExpired():
- self.recordCacheForType(recordType).removeRecord(record)
- return None
- else:
- return record
- else:
- return None
-
- record = lookup()
- if record:
- return record
-
- if self.negativeCaching:
-
- # Check negative cache (take cache entry timeout into account)
- try:
- disabledTime = self._disabledKeys[indexType][indexKey]
- if time.time() - disabledTime < self.cacheTimeout:
- return None
- except KeyError:
- pass
-
- # Check memcache
- if config.Memcached.Pools.Default.ClientEnabled:
-
- # The only time the recordType arg matters is when indexType is
- # short-name, and in that case recordTypes will contain exactly
- # one recordType, so using recordTypes[0] here is always safe:
- key = self.generateMemcacheKey(indexType, indexKey, recordTypes[0])
-
- self.log.debug("Memcache: checking %s" % (key,))
-
- try:
- record = self.memcacheGet(key)
- except DirectoryMemcacheError:
- self.log.error("Memcache: failed to get %s" % (key,))
- record = None
-
- if record is None:
- self.log.debug("Memcache: miss %s" % (key,))
- else:
- self.log.debug("Memcache: hit %s" % (key,))
- self.recordCacheForType(record.recordType).addRecord(record, indexType, indexKey, useMemcache=False)
- return record
-
- if self.negativeCaching:
-
- # Check negative memcache
- try:
- val = self.memcacheGet("-%s" % (key,))
- except DirectoryMemcacheError:
- self.log.error("Memcache: failed to get -%s" % (key,))
- val = None
- if val == 1:
- self.log.debug("Memcache: negative %s" % (key,))
- self._disabledKeys[indexType][indexKey] = time.time()
- return None
-
- # Try query
- self.log.debug("Faulting record for attribute '%s' with value '%s'" % (indexType, indexKey,))
- self.queryDirectory(recordTypes, indexType, indexKey)
-
- # Now try again from cache
- record = lookup()
- if record:
- self.log.debug("Found record for attribute '%s' with value '%s'" % (indexType, indexKey,))
- return record
-
- if self.negativeCaching:
-
- # Add to negative cache with timestamp
- self.log.debug("Failed to fault record for attribute '%s' with value '%s'" % (indexType, indexKey,))
- self._disabledKeys[indexType][indexKey] = time.time()
-
- if config.Memcached.Pools.Default.ClientEnabled:
- self.log.debug("Memcache: storing (negative) %s" % (key,))
- try:
- self.memcacheSet("-%s" % (key,), 1)
- except DirectoryMemcacheError:
- self.log.error("Memcache: failed to set -%s" % (key,))
- pass
-
- return None
-
-
- def queryDirectory(self, recordTypes, indexType, indexKey):
- raise NotImplementedError()
-
-
-
-class CachingDirectoryRecord(DirectoryRecord):
-
- def __init__(
- self, service, recordType, guid,
- shortNames=(), authIDs=set(),
- fullName=None, firstName=None, lastName=None, emailAddresses=set(),
- uid=None, **kwargs
- ):
- super(CachingDirectoryRecord, self).__init__(
- service,
- recordType,
- guid,
- shortNames=shortNames,
- authIDs=authIDs,
- fullName=fullName,
- firstName=firstName,
- lastName=lastName,
- emailAddresses=emailAddresses,
- uid=uid,
- **kwargs
- )
-
- self.cachedTime = time.time()
-
-
- def neverExpire(self):
- self.cachedTime = 0
-
-
- def isExpired(self):
- """
- Returns True if this record was created more than cacheTimeout
- seconds ago
- """
- if (
- self.cachedTime != 0 and
- time.time() - self.cachedTime > self.service.cacheTimeout
- ):
- return True
- else:
- return False
-
-
-
-class DirectoryMemcacheError(DirectoryError):
- """
- Error communicating with memcached.
- """
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/calendar.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/calendar.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/calendar.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -35,7 +35,6 @@
from twisted.internet.defer import succeed, inlineCallbacks, returnValue
from twistedcaldav.config import config
-from twistedcaldav.directory.idirectory import IDirectoryService
from twistedcaldav.directory.common import uidsResourceName, \
CommonUIDProvisioningResource, CommonHomeTypeProvisioningResource
@@ -48,7 +47,10 @@
log = Logger()
+
# FIXME: copied from resource.py to avoid circular dependency
+
+
class CalDAVComplianceMixIn(object):
def davComplianceClasses(self):
return (
@@ -65,7 +67,7 @@
DAVResource,
):
def defaultAccessControlList(self):
- return config.ProvisioningResourceACL
+ return succeed(config.ProvisioningResourceACL)
def etag(self):
@@ -91,7 +93,8 @@
super(DirectoryCalendarHomeProvisioningResource, self).__init__()
- self.directory = IDirectoryService(directory)
+ # MOVE2WHO
+ self.directory = directory # IDirectoryService(directory)
self._url = url
self._newStore = store
@@ -101,8 +104,14 @@
#
# Create children
#
- for recordType in self.directory.recordTypes():
- self.putChild(recordType, DirectoryCalendarHomeTypeProvisioningResource(self, recordType))
+ # ...just "users" though. If we iterate all of the directory's
+ # recordTypes, we also get the proxy sub principal types.
+ for recordTypeName in [
+ self.directory.recordTypeToOldName(r) for r in [
+ self.directory.recordType.user
+ ]
+ ]:
+ self.putChild(recordTypeName, DirectoryCalendarHomeTypeProvisioningResource(self, recordTypeName, r))
self.putChild(uidsResourceName, DirectoryCalendarHomeUIDProvisioningResource(self))
@@ -112,7 +121,7 @@
def listChildren(self):
- return self.directory.recordTypes()
+ return [self.directory.recordTypeToOldName(r) for r in self.directory.recordTypes()]
def principalCollections(self):
@@ -127,12 +136,13 @@
return self.directory.principalCollection.principalForRecord(record)
+ @inlineCallbacks
def homeForDirectoryRecord(self, record, request):
- uidResource = self.getChild(uidsResourceName)
+ uidResource = yield self.getChild(uidsResourceName)
if uidResource is None:
- return None
+ returnValue(None)
else:
- return uidResource.homeResourceForRecord(record, request)
+ returnValue((yield uidResource.homeResourceForRecord(record, request)))
##
@@ -149,42 +159,43 @@
class DirectoryCalendarHomeTypeProvisioningResource(
- CommonHomeTypeProvisioningResource,
- DirectoryCalendarProvisioningResource
- ):
+ CommonHomeTypeProvisioningResource,
+ DirectoryCalendarProvisioningResource
+):
"""
Resource which provisions calendar home collections of a specific
record type as needed.
"""
- def __init__(self, parent, recordType):
+ def __init__(self, parent, name, recordType):
"""
@param parent: the parent of this resource
@param recordType: the directory record type to provision.
"""
assert parent is not None
+ assert name is not None
assert recordType is not None
super(DirectoryCalendarHomeTypeProvisioningResource, self).__init__()
self.directory = parent.directory
+ self.name = name
self.recordType = recordType
self._parent = parent
def url(self):
- return joinURL(self._parent.url(), self.recordType)
+ return joinURL(self._parent.url(), self.name)
+ @inlineCallbacks
def listChildren(self):
if config.EnablePrincipalListings:
-
- def _recordShortnameExpand():
- for record in self.directory.listRecords(self.recordType):
- if record.enabledForCalendaring:
- for shortName in record.shortNames:
- yield shortName
-
- return _recordShortnameExpand()
+ children = []
+ for record in (yield self.directory.listRecords(self.recordType)):
+ if record.enabledForCalendaring:
+ for shortName in record.shortNames:
+ children.append(shortName)
+ returnValue(children)
else:
# Not a listable collection
raise HTTPError(responsecode.FORBIDDEN)
@@ -203,7 +214,7 @@
def displayName(self):
- return self.recordType
+ return self.name
##
@@ -220,9 +231,9 @@
class DirectoryCalendarHomeUIDProvisioningResource (
- CommonUIDProvisioningResource,
- DirectoryCalendarProvisioningResource
- ):
+ CommonUIDProvisioningResource,
+ DirectoryCalendarProvisioningResource
+):
homeResourceTypeName = 'calendars'
@@ -258,7 +269,7 @@
else:
# ...otherwise permissions are fixed, and are not subject to
# inheritance rules, etc.
- return succeed(self.defaultAccessControlList())
+ return self.defaultAccessControlList()
d = getWikiACL(self, request)
d.addCallback(gotACL)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/calendaruserproxy.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/calendaruserproxy.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/calendaruserproxy.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -29,37 +29,41 @@
import itertools
import time
+import uuid
+from twext.python.log import Logger
+from twext.who.idirectory import RecordType as BaseRecordType
from twisted.internet.defer import succeed, inlineCallbacks, returnValue
-from txweb2 import responsecode
-from txweb2.http import HTTPError, StatusResponse
-from txdav.xml import element as davxml
-from txdav.xml.base import dav_namespace
-from txweb2.dav.util import joinURL
-from txweb2.dav.noneprops import NonePropertyStore
+from twisted.python.modules import getModule
+from twisted.web.template import XMLFile, Element, renderer
+from twistedcaldav.config import config, fullServerPath
+from twistedcaldav.database import (
+ AbstractADBAPIDatabase, ADBAPISqliteMixin, ADBAPIPostgreSQLMixin
+)
+from twistedcaldav.directory.util import normalizeUUID
+from twistedcaldav.directory.util import (
+ formatLink, formatLinks, formatPrincipals
+)
-from twext.python.log import Logger
-
-from twisted.web.template import XMLFile, Element, renderer
-from twisted.python.modules import getModule
+from twistedcaldav.extensions import (
+ DAVPrincipalResource, DAVResourceWithChildrenMixin
+)
from twistedcaldav.extensions import DirectoryElement
-from twistedcaldav.directory.principal import formatLink
-from twistedcaldav.directory.principal import formatLinks
-from twistedcaldav.directory.principal import formatPrincipals
-
-from twistedcaldav.directory.util import normalizeUUID
-from twistedcaldav.config import config, fullServerPath
-from twistedcaldav.database import AbstractADBAPIDatabase, ADBAPISqliteMixin, \
- ADBAPIPostgreSQLMixin
-from twistedcaldav.extensions import DAVPrincipalResource, \
- DAVResourceWithChildrenMixin
from twistedcaldav.extensions import ReadOnlyWritePropertiesResourceMixIn
from twistedcaldav.memcacher import Memcacher
from twistedcaldav.resource import CalDAVComplianceMixIn
+from txdav.who.delegates import RecordType as DelegateRecordType
+from txdav.xml import element as davxml
+from txdav.xml.base import dav_namespace
+from txweb2 import responsecode
+from txweb2.dav.noneprops import NonePropertyStore
+from txweb2.dav.util import joinURL
+from txweb2.http import HTTPError, StatusResponse
thisModule = getModule(__name__)
log = Logger()
+
class PermissionsMixIn (ReadOnlyWritePropertiesResourceMixIn):
def defaultAccessControlList(self):
aces = (
@@ -86,13 +90,13 @@
for principal in config.AdminPrincipals
))
- return davxml.ACL(*aces)
+ return succeed(davxml.ACL(*aces))
def accessControlList(self, request, inheritance=True, expanding=False,
inherited_aces=None):
# Permissions here are fixed, and are not subject to inheritance rules, etc.
- return succeed(self.defaultAccessControlList())
+ return self.defaultAccessControlList()
@@ -119,13 +123,20 @@
record = self.resource.parent.record
resource = self.resource
parent = self.resource.parent
+ try:
+ if isinstance(record.guid, uuid.UUID):
+ guid = str(record.guid).upper()
+ else:
+ guid = record.guid
+ except AttributeError:
+ guid = ""
return tag.fillSlots(
directoryGUID=record.service.guid,
realm=record.service.realmName,
- guid=record.guid,
- recordType=record.recordType,
+ guid=guid,
+ recordType=record.recordType.name + "s", # MOVE2WHO need mapping
shortNames=record.shortNames,
- fullName=record.fullName,
+ fullName=record.displayName,
principalUID=parent.principalUID(),
principalURL=formatLink(parent.principalURL()),
proxyPrincipalUID=resource.principalUID(),
@@ -209,9 +220,13 @@
def resourceType(self):
if self.proxyType == "calendar-proxy-read":
- return davxml.ResourceType.calendarproxyread #@UndefinedVariable
+ return davxml.ResourceType.calendarproxyread # @UndefinedVariable
elif self.proxyType == "calendar-proxy-write":
- return davxml.ResourceType.calendarproxywrite #@UndefinedVariable
+ return davxml.ResourceType.calendarproxywrite # @UndefinedVariable
+ elif self.proxyType == "calendar-proxy-read-for":
+ return davxml.ResourceType.calendarproxyreadfor # @UndefinedVariable
+ elif self.proxyType == "calendar-proxy-write-for":
+ return davxml.ResourceType.calendarproxywritefor # @UndefinedVariable
else:
return super(CalendarUserProxyPrincipalResource, self).resourceType()
@@ -270,7 +285,7 @@
principals = []
newUIDs = set()
for uri in members:
- principal = self.pcollection._principalForURI(uri)
+ principal = yield self.pcollection._principalForURI(uri)
# Invalid principals MUST result in an error.
if principal is None or principal.principalURL() != uri:
raise HTTPError(StatusResponse(
@@ -282,7 +297,9 @@
newUIDs.add(principal.principalUID())
# Get the old set of UIDs
- oldUIDs = (yield self._index().getMembers(self.uid))
+ # oldUIDs = (yield self._index().getMembers(self.uid))
+ oldPrincipals = yield self.groupMembers()
+ oldUIDs = [p.principalUID() for p in oldPrincipals]
# Change membership
yield self.setGroupMemberSetPrincipals(principals)
@@ -293,19 +310,24 @@
changedUIDs = newUIDs.symmetric_difference(oldUIDs)
for uid in changedUIDs:
- principal = self.pcollection.principalForUID(uid)
+ principal = yield self.pcollection.principalForUID(uid)
if principal:
yield principal.cacheNotifier.changed()
returnValue(True)
+ @inlineCallbacks
def setGroupMemberSetPrincipals(self, principals):
- # Map the principals to UIDs.
- return self._index().setGroupMembers(
- self.uid,
- [p.principalUID() for p in principals],
+
+ # Find our pseudo-record
+ record = yield self.parent.record.service.recordWithShortName(
+ self._recordTypeFromProxyType(),
+ self.parent.principalUID()
)
+ # Set the members
+ memberRecords = [p.record for p in principals]
+ yield record.setMembers(memberRecords)
##
@@ -349,7 +371,7 @@
@inlineCallbacks
- def _expandMemberUIDs(self, uid=None, relatives=None, uids=None, infinity=False):
+ def _expandMemberPrincipals(self, uid=None, relatives=None, uids=None, infinity=False):
if uid is None:
uid = self.principalUID()
if relatives is None:
@@ -360,14 +382,14 @@
if uid not in uids:
from twistedcaldav.directory.principal import DirectoryPrincipalResource
uids.add(uid)
- principal = self.pcollection.principalForUID(uid)
+ principal = yield self.pcollection.principalForUID(uid)
if isinstance(principal, CalendarUserProxyPrincipalResource):
members = yield self._directGroupMembers()
for member in members:
if member.principalUID() not in uids:
relatives.add(member)
if infinity:
- yield self._expandMemberUIDs(member.principalUID(), relatives, uids, infinity=infinity)
+ yield self._expandMemberPrincipals(member.principalUID(), relatives, uids, infinity=infinity)
elif isinstance(principal, DirectoryPrincipalResource):
if infinity:
members = yield principal.expandedGroupMembers()
@@ -378,30 +400,45 @@
returnValue(relatives)
+ def _recordTypeFromProxyType(self):
+ return {
+ "calendar-proxy-read": DelegateRecordType.readDelegateGroup,
+ "calendar-proxy-write": DelegateRecordType.writeDelegateGroup,
+ "calendar-proxy-read-for": DelegateRecordType.readDelegatorGroup,
+ "calendar-proxy-write-for": DelegateRecordType.writeDelegatorGroup,
+ }.get(self.proxyType)
+
+
@inlineCallbacks
def _directGroupMembers(self):
- # Get member UIDs from database and map to principal resources
- members = yield self._index().getMembers(self.uid)
- found = []
- for uid in members:
- p = self.pcollection.principalForUID(uid)
- if p:
- # Only principals enabledForLogin can be a delegate
- # (and groups as well)
- if (p.record.enabledForLogin or
- p.record.recordType == p.record.service.recordType_groups):
- found.append(p)
- # Make sure any outstanding deletion timer entries for
- # existing principals are removed
- yield self._index().refreshPrincipal(uid)
- else:
- self.log.warn("Delegate is missing from directory: %s" % (uid,))
+ """
+ Fault in the record representing the sub principal for this proxy type
+ (either read-only or read-write), then fault in the direct members of
+ that record.
+ """
+ memberPrincipals = []
+ record = yield self.parent.record.service.recordWithShortName(
+ self._recordTypeFromProxyType(),
+ self.parent.principalUID()
+ )
+ if record is not None:
+ memberRecords = yield record.members()
+ for record in memberRecords:
+ if record is not None:
+ principal = yield self.pcollection.principalForRecord(
+ record
+ )
+ if principal is not None:
+ if (
+ principal.record.loginAllowed or
+ principal.record.recordType is BaseRecordType.group
+ ):
+ memberPrincipals.append(principal)
+ returnValue(memberPrincipals)
- returnValue(found)
-
def groupMembers(self):
- return self._expandMemberUIDs()
+ return self._expandMemberPrincipals()
@inlineCallbacks
@@ -410,18 +447,12 @@
Return the complete, flattened set of principals belonging to this
group.
"""
- returnValue((yield self._expandMemberUIDs(infinity=True)))
+ returnValue((yield self._expandMemberPrincipals(infinity=True)))
def groupMemberships(self):
- # Get membership UIDs and map to principal resources
- d = self._index().getMemberships(self.uid)
- d.addCallback(lambda memberships: [
- p for p
- in [self.pcollection.principalForUID(uid) for uid in memberships]
- if p
- ])
- return d
+ # Unlikely to ever want to put a subprincipal into a group
+ return succeed([])
@inlineCallbacks
@@ -437,7 +468,7 @@
@return: True if principal is a proxy (of the correct type) of our parent
@rtype: C{boolean}
"""
- readWrite = self.isProxyType(True) # is read-write
+ readWrite = self.isProxyType(True) # is read-write
if principal and self.parent in (yield principal.proxyFor(readWrite)):
returnValue(True)
returnValue(False)
@@ -630,7 +661,7 @@
overdue = yield self._memcacher.checkDeletionTimer(principalUID)
- if overdue == False:
+ if overdue is False:
# Do nothing
returnValue(None)
@@ -855,9 +886,9 @@
)
if alreadyDone is None:
for (groupname, member) in (
- (yield self._db_all_values_for_sql(
- "select GROUPNAME, MEMBER from GROUPS"))
- ):
+ (yield self._db_all_values_for_sql(
+ "select GROUPNAME, MEMBER from GROUPS"))
+ ):
grouplist = groupname.split("#")
grouplist[0] = normalizeUUID(grouplist[0])
newGroupName = "#".join(grouplist)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/common.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/common.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/common.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -68,13 +68,14 @@
name = record.uid
if record is None:
- log.debug("No directory record with GUID %r" % (name,))
+ log.debug("No directory record with UID %r" % (name,))
returnValue(None)
- if not getattr(record, self.enabledAttribute):
- log.debug("Directory record %r is not enabled for %s" % (
- record, self.homeResourceTypeName))
- returnValue(None)
+ # MOVE2WHO
+ # if not getattr(record, self.enabledAttribute):
+ # log.debug("Directory record %r is not enabled for %s" % (
+ # record, self.homeResourceTypeName))
+ # returnValue(None)
assert len(name) > 4, "Directory record has an invalid GUID: %r" % (
name,)
@@ -94,7 +95,7 @@
if name == "":
returnValue((self, ()))
- record = self.directory.recordWithUID(name)
+ record = yield self.directory.recordWithUID(name)
if record:
child = yield self.homeResourceForRecord(record, request)
returnValue((child, segments[1:]))
@@ -149,7 +150,7 @@
if name == "":
returnValue((self, segments[1:]))
- record = self.directory.recordWithShortName(self.recordType, name)
+ record = yield self.directory.recordWithShortName(self.recordType, name)
if record is None:
returnValue(
(NotFoundResource(principalCollections=self._parent.principalCollections()), [])
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/directory-principal-resource.html
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/directory-principal-resource.html 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/directory-principal-resource.html 2014-03-13 20:40:10 UTC (rev 12899)
@@ -11,10 +11,7 @@
GUID: <t:slot name="principalGUID"/>
Record type: <t:slot name="recordType"/>
Short names: <t:slot name="shortNames"/>
-Security Identities: <t:slot name="securityIDs"/>
Full name: <t:slot name="fullName"/>
-First name: <t:slot name="firstName"/>
-Last name: <t:slot name="lastName"/>
Email addresses:
<t:slot name="emailAddresses" />Principal UID: <t:slot name="principalUID"/>
Principal URL: <t:slot name="principalURL"/>
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/directory.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/directory.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,1509 +0,0 @@
-# -*- test-case-name: twistedcaldav.directory.test -*-
-##
-# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-
-"""
-Generic directory service classes.
-"""
-
-__all__ = [
- "DirectoryService",
- "DirectoryRecord",
- "DirectoryError",
- "DirectoryConfigurationError",
- "UnknownRecordTypeError",
- "GroupMembershipCacheUpdater",
-]
-
-from plistlib import readPlistFromString
-
-from twext.python.log import Logger
-from txweb2.dav.auth import IPrincipalCredentials
-from txweb2.dav.util import joinURL
-
-from twisted.cred.checkers import ICredentialsChecker
-from twisted.cred.error import UnauthorizedLogin
-from twisted.internet.defer import succeed, inlineCallbacks, returnValue
-from twisted.python.filepath import FilePath
-
-from twistedcaldav.config import config
-from twistedcaldav.directory.idirectory import IDirectoryService, IDirectoryRecord
-from twistedcaldav.directory.util import uuidFromName, normalizeUUID
-from twistedcaldav.memcacher import Memcacher
-from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
-from txdav.caldav.datastore.scheduling.ischedule.localservers import Servers
-
-from txdav.caldav.icalendardirectoryservice import ICalendarStoreDirectoryService, \
- ICalendarStoreDirectoryRecord
-
-from xml.parsers.expat import ExpatError
-
-from zope.interface import implements
-
-import cPickle as pickle
-import datetime
-import grp
-import itertools
-import os
-import pwd
-import sys
-import types
-from urllib import unquote
-
-log = Logger()
-
-
-class DirectoryService(object):
- implements(IDirectoryService, ICalendarStoreDirectoryService, ICredentialsChecker)
-
- log = Logger()
-
- ##
- # IDirectoryService
- ##
-
- realmName = None
-
- recordType_users = "users"
- recordType_people = "people"
- recordType_groups = "groups"
- recordType_locations = "locations"
- recordType_resources = "resources"
- recordType_addresses = "addresses"
-
- searchContext_location = "location"
- searchContext_resource = "resource"
- searchContext_user = "user"
- searchContext_group = "group"
- searchContext_attendee = "attendee"
-
- aggregateService = None
-
- def _generatedGUID(self):
- if not hasattr(self, "_guid"):
- realmName = self.realmName
-
- assert self.baseGUID, "Class %s must provide a baseGUID attribute" % (self.__class__.__name__,)
-
- if realmName is None:
- self.log.error("Directory service %s has no realm name or GUID; generated service GUID will not be unique." % (self,))
- realmName = ""
- else:
- self.log.info("Directory service %s has no GUID; generating service GUID from realm name." % (self,))
-
- self._guid = uuidFromName(self.baseGUID, realmName)
-
- return self._guid
-
- baseGUID = None
- guid = property(_generatedGUID)
-
- # Needed by twistedcaldav.directorybackedaddressbook
- liveQuery = False
-
- def setRealm(self, realmName):
- self.realmName = realmName
-
-
- def available(self):
- """
- By default, the directory is available. This may return a boolean or a
- Deferred which fires a boolean.
-
- A return value of "False" means that the directory is currently
- unavailable due to the service starting up.
- """
- return True
- # end directorybackedaddressbook requirements
-
- ##
- # ICredentialsChecker
- ##
-
- # For ICredentialsChecker
- credentialInterfaces = (IPrincipalCredentials,)
-
- def requestAvatarId(self, credentials):
- credentials = IPrincipalCredentials(credentials)
-
- # FIXME: ?
- # We were checking if principal is enabled; seems unnecessary in current
- # implementation because you shouldn't have a principal object for a
- # disabled directory principal.
-
- if credentials.authnPrincipal is None:
- raise UnauthorizedLogin("No such user: %s" % (credentials.credentials.username,))
-
- # See if record is enabledForLogin
- if not credentials.authnPrincipal.record.isLoginEnabled():
- raise UnauthorizedLogin("User not allowed to log in: %s" %
- (credentials.credentials.username,))
-
- # Handle Kerberos as a separate behavior
- try:
- from twistedcaldav.authkerb import NegotiateCredentials
- except ImportError:
- NegotiateCredentials = None
-
- if NegotiateCredentials and isinstance(credentials.credentials,
- NegotiateCredentials):
- # If we get here with Kerberos, then authentication has already succeeded
- return (
- credentials.authnPrincipal.principalURL(),
- credentials.authzPrincipal.principalURL(),
- credentials.authnPrincipal,
- credentials.authzPrincipal,
- )
- else:
- if credentials.authnPrincipal.record.verifyCredentials(credentials.credentials):
- return (
- credentials.authnPrincipal.principalURL(),
- credentials.authzPrincipal.principalURL(),
- credentials.authnPrincipal,
- credentials.authzPrincipal,
- )
- else:
- raise UnauthorizedLogin("Incorrect credentials for %s" % (credentials.credentials.username,))
-
-
- def recordTypes(self):
- raise NotImplementedError("Subclass must implement recordTypes()")
-
-
- def listRecords(self, recordType):
- raise NotImplementedError("Subclass must implement listRecords()")
-
-
- def recordWithShortName(self, recordType, shortName):
- for record in self.listRecords(recordType):
- if shortName in record.shortNames:
- return record
- return None
-
-
- def recordWithUID(self, uid):
- uid = normalizeUUID(uid)
- for record in self.allRecords():
- if record.uid == uid:
- return record
- return None
-
-
- def recordWithGUID(self, guid):
- guid = normalizeUUID(guid)
- for record in self.allRecords():
- if record.guid == guid:
- return record
- return None
-
-
- def recordWithAuthID(self, authID):
- for record in self.allRecords():
- if authID in record.authIDs:
- return record
- return None
-
-
- def recordWithCalendarUserAddress(self, address):
- address = normalizeCUAddr(address)
- record = None
- if address.startswith("urn:uuid:"):
- guid = address[9:]
- record = self.recordWithGUID(guid)
- elif address.startswith("mailto:"):
- for record in self.allRecords():
- if address[7:] in record.emailAddresses:
- break
- else:
- return None
- elif address.startswith("/principals/"):
- parts = map(unquote, address.split("/"))
- if len(parts) == 4:
- if parts[2] == "__uids__":
- guid = parts[3]
- record = self.recordWithGUID(guid)
- else:
- record = self.recordWithShortName(parts[2], parts[3])
-
- return record if record and record.enabledForCalendaring else None
-
-
- def recordWithCachedGroupsAlias(self, recordType, alias):
- """
- @param recordType: the type of the record to look up.
- @param alias: the cached-groups alias of the record to look up.
- @type alias: C{str}
-
- @return: a deferred L{IDirectoryRecord} with the given cached-groups
- alias, or C{None} if no such record is found.
- """
- # The default implementation uses guid
- return succeed(self.recordWithGUID(alias))
-
-
- def allRecords(self):
- for recordType in self.recordTypes():
- for record in self.listRecords(recordType):
- yield record
-
-
- def recordsMatchingFieldsWithCUType(self, fields, operand="or",
- cuType=None):
- if cuType:
- recordType = DirectoryRecord.fromCUType(cuType)
- else:
- recordType = None
-
- return self.recordsMatchingFields(fields, operand=operand,
- recordType=recordType)
-
-
- def recordTypesForSearchContext(self, context):
- """
- Map calendarserver-principal-search REPORT context value to applicable record types
-
- @param context: The context value to map
- @type context: C{str}
- @returns: The list of record types the context maps to
- @rtype: C{list} of C{str}
- """
- if context == self.searchContext_location:
- recordTypes = [self.recordType_locations]
- elif context == self.searchContext_resource:
- recordTypes = [self.recordType_resources]
- elif context == self.searchContext_user:
- recordTypes = [self.recordType_users]
- elif context == self.searchContext_group:
- recordTypes = [self.recordType_groups]
- elif context == self.searchContext_attendee:
- recordTypes = [self.recordType_users, self.recordType_groups,
- self.recordType_resources]
- else:
- recordTypes = list(self.recordTypes())
- return recordTypes
-
-
- def recordsMatchingTokens(self, tokens, context=None):
- """
- @param tokens: The tokens to search on
- @type tokens: C{list} of C{str} (utf-8 bytes)
- @param context: An indication of what the end user is searching
- for; "attendee", "location", or None
- @type context: C{str}
- @return: a deferred sequence of L{IDirectoryRecord}s which
- match the given tokens and optional context.
-
- Each token is searched for within each record's full name and
- email address; if each token is found within a record that
- record is returned in the results.
-
- If context is None, all record types are considered. If
- context is "location", only locations are considered. If
- context is "attendee", only users, groups, and resources
- are considered.
- """
-
- # Default, bruteforce method; override with one optimized for each
- # service
-
- def fieldMatches(fieldValue, value):
- if fieldValue is None:
- return False
- elif type(fieldValue) in types.StringTypes:
- fieldValue = (fieldValue,)
-
- for testValue in fieldValue:
- testValue = testValue.lower()
- value = value.lower()
-
- try:
- testValue.index(value)
- return True
- except ValueError:
- pass
-
- return False
-
- def recordMatches(record):
- for token in tokens:
- for fieldName in ["fullName", "emailAddresses"]:
- try:
- fieldValue = getattr(record, fieldName)
- if fieldMatches(fieldValue, token):
- break
- except AttributeError:
- # No value
- pass
- else:
- return False
- return True
-
-
- def yieldMatches(recordTypes):
- try:
- for recordType in [r for r in recordTypes if r in self.recordTypes()]:
- for record in self.listRecords(recordType):
- if recordMatches(record):
- yield record
-
- except UnknownRecordTypeError:
- # Skip this service since it doesn't understand this record type
- pass
-
- recordTypes = self.recordTypesForSearchContext(context)
- return succeed(yieldMatches(recordTypes))
-
-
- def recordsMatchingFields(self, fields, operand="or", recordType=None):
- # Default, bruteforce method; override with one optimized for each
- # service
-
- def fieldMatches(fieldValue, value, caseless, matchType):
- if fieldValue is None:
- return False
- elif type(fieldValue) in types.StringTypes:
- fieldValue = (fieldValue,)
-
- for testValue in fieldValue:
- if caseless:
- testValue = testValue.lower()
- value = value.lower()
-
- if matchType == 'starts-with':
- if testValue.startswith(value):
- return True
- elif matchType == 'contains':
- try:
- testValue.index(value)
- return True
- except ValueError:
- pass
- else: # exact
- if testValue == value:
- return True
-
- return False
-
- def recordMatches(record):
- if operand == "and":
- for fieldName, value, caseless, matchType in fields:
- try:
- fieldValue = getattr(record, fieldName)
- if not fieldMatches(fieldValue, value, caseless,
- matchType):
- return False
- except AttributeError:
- # No property => no match
- return False
- # we hit on every property
- return True
- else: # "or"
- for fieldName, value, caseless, matchType in fields:
- try:
- fieldValue = getattr(record, fieldName)
- if fieldMatches(fieldValue, value, caseless,
- matchType):
- return True
- except AttributeError:
- # No value
- pass
- # we didn't hit any
- return False
-
- def yieldMatches(recordType):
- try:
- if recordType is None:
- recordTypes = list(self.recordTypes())
- else:
- recordTypes = (recordType,)
-
- for recordType in recordTypes:
- for record in self.listRecords(recordType):
- if recordMatches(record):
- yield record
-
- except UnknownRecordTypeError:
- # Skip this service since it doesn't understand this record type
- pass
-
- return succeed(yieldMatches(recordType))
-
-
- def getGroups(self, guids):
- """
- This implementation returns all groups, not just the ones specified
- by guids
- """
- return succeed(self.listRecords(self.recordType_groups))
-
-
- def getResourceInfo(self):
- return ()
-
-
- def isAvailable(self):
- return True
-
-
- def getParams(self, params, defaults, ignore=None):
- """ Checks configuration parameters for unexpected/ignored keys, and
- applies default values. """
-
- keys = set(params.keys())
-
- result = {}
- for key in defaults.iterkeys():
- if key in params:
- result[key] = params[key]
- keys.remove(key)
- else:
- result[key] = defaults[key]
-
- if ignore:
- for key in ignore:
- if key in params:
- self.log.warn("Ignoring obsolete directory service parameter: %s" % (key,))
- keys.remove(key)
-
- if keys:
- raise DirectoryConfigurationError("Invalid directory service parameter(s): %s" % (", ".join(list(keys)),))
- return result
-
-
- def parseResourceInfo(self, plist, guid, recordType, shortname):
- """
- Parse ResourceInfo plist and extract information that the server needs.
-
- @param plist: the plist that is the attribute value.
- @type plist: str
- @param guid: the directory GUID of the record being parsed.
- @type guid: str
- @param shortname: the record shortname of the record being parsed.
- @type shortname: str
- @return: a C{tuple} of C{bool} for auto-accept, C{str} for proxy GUID, C{str} for read-only proxy GUID.
- """
- try:
- plist = readPlistFromString(plist)
- wpframework = plist.get("com.apple.WhitePagesFramework", {})
- autoaccept = wpframework.get("AutoAcceptsInvitation", False)
- proxy = wpframework.get("CalendaringDelegate", None)
- read_only_proxy = wpframework.get("ReadOnlyCalendaringDelegate", None)
- autoAcceptGroup = wpframework.get("AutoAcceptGroup", "")
- except (ExpatError, AttributeError), e:
- self.log.error(
- "Failed to parse ResourceInfo attribute of record (%s)%s (guid=%s): %s\n%s" %
- (recordType, shortname, guid, e, plist,)
- )
- raise ValueError("Invalid ResourceInfo")
-
- return (autoaccept, proxy, read_only_proxy, autoAcceptGroup)
-
-
- def getExternalProxyAssignments(self):
- """
- Retrieve proxy assignments for locations and resources from the
- directory and return a list of (principalUID, ([memberUIDs)) tuples,
- suitable for passing to proxyDB.setGroupMembers( )
-
- This generic implementation fetches all locations and resources.
- More specialized implementations can perform whatever operation is
- most efficient for their particular directory service.
- """
- assignments = []
-
- resources = itertools.chain(
- self.listRecords(self.recordType_locations),
- self.listRecords(self.recordType_resources)
- )
- for record in resources:
- guid = record.guid
- if record.enabledForCalendaring:
- assignments.append(("%s#calendar-proxy-write" % (guid,),
- record.externalProxies()))
- assignments.append(("%s#calendar-proxy-read" % (guid,),
- record.externalReadOnlyProxies()))
-
- return assignments
-
-
- def createRecord(self, recordType, guid=None, shortNames=(), authIDs=set(),
- fullName=None, firstName=None, lastName=None, emailAddresses=set(),
- uid=None, password=None, **kwargs):
- """
- Create/persist a directory record based on the given values
- """
- raise NotImplementedError("Subclass must implement createRecord")
-
-
- def updateRecord(self, recordType, guid=None, shortNames=(), authIDs=set(),
- fullName=None, firstName=None, lastName=None, emailAddresses=set(),
- uid=None, password=None, **kwargs):
- """
- Update/persist a directory record based on the given values
- """
- raise NotImplementedError("Subclass must implement updateRecord")
-
-
- def destroyRecord(self, recordType, guid=None):
- """
- Remove a directory record from the directory
- """
- raise NotImplementedError("Subclass must implement destroyRecord")
-
-
- def createRecords(self, data):
- """
- Create directory records in bulk
- """
- raise NotImplementedError("Subclass must implement createRecords")
-
-
- def setPrincipalCollection(self, principalCollection):
- """
- Set the principal service that the directory relies on for doing proxy tests.
-
- @param principalService: the principal service.
- @type principalService: L{DirectoryProvisioningResource}
- """
- self.principalCollection = principalCollection
-
-
- def isProxyFor(self, test, other):
- """
- Test whether one record is a calendar user proxy for the specified record.
-
- @param test: record to test
- @type test: L{DirectoryRecord}
- @param other: record to check against
- @type other: L{DirectoryRecord}
-
- @return: C{True} if test is a proxy of other.
- @rtype: C{bool}
- """
- return self.principalCollection.isProxyFor(test, other)
-
-
-
-class GroupMembershipCache(Memcacher):
- """
- Caches group membership information
-
- This cache is periodically updated by a side car so that worker processes
- never have to ask the directory service directly for group membership
- information.
-
- Keys in this cache are:
-
- "groups-for:<GUID>" : comma-separated list of groups that GUID is a member
- of. Note that when using LDAP, the key for this is an LDAP DN.
-
- "group-cacher-populated" : contains a datestamp indicating the most recent
- population.
- """
- log = Logger()
-
- def __init__(self, namespace, pickle=True, no_invalidation=False,
- key_normalization=True, expireSeconds=0, lockSeconds=60):
-
- super(GroupMembershipCache, self).__init__(namespace, pickle=pickle,
- no_invalidation=no_invalidation,
- key_normalization=key_normalization)
-
- self.expireSeconds = expireSeconds
- self.lockSeconds = lockSeconds
-
-
- def setGroupsFor(self, guid, memberships):
- self.log.debug("set groups-for %s : %s" % (guid, memberships))
- return self.set("groups-for:%s" %
- (str(guid)), memberships,
- expireTime=self.expireSeconds)
-
-
- def getGroupsFor(self, guid):
- self.log.debug("get groups-for %s" % (guid,))
- def _value(value):
- if value:
- return value
- else:
- return set()
- d = self.get("groups-for:%s" % (str(guid),))
- d.addCallback(_value)
- return d
-
-
- def deleteGroupsFor(self, guid):
- self.log.debug("delete groups-for %s" % (guid,))
- return self.delete("groups-for:%s" % (str(guid),))
-
-
- def setPopulatedMarker(self):
- self.log.debug("set group-cacher-populated")
- return self.set("group-cacher-populated", str(datetime.datetime.now()))
-
-
- @inlineCallbacks
- def isPopulated(self):
- self.log.debug("is group-cacher-populated")
- value = (yield self.get("group-cacher-populated"))
- returnValue(value is not None)
-
-
- def acquireLock(self):
- """
- Acquire a memcached lock named group-cacher-lock
-
- return: Deferred firing True if successful, False if someone already has
- the lock
- """
- self.log.debug("add group-cacher-lock")
- return self.add("group-cacher-lock", "1", expireTime=self.lockSeconds)
-
-
- def extendLock(self):
- """
- Update the expiration time of the memcached lock
- Return: Deferred firing True if successful, False otherwise
- """
- self.log.debug("extend group-cacher-lock")
- return self.set("group-cacher-lock", "1", expireTime=self.lockSeconds)
-
-
- def releaseLock(self):
- """
- Release the memcached lock
- Return: Deferred firing True if successful, False otherwise
- """
- self.log.debug("delete group-cacher-lock")
- return self.delete("group-cacher-lock")
-
-
-
-class GroupMembershipCacheUpdater(object):
- """
- Responsible for updating memcached with group memberships. This will run
- in a sidecar. There are two sources of proxy data to pull from: the local
- proxy database, and the location/resource info in the directory system.
- """
- log = Logger()
-
- def __init__(self, proxyDB, directory, updateSeconds, expireSeconds,
- lockSeconds, cache=None, namespace=None, useExternalProxies=False,
- externalProxiesSource=None):
- self.proxyDB = proxyDB
- self.directory = directory
- self.updateSeconds = updateSeconds
- self.useExternalProxies = useExternalProxies
- if useExternalProxies and externalProxiesSource is None:
- externalProxiesSource = self.directory.getExternalProxyAssignments
- self.externalProxiesSource = externalProxiesSource
-
- if cache is None:
- assert namespace is not None, "namespace must be specified if GroupMembershipCache is not provided"
- cache = GroupMembershipCache(namespace, expireSeconds=expireSeconds,
- lockSeconds=lockSeconds)
- self.cache = cache
-
-
- @inlineCallbacks
- def getGroups(self, guids=None):
- """
- Retrieve all groups and their member info (but don't actually fault in
- the records of the members), and return two dictionaries. The first
- contains group records; the keys for this dictionary are the identifiers
- used by the directory service to specify members. In OpenDirectory
- these would be guids, but in LDAP these could be DNs, or some other
- attribute. This attribute can be retrieved from a record using
- record.cachedGroupsAlias().
- The second dictionary returned maps that member attribute back to the
- corresponding guid. These dictionaries are used to reverse-index the
- groups that users are in by expandedMembers().
-
- @param guids: if provided, retrieve only the groups corresponding to
- these guids (including their sub groups)
- @type guids: list of guid strings
- """
- groups = {}
- aliases = {}
-
- if guids is None: # get all group guids
- records = self.directory.listRecords(self.directory.recordType_groups)
- else: # get only the ones we know have been delegated to
- records = (yield self.directory.getGroups(guids))
-
- for record in records:
- alias = record.cachedGroupsAlias()
- groups[alias] = record.memberGUIDs()
- aliases[record.guid] = alias
-
- returnValue((groups, aliases))
-
-
- def expandedMembers(self, groups, guid, members=None, seen=None):
- """
- Return the complete, flattened set of members of a group, including
- all sub-groups, based on the group hierarchy described in the
- groups dictionary.
- """
- if members is None:
- members = set()
- if seen is None:
- seen = set()
-
- if guid not in seen:
- seen.add(guid)
- for member in groups[guid]:
- members.add(member)
- if member in groups: # it's a group then
- self.expandedMembers(groups, member, members=members,
- seen=seen)
- return members
-
-
- @inlineCallbacks
- def updateCache(self, fast=False):
- """
- Iterate the proxy database to retrieve all the principals who have been
- delegated to. Fault these principals in. For any of these principals
- that are groups, expand the members of that group and store those in
- the cache
-
- If fast=True, we're in quick-start mode, used only by the master process
- to start servicing requests as soon as possible. In this mode we look
- for DataRoot/memberships_cache which is a pickle of a dictionary whose
- keys are guids (except when using LDAP where the keys will be DNs), and
- the values are lists of group guids. If the cache file does not exist
- we switch to fast=False.
-
- The return value is mainly used for unit tests; it's a tuple containing
- the (possibly modified) value for fast, and the number of members loaded
- into the cache (which can be zero if fast=True and isPopulated(), or
- fast=False and the cache is locked by someone else).
-
- The pickled snapshot file is a dict whose keys represent a record and
- the values are the guids of the groups that record is a member of. The
- keys are normally guids except in the case of a directory system like LDAP
- where there can be a different attribute used for referring to members,
- such as a DN.
- """
-
- # TODO: add memcached eviction protection
-
- useLock = True
-
- # See if anyone has completely populated the group membership cache
- isPopulated = (yield self.cache.isPopulated())
-
- if fast:
- # We're in quick-start mode. Check first to see if someone has
- # populated the membership cache, and if so, return immediately
- if isPopulated:
- self.log.info("Group membership cache is already populated")
- returnValue((fast, 0, 0))
-
- # We don't care what others are doing right now, we need to update
- useLock = False
-
- self.log.info("Updating group membership cache")
-
- dataRoot = FilePath(config.DataRoot)
- membershipsCacheFile = dataRoot.child("memberships_cache")
- extProxyCacheFile = dataRoot.child("external_proxy_cache")
-
- if not membershipsCacheFile.exists():
- self.log.info("Group membership snapshot file does not yet exist")
- fast = False
- previousMembers = {}
- callGroupsChanged = False
- else:
- self.log.info("Group membership snapshot file exists: %s" %
- (membershipsCacheFile.path,))
- callGroupsChanged = True
- try:
- previousMembers = pickle.loads(membershipsCacheFile.getContent())
- except:
- self.log.warn("Could not parse snapshot; will regenerate cache")
- fast = False
- previousMembers = {}
- callGroupsChanged = False
-
- if useLock:
- self.log.info("Attempting to acquire group membership cache lock")
- acquiredLock = (yield self.cache.acquireLock())
- if not acquiredLock:
- self.log.info("Group membership cache lock held by another process")
- returnValue((fast, 0, 0))
- self.log.info("Acquired lock")
-
- if not fast and self.useExternalProxies:
-
- # Load in cached copy of external proxies so we can diff against them
- previousAssignments = []
- if extProxyCacheFile.exists():
- self.log.info("External proxies snapshot file exists: %s" %
- (extProxyCacheFile.path,))
- try:
- previousAssignments = pickle.loads(extProxyCacheFile.getContent())
- except:
- self.log.warn("Could not parse external proxies snapshot")
- previousAssignments = []
-
- if useLock:
- yield self.cache.extendLock()
-
- self.log.info("Retrieving proxy assignments from directory")
- assignments = self.externalProxiesSource()
- self.log.info("%d proxy assignments retrieved from directory" %
- (len(assignments),))
-
- if useLock:
- yield self.cache.extendLock()
-
- changed, removed = diffAssignments(previousAssignments, assignments)
- # changed is the list of proxy assignments (either new or updates).
- # removed is the list of principals who used to have an external
- # delegate but don't anymore.
-
- # populate proxy DB from external resource info
- if changed:
- self.log.info("Updating proxy assignments")
- assignmentCount = 0
- totalNumAssignments = len(changed)
- currentAssignmentNum = 0
- for principalUID, members in changed:
- currentAssignmentNum += 1
- if currentAssignmentNum % 1000 == 0:
- self.log.info("...proxy assignment %d of %d" % (currentAssignmentNum,
- totalNumAssignments))
- try:
- current = (yield self.proxyDB.getMembers(principalUID))
- if members != current:
- assignmentCount += 1
- yield self.proxyDB.setGroupMembers(principalUID, members)
- except Exception, e:
- self.log.error("Unable to update proxy assignment: principal=%s, members=%s, error=%s" % (principalUID, members, e))
- self.log.info("Updated %d assignment%s in proxy database" %
- (assignmentCount, "" if assignmentCount == 1 else "s"))
-
- if removed:
- self.log.info("Deleting proxy assignments")
- assignmentCount = 0
- totalNumAssignments = len(removed)
- currentAssignmentNum = 0
- for principalUID in removed:
- currentAssignmentNum += 1
- if currentAssignmentNum % 1000 == 0:
- self.log.info("...proxy assignment %d of %d" % (currentAssignmentNum,
- totalNumAssignments))
- try:
- assignmentCount += 1
- yield self.proxyDB.setGroupMembers(principalUID, [])
- except Exception, e:
- self.log.error("Unable to remove proxy assignment: principal=%s, members=%s, error=%s" % (principalUID, members, e))
- self.log.info("Removed %d assignment%s from proxy database" %
- (assignmentCount, "" if assignmentCount == 1 else "s"))
-
- # Store external proxy snapshot
- self.log.info("Taking snapshot of external proxies to %s" %
- (extProxyCacheFile.path,))
- extProxyCacheFile.setContent(pickle.dumps(assignments))
-
- if fast:
- # If there is an on-disk snapshot of the membership information,
- # load that and put into memcached, bypassing the faulting in of
- # any records, so that the server can start up quickly.
-
- self.log.info("Loading group memberships from snapshot")
- members = pickle.loads(membershipsCacheFile.getContent())
-
- else:
- # Fetch the group hierarchy from the directory, fetch the list
- # of delegated-to guids, intersect those and build a dictionary
- # containing which delegated-to groups a user is a member of
-
- self.log.info("Retrieving list of all proxies")
- # This is always a set of guids:
- delegatedGUIDs = set((yield self.proxyDB.getAllMembers()))
- self.log.info("There are %d proxies" % (len(delegatedGUIDs),))
- self.log.info("Retrieving group hierarchy from directory")
-
- # "groups" maps a group to its members; the keys and values consist
- # of whatever directory attribute is used to refer to members. The
- # attribute value comes from record.cachedGroupsAlias().
- # "aliases" maps the record.cachedGroupsAlias() value for a group
- # back to the group's guid.
- groups, aliases = (yield self.getGroups(guids=delegatedGUIDs))
- groupGUIDs = set(aliases.keys())
- self.log.info("%d groups retrieved from the directory" %
- (len(groupGUIDs),))
-
- delegatedGUIDs = delegatedGUIDs.intersection(groupGUIDs)
- self.log.info("%d groups are proxies" % (len(delegatedGUIDs),))
-
- # Reverse index the group membership from cache
- members = {}
- for groupGUID in delegatedGUIDs:
- groupMembers = self.expandedMembers(groups, aliases[groupGUID])
- # groupMembers is in cachedGroupsAlias() format
- for member in groupMembers:
- memberships = members.setdefault(member, set())
- memberships.add(groupGUID)
-
- self.log.info("There are %d users delegated-to via groups" %
- (len(members),))
-
- # Store snapshot
- self.log.info("Taking snapshot of group memberships to %s" %
- (membershipsCacheFile.path,))
- membershipsCacheFile.setContent(pickle.dumps(members))
-
- # Update ownership
- uid = gid = -1
- if config.UserName:
- uid = pwd.getpwnam(config.UserName).pw_uid
- if config.GroupName:
- gid = grp.getgrnam(config.GroupName).gr_gid
- os.chown(membershipsCacheFile.path, uid, gid)
- if extProxyCacheFile.exists():
- os.chown(extProxyCacheFile.path, uid, gid)
-
- self.log.info("Storing %d group memberships in memcached" %
- (len(members),))
- changedMembers = set()
- totalNumMembers = len(members)
- currentMemberNum = 0
- for member, groups in members.iteritems():
- currentMemberNum += 1
- if currentMemberNum % 1000 == 0:
- self.log.info("...membership %d of %d" % (currentMemberNum,
- totalNumMembers))
- # self.log.debug("%s is in %s" % (member, groups))
- yield self.cache.setGroupsFor(member, groups)
- if groups != previousMembers.get(member, None):
- # This principal has had a change in group membership
- # so invalidate the PROPFIND response cache
- changedMembers.add(member)
- try:
- # Remove from previousMembers; anything still left in
- # previousMembers when this loop is done will be
- # deleted from cache (since only members that were
- # previously in delegated-to groups but are no longer
- # would still be in previousMembers)
- del previousMembers[member]
- except KeyError:
- pass
-
- # Remove entries for principals that no longer are in delegated-to
- # groups
- for member, groups in previousMembers.iteritems():
- yield self.cache.deleteGroupsFor(member)
- changedMembers.add(member)
-
- # For principals whose group membership has changed, call groupsChanged()
- if callGroupsChanged and not fast and hasattr(self.directory, "principalCollection"):
- for member in changedMembers:
- record = yield self.directory.recordWithCachedGroupsAlias(
- self.directory.recordType_users, member)
- if record is not None:
- principal = self.directory.principalCollection.principalForRecord(record)
- if principal is not None:
- self.log.debug("Group membership changed for %s (%s)" %
- (record.shortNames[0], record.guid,))
- if hasattr(principal, "groupsChanged"):
- yield principal.groupsChanged()
-
- yield self.cache.setPopulatedMarker()
-
- if useLock:
- self.log.info("Releasing lock")
- yield self.cache.releaseLock()
-
- self.log.info("Group memberships cache updated")
-
- returnValue((fast, len(members), len(changedMembers)))
-
-
-
-def diffAssignments(old, new):
- """
- Compare two proxy assignment lists and return their differences in the form of
- two lists -- one for added/updated assignments, and one for removed assignments.
- @param old: list of (group, set(members)) tuples
- @type old: C{list}
- @param new: list of (group, set(members)) tuples
- @type new: C{list}
- @return: Tuple of two lists; the first list contains tuples of (proxy-principal,
- set(members)), and represents all the new or updated assignments. The
- second list contains all the proxy-principals which used to have a delegate
- but don't anymore.
- """
- old = dict(old)
- new = dict(new)
- changed = []
- removed = []
- for key in old.iterkeys():
- if key not in new:
- removed.append(key)
- else:
- if old[key] != new[key]:
- changed.append((key, new[key]))
- for key in new.iterkeys():
- if key not in old:
- changed.append((key, new[key]))
- return changed, removed
-
-
-
-class DirectoryRecord(object):
- log = Logger()
-
- implements(IDirectoryRecord, ICalendarStoreDirectoryRecord)
-
- def __repr__(self):
- return "<%s[%s@%s(%s)] %s(%s) %r @ %s>" % (
- self.__class__.__name__,
- self.recordType,
- self.service.guid,
- self.service.realmName,
- self.guid,
- ",".join(self.shortNames),
- self.fullName,
- self.serverURI(),
- )
-
-
- def __init__(
- self, service, recordType, guid=None,
- shortNames=(), authIDs=set(), fullName=None,
- firstName=None, lastName=None, emailAddresses=set(),
- calendarUserAddresses=set(),
- autoSchedule=False, autoScheduleMode=None,
- autoAcceptGroup="",
- enabledForCalendaring=None,
- enabledForAddressBooks=None,
- uid=None,
- enabledForLogin=True,
- extProxies=(), extReadOnlyProxies=(),
- **kwargs
- ):
- assert service.realmName is not None
- assert recordType
- assert shortNames and isinstance(shortNames, tuple)
-
- guid = normalizeUUID(guid)
-
- if uid is None:
- uid = guid
-
- if fullName is None:
- fullName = ""
-
- self.service = service
- self.recordType = recordType
- self.guid = guid
- self.uid = uid
- self.enabled = False
- self.serverID = ""
- self.shortNames = shortNames
- self.authIDs = authIDs
- self.fullName = fullName
- self.firstName = firstName
- self.lastName = lastName
- self.emailAddresses = emailAddresses
- self.enabledForCalendaring = enabledForCalendaring
- self.autoSchedule = autoSchedule
- self.autoScheduleMode = autoScheduleMode
- self.autoAcceptGroup = autoAcceptGroup
- self.enabledForAddressBooks = enabledForAddressBooks
- self.enabledForLogin = enabledForLogin
- self.extProxies = extProxies
- self.extReadOnlyProxies = extReadOnlyProxies
- self.extras = kwargs
-
-
- def get_calendarUserAddresses(self):
- """
- Dynamically construct a calendarUserAddresses attribute which describes
- this L{DirectoryRecord}.
-
- @see: L{IDirectoryRecord.calendarUserAddresses}.
- """
- if not self.enabledForCalendaring:
- return frozenset()
- cuas = set(
- ["mailto:%s" % (emailAddress,)
- for emailAddress in self.emailAddresses]
- )
- if self.guid:
- cuas.add("urn:uuid:%s" % (self.guid,))
- cuas.add(joinURL("/principals", "__uids__", self.guid) + "/")
- for shortName in self.shortNames:
- cuas.add(joinURL("/principals", self.recordType, shortName,) + "/")
-
- return frozenset(cuas)
-
- calendarUserAddresses = property(get_calendarUserAddresses)
-
- def __cmp__(self, other):
- if not isinstance(other, DirectoryRecord):
- return NotImplemented
-
- for attr in ("service", "recordType", "shortNames", "guid"):
- diff = cmp(getattr(self, attr), getattr(other, attr))
- if diff != 0:
- return diff
- return 0
-
-
- def __hash__(self):
- h = hash(self.__class__.__name__)
- for attr in ("service", "recordType", "shortNames", "guid",
- "enabled", "enabledForCalendaring"):
- h = (h + hash(getattr(self, attr))) & sys.maxint
-
- return h
-
-
- def cacheToken(self):
- """
- Generate a token that can be uniquely used to identify the state of this record for use
- in a cache.
- """
- return hash((
- self.__class__.__name__,
- self.service.realmName,
- self.recordType,
- self.shortNames,
- self.guid,
- self.enabled,
- self.enabledForCalendaring,
- ))
-
-
- def addAugmentInformation(self, augment):
-
- if augment:
- self.enabled = augment.enabled
- self.serverID = augment.serverID
- self.enabledForCalendaring = augment.enabledForCalendaring
- self.enabledForAddressBooks = augment.enabledForAddressBooks
- self.autoSchedule = augment.autoSchedule
- self.autoScheduleMode = augment.autoScheduleMode
- self.autoAcceptGroup = augment.autoAcceptGroup
- self.enabledForLogin = augment.enabledForLogin
-
- if (self.enabledForCalendaring or self.enabledForAddressBooks) and self.recordType == self.service.recordType_groups:
- self.enabledForCalendaring = False
- self.enabledForAddressBooks = False
-
- # For augment records cloned from the Default augment record,
- # don't emit this message:
- if not augment.clonedFromDefault:
- self.log.error("Group '%s(%s)' cannot be enabled for calendaring or address books" % (self.guid, self.shortNames[0],))
-
- else:
- # Groups are by default always enabled
- self.enabled = (self.recordType == self.service.recordType_groups)
- self.serverID = ""
- self.enabledForCalendaring = False
- self.enabledForAddressBooks = False
- self.enabledForLogin = False
-
-
- def applySACLs(self):
- """
- Disable calendaring and addressbooks as dictated by SACLs
- """
-
- if config.EnableSACLs and self.CheckSACL:
- username = self.shortNames[0]
- if self.CheckSACL(username, "calendar") != 0:
- self.log.debug("%s is not enabled for calendaring due to SACL"
- % (username,))
- self.enabledForCalendaring = False
- if self.CheckSACL(username, "addressbook") != 0:
- self.log.debug("%s is not enabled for addressbooks due to SACL"
- % (username,))
- self.enabledForAddressBooks = False
-
-
- def displayName(self):
- return self.fullName if self.fullName else self.shortNames[0]
-
-
- def isLoginEnabled(self):
- """
- Returns True if the user should be allowed to log in, based on the
- enabledForLogin attribute, which is currently controlled by the
- DirectoryService implementation.
- """
- return self.enabledForLogin
-
-
- def members(self):
- return ()
-
-
- def expandedMembers(self, members=None, seen=None):
- """
- Return the complete, flattened set of members of a group, including
- all sub-groups.
- """
- if members is None:
- members = set()
- if seen is None:
- seen = set()
-
- if self not in seen:
- seen.add(self)
- for member in self.members():
- members.add(member)
- if member.recordType == self.service.recordType_groups:
- member.expandedMembers(members=members, seen=seen)
-
- return members
-
-
- def groups(self):
- return ()
-
-
- def cachedGroups(self):
- """
- Return the set of groups (guids) this record is a member of, based on
- the data cached by cacheGroupMembership( )
- """
- return self.service.groupMembershipCache.getGroupsFor(self.cachedGroupsAlias())
-
-
- def cachedGroupsAlias(self):
- """
- The GroupMembershipCache uses keys based on this value. Normally it's
- a record's guid but in a directory system like LDAP which can use a
- different attribute to refer to group members, we need to be able to
- look up an entry in the GroupMembershipCache by that attribute.
- Subclasses which don't use record.guid to look up group membership
- should override this method.
- """
- return self.guid
-
-
- def externalProxies(self):
- """
- Return the set of proxies defined in the directory service, as opposed
- to assignments in the proxy DB itself.
- """
- return set(self.extProxies)
-
-
- def externalReadOnlyProxies(self):
- """
- Return the set of read-only proxies defined in the directory service,
- as opposed to assignments in the proxy DB itself.
- """
- return set(self.extReadOnlyProxies)
-
-
- def memberGUIDs(self):
- """
- Return the set of GUIDs that are members of this group
- """
- return set()
-
-
- def verifyCredentials(self, credentials):
- return False
-
-
- def calendarsEnabled(self):
- return config.EnableCalDAV and self.enabledForCalendaring
-
-
- def canonicalCalendarUserAddress(self):
- """
- Return a CUA for this principal, preferring in this order:
- urn:uuid: form
- mailto: form
- first in calendarUserAddresses list
- """
-
- cua = ""
- for candidate in self.calendarUserAddresses:
- # Pick the first one, but urn:uuid: and mailto: can override
- if not cua:
- cua = candidate
- # But always immediately choose the urn:uuid: form
- if candidate.startswith("urn:uuid:"):
- cua = candidate
- break
- # Prefer mailto: if no urn:uuid:
- elif candidate.startswith("mailto:"):
- cua = candidate
- return cua
-
-
- def enabledAsOrganizer(self):
- if self.recordType == DirectoryService.recordType_users:
- return True
- elif self.recordType == DirectoryService.recordType_groups:
- return config.Scheduling.Options.AllowGroupAsOrganizer
- elif self.recordType == DirectoryService.recordType_locations:
- return config.Scheduling.Options.AllowLocationAsOrganizer
- elif self.recordType == DirectoryService.recordType_resources:
- return config.Scheduling.Options.AllowResourceAsOrganizer
- else:
- return False
-
- # Mapping from directory record.recordType to RFC2445 CUTYPE values
- _cuTypes = {
- 'users' : 'INDIVIDUAL',
- 'groups' : 'GROUP',
- 'resources' : 'RESOURCE',
- 'locations' : 'ROOM',
- }
-
- def getCUType(self):
- return self._cuTypes.get(self.recordType, "UNKNOWN")
-
-
- @classmethod
- def fromCUType(cls, cuType):
- for key, val in cls._cuTypes.iteritems():
- if val == cuType:
- return key
- return None
-
-
- def canAutoSchedule(self, organizer):
- if config.Scheduling.Options.AutoSchedule.Enabled:
- if (config.Scheduling.Options.AutoSchedule.Always or
- self.autoSchedule or
- self.autoAcceptFromOrganizer(organizer)):
- if (self.getCUType() != "INDIVIDUAL" or
- config.Scheduling.Options.AutoSchedule.AllowUsers):
- return True
- return False
-
-
- def getAutoScheduleMode(self, organizer):
- autoScheduleMode = self.autoScheduleMode
- if self.autoAcceptFromOrganizer(organizer):
- autoScheduleMode = "automatic"
- return autoScheduleMode
-
-
- def autoAcceptFromOrganizer(self, organizer):
- if organizer is not None and self.autoAcceptGroup is not None:
- service = self.service.aggregateService or self.service
- organizerRecord = service.recordWithCalendarUserAddress(organizer)
- if organizerRecord is not None:
- if organizerRecord.guid in self.autoAcceptMembers():
- return True
- return False
-
-
- def serverURI(self):
- """
- URL of the server hosting this record. Return None if hosted on this server.
- """
- if config.Servers.Enabled and self.serverID:
- return Servers.getServerURIById(self.serverID)
- else:
- return None
-
-
- def server(self):
- """
- Server hosting this record. Return None if hosted on this server.
- """
- if config.Servers.Enabled and self.serverID:
- return Servers.getServerById(self.serverID)
- else:
- return None
-
-
- def thisServer(self):
- s = self.server()
- return s.thisServer if s is not None else True
-
-
- def autoAcceptMembers(self):
- """
- Return the list of GUIDs for which this record will automatically accept
- invites from (assuming no conflicts). This list is based on the group
- assigned to record.autoAcceptGroup. Cache the expanded group membership
- within the record.
-
- @return: the list of members of the autoAcceptGroup, or an empty list if
- not assigned
- @rtype: C{list} of GUID C{str}
- """
- if not hasattr(self, "_cachedAutoAcceptMembers"):
- self._cachedAutoAcceptMembers = []
- if self.autoAcceptGroup:
- service = self.service.aggregateService or self.service
- groupRecord = service.recordWithGUID(self.autoAcceptGroup)
- if groupRecord is not None:
- self._cachedAutoAcceptMembers = [m.guid for m in groupRecord.expandedMembers()]
-
- return self._cachedAutoAcceptMembers
-
-
- def isProxyFor(self, other):
- """
- Test whether the record is a calendar user proxy for the specified record.
-
- @param other: record to test
- @type other: L{DirectoryRecord}
-
- @return: C{True} if it is a proxy.
- @rtype: C{bool}
- """
- return self.service.isProxyFor(self, other)
-
-
-
-class DirectoryError(RuntimeError):
- """
- Generic directory error.
- """
-
-
-
-class DirectoryConfigurationError(DirectoryError):
- """
- Invalid directory configuration.
- """
-
-
-
-class UnknownRecordTypeError(DirectoryError):
- """
- Unknown directory record type.
- """
- def __init__(self, recordType):
- DirectoryError.__init__(self, "Invalid record type: %s" % (recordType,))
- self.recordType = recordType
-
-
-# So CheckSACL will be parameterized
-# We do this after DirectoryRecord is defined
-try:
- from calendarserver.platform.darwin._sacl import CheckSACL
- DirectoryRecord.CheckSACL = CheckSACL
-except ImportError:
- DirectoryRecord.CheckSACL = None
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/ldapdirectory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/ldapdirectory.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/ldapdirectory.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,2034 +0,0 @@
-##
-# Copyright (c) 2008-2009 Aymeric Augustin. All rights reserved.
-# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-"""
-LDAP directory service implementation. Supports principal-property-search
-and restrictToGroup features.
-
-The following attributes from standard schemas are used:
-* Core (RFC 4519):
- . cn | commonName
- . givenName
- . member (if not using NIS groups)
- . ou
- . sn | surname
- . uid | userid (if using NIS groups)
-* COSINE (RFC 4524):
- . mail
-* InetOrgPerson (RFC 2798):
- . displayName (if cn is unavailable)
-* NIS (RFC):
- . gecos (if cn is unavailable)
- . memberUid (if using NIS groups)
-"""
-
-__all__ = [
- "LdapDirectoryService",
-]
-
-import ldap.async
-from ldap.filter import escape_filter_chars as ldapEsc
-
-try:
- # Note: PAM support is currently untested
- import PAM
- pamAvailable = True
-except ImportError:
- pamAvailable = False
-
-import time
-from twisted.cred.credentials import UsernamePassword
-from twistedcaldav.directory.cachingdirectory import (
- CachingDirectoryService, CachingDirectoryRecord
-)
-from twistedcaldav.directory.directory import DirectoryConfigurationError
-from twistedcaldav.directory.augment import AugmentRecord
-from twistedcaldav.directory.util import splitIntoBatches, normalizeUUID
-from twisted.internet.defer import succeed, inlineCallbacks, returnValue
-from twisted.internet.threads import deferToThread
-from twext.python.log import Logger
-from txweb2.http import HTTPError, StatusResponse
-from txweb2 import responsecode
-
-
-
-class LdapDirectoryService(CachingDirectoryService):
- """
- LDAP based implementation of L{IDirectoryService}.
- """
- log = Logger()
-
- baseGUID = "5A871574-0C86-44EE-B11B-B9440C3DC4DD"
-
- def __repr__(self):
- return "<%s %r: %r>" % (
- self.__class__.__name__, self.realmName, self.uri
- )
-
-
- def __init__(self, params):
- """
- @param params: a dictionary containing the following keys:
- cacheTimeout, realmName, uri, tls, tlsCACertFile, tlsCACertDir,
- tlsRequireCert, credentials, rdnSchema, groupSchema, resourceSchema
- poddingSchema
- """
-
- defaults = {
- "augmentService": None,
- "groupMembershipCache": None,
- "cacheTimeout": 1, # Minutes
- "negativeCaching": False,
- "warningThresholdSeconds": 3,
- "batchSize": 500, # for splitting up large queries
- "requestTimeoutSeconds": 10,
- "requestResultsLimit": 200,
- "optimizeMultiName": False,
- "queryLocationsImplicitly": True,
- "restrictEnabledRecords": False,
- "restrictToGroup": "",
- "recordTypes": ("users", "groups"),
- "uri": "ldap://localhost/",
- "tls": False,
- "tlsCACertFile": None,
- "tlsCACertDir": None,
- "tlsRequireCert": None, # never, allow, try, demand, hard
- "credentials": {
- "dn": None,
- "password": None,
- },
- "authMethod": "LDAP",
- "rdnSchema": {
- "base": "dc=example,dc=com",
- "guidAttr": "entryUUID",
- "users": {
- "rdn": "ou=People",
- "filter": None, # additional filter for this type
- "loginEnabledAttr": "", # attribute controlling login
- "loginEnabledValue": "yes", # "True" value of above attribute
- "calendarEnabledAttr": "", # attribute controlling enabledForCalendaring
- "calendarEnabledValue": "yes", # "True" value of above attribute
- "mapping": { # maps internal record names to LDAP
- "recordName": "uid",
- "fullName": "cn",
- "emailAddresses": ["mail"], # multiple LDAP fields supported
- "firstName": "givenName",
- "lastName": "sn",
- },
- },
- "groups": {
- "rdn": "ou=Group",
- "filter": None, # additional filter for this type
- "mapping": { # maps internal record names to LDAP
- "recordName": "cn",
- "fullName": "cn",
- "emailAddresses": ["mail"], # multiple LDAP fields supported
- "firstName": "givenName",
- "lastName": "sn",
- },
- },
- "locations": {
- "rdn": "ou=Places",
- "filter": None, # additional filter for this type
- "calendarEnabledAttr": "", # attribute controlling enabledForCalendaring
- "calendarEnabledValue": "yes", # "True" value of above attribute
- "associatedAddressAttr": "",
- "mapping": { # maps internal record names to LDAP
- "recordName": "cn",
- "fullName": "cn",
- "emailAddresses": ["mail"], # multiple LDAP fields supported
- },
- },
- "resources": {
- "rdn": "ou=Resources",
- "filter": None, # additional filter for this type
- "calendarEnabledAttr": "", # attribute controlling enabledForCalendaring
- "calendarEnabledValue": "yes", # "True" value of above attribute
- "mapping": { # maps internal record names to LDAP
- "recordName": "cn",
- "fullName": "cn",
- "emailAddresses": ["mail"], # multiple LDAP fields supported
- },
- },
- "addresses": {
- "rdn": "ou=Buildings",
- "filter": None, # additional filter for this type
- "streetAddressAttr": "",
- "geoAttr": "",
- "mapping": { # maps internal record names to LDAP
- "recordName": "cn",
- "fullName": "cn",
- },
- },
- },
- "groupSchema": {
- "membersAttr": "member", # how members are specified
- "nestedGroupsAttr": None, # how nested groups are specified
- "memberIdAttr": None, # which attribute the above refer to (None means use DN)
- },
- "resourceSchema": {
- # Either set this attribute to retrieve the plist version
- # of resource-info, as in a Leopard OD server, or...
- "resourceInfoAttr": None,
- # ...set the above to None and instead specify these
- # individually:
- "autoScheduleAttr": None,
- "autoScheduleEnabledValue": "yes",
- "proxyAttr": None, # list of GUIDs
- "readOnlyProxyAttr": None, # list of GUIDs
- "autoAcceptGroupAttr": None, # single group GUID
- },
- "poddingSchema": {
- "serverIdAttr": None, # maps to augments server-id
- },
- }
- ignored = None
- params = self.getParams(params, defaults, ignored)
-
- self._recordTypes = params["recordTypes"]
-
- super(LdapDirectoryService, self).__init__(params["cacheTimeout"],
- params["negativeCaching"])
-
- self.warningThresholdSeconds = params["warningThresholdSeconds"]
- self.batchSize = params["batchSize"]
- self.requestTimeoutSeconds = params["requestTimeoutSeconds"]
- self.requestResultsLimit = params["requestResultsLimit"]
- self.optimizeMultiName = params["optimizeMultiName"]
- if self.batchSize > self.requestResultsLimit:
- self.batchSize = self.requestResultsLimit
- self.queryLocationsImplicitly = params["queryLocationsImplicitly"]
- self.augmentService = params["augmentService"]
- self.groupMembershipCache = params["groupMembershipCache"]
- self.realmName = params["uri"]
- self.uri = params["uri"]
- self.tls = params["tls"]
- self.tlsCACertFile = params["tlsCACertFile"]
- self.tlsCACertDir = params["tlsCACertDir"]
- self.tlsRequireCert = params["tlsRequireCert"]
- self.credentials = params["credentials"]
- self.authMethod = params["authMethod"]
- self.rdnSchema = params["rdnSchema"]
- self.groupSchema = params["groupSchema"]
- self.resourceSchema = params["resourceSchema"]
- self.poddingSchema = params["poddingSchema"]
-
- self.base = ldap.dn.str2dn(self.rdnSchema["base"])
-
- # Certain attributes (such as entryUUID) may be hidden and not
- # returned by default when queried for all attributes. Therefore it is
- # necessary to explicitly pass all the possible attributes list
- # for ldap searches. Dynamically build the attribute list based on
- # config.
- attrSet = set()
-
- if self.rdnSchema["guidAttr"]:
- attrSet.add(self.rdnSchema["guidAttr"])
- for recordType in self.recordTypes():
- if self.rdnSchema[recordType]["attr"]:
- attrSet.add(self.rdnSchema[recordType]["attr"])
- for n in ("calendarEnabledAttr", "associatedAddressAttr",
- "streetAddressAttr", "geoAttr"):
- if self.rdnSchema[recordType].get(n, False):
- attrSet.add(self.rdnSchema[recordType][n])
- for attrList in self.rdnSchema[recordType]["mapping"].values():
- if attrList:
- # Since emailAddresses can map to multiple LDAP fields,
- # support either string or list
- if isinstance(attrList, str):
- attrList = [attrList]
- for attr in attrList:
- attrSet.add(attr)
- # Also put the guidAttr attribute into the mappings for each type
- # so recordsMatchingFields can query on guid
- self.rdnSchema[recordType]["mapping"]["guid"] = self.rdnSchema["guidAttr"]
- # Also put the memberIdAttr attribute into the mappings for each type
- # so recordsMatchingFields can query on memberIdAttr
- self.rdnSchema[recordType]["mapping"]["memberIdAttr"] = self.groupSchema["memberIdAttr"]
- if self.groupSchema["membersAttr"]:
- attrSet.add(self.groupSchema["membersAttr"])
- if self.groupSchema["nestedGroupsAttr"]:
- attrSet.add(self.groupSchema["nestedGroupsAttr"])
- if self.groupSchema["memberIdAttr"]:
- attrSet.add(self.groupSchema["memberIdAttr"])
- if self.rdnSchema["users"]["loginEnabledAttr"]:
- attrSet.add(self.rdnSchema["users"]["loginEnabledAttr"])
- if self.resourceSchema["resourceInfoAttr"]:
- attrSet.add(self.resourceSchema["resourceInfoAttr"])
- if self.resourceSchema["autoScheduleAttr"]:
- attrSet.add(self.resourceSchema["autoScheduleAttr"])
- if self.resourceSchema["autoAcceptGroupAttr"]:
- attrSet.add(self.resourceSchema["autoAcceptGroupAttr"])
- if self.resourceSchema["proxyAttr"]:
- attrSet.add(self.resourceSchema["proxyAttr"])
- if self.resourceSchema["readOnlyProxyAttr"]:
- attrSet.add(self.resourceSchema["readOnlyProxyAttr"])
- if self.poddingSchema["serverIdAttr"]:
- attrSet.add(self.poddingSchema["serverIdAttr"])
- self.attrlist = list(attrSet)
-
- self.typeDNs = {}
- for recordType in self.recordTypes():
- self.typeDNs[recordType] = ldap.dn.str2dn(
- self.rdnSchema[recordType]["rdn"].lower()
- ) + self.base
-
- self.ldap = None
-
- # Separate LDAP connection used solely for authenticating clients
- self.authLDAP = None
-
- # Restricting access by directory group
- self.restrictEnabledRecords = params['restrictEnabledRecords']
- self.restrictToGroup = params['restrictToGroup']
- self.restrictedTimestamp = 0
-
-
- def recordTypes(self):
- return self._recordTypes
-
-
- def listRecords(self, recordType):
-
- # Build base for this record Type
- base = self.typeDNs[recordType]
-
- # Build filter
- filterstr = "(!(objectClass=organizationalUnit))"
- typeFilter = self.rdnSchema[recordType].get("filter", "")
- if typeFilter:
- filterstr = "(&%s%s)" % (filterstr, typeFilter)
-
- # Query the LDAP server
- self.log.debug(
- "Querying ldap for records matching base {base} and "
- "filter {filter} for attributes {attrs}.",
- base=ldap.dn.dn2str(base), filter=filterstr,
- attrs=self.attrlist
- )
-
- # This takes a while, so if you don't want to have a "long request"
- # warning logged, use this instead of timedSearch:
- # results = self.ldap.search_s(ldap.dn.dn2str(base),
- # ldap.SCOPE_SUBTREE, filterstr=filterstr, attrlist=self.attrlist)
- results = self.timedSearch(
- ldap.dn.dn2str(base), ldap.SCOPE_SUBTREE,
- filterstr=filterstr, attrlist=self.attrlist
- )
-
- records = []
- numMissingGuids = 0
- guidAttr = self.rdnSchema["guidAttr"]
- for dn, attrs in results:
- dn = normalizeDNstr(dn)
-
- unrestricted = self.isAllowedByRestrictToGroup(dn, attrs)
-
- try:
- record = self._ldapResultToRecord(dn, attrs, recordType)
- # self.log.debug("Got LDAP record {record}", record=record)
- except MissingGuidException:
- numMissingGuids += 1
- continue
-
- if not unrestricted:
- self.log.debug(
- "{dn} is not enabled because it's not a member of group: "
- "{group}", dn=dn, group=self.restrictToGroup
- )
- record.enabledForCalendaring = False
- record.enabledForAddressBooks = False
-
- records.append(record)
-
- if numMissingGuids:
- self.log.info(
- "{num} {recordType} records are missing {attr}",
- num=numMissingGuids, recordType=recordType, attr=guidAttr
- )
-
- return records
-
-
- @inlineCallbacks
- def recordWithCachedGroupsAlias(self, recordType, alias):
- """
- @param recordType: the type of the record to look up.
- @param alias: the cached-groups alias of the record to look up.
- @type alias: C{str}
-
- @return: a deferred L{IDirectoryRecord} with the given cached-groups
- alias, or C{None} if no such record is found.
- """
- memberIdAttr = self.groupSchema["memberIdAttr"]
- attributeToSearch = "memberIdAttr" if memberIdAttr else "dn"
-
- fields = [[attributeToSearch, alias, False, "equals"]]
- results = yield self.recordsMatchingFields(
- fields, recordType=recordType
- )
- if results:
- returnValue(results[0])
- else:
- returnValue(None)
-
-
- def getExternalProxyAssignments(self):
- """
- Retrieve proxy assignments for locations and resources from the
- directory and return a list of (principalUID, ([memberUIDs)) tuples,
- suitable for passing to proxyDB.setGroupMembers( )
- """
- assignments = []
-
- guidAttr = self.rdnSchema["guidAttr"]
- readAttr = self.resourceSchema["readOnlyProxyAttr"]
- writeAttr = self.resourceSchema["proxyAttr"]
- if not (guidAttr and readAttr and writeAttr):
- self.log.error(
- "LDAP configuration requires guidAttr, proxyAttr, and "
- "readOnlyProxyAttr in order to use external proxy assignments "
- "efficiently; falling back to slower method"
- )
- # Fall back to the less-specialized version
- return super(
- LdapDirectoryService, self
- ).getExternalProxyAssignments()
-
- # Build filter
- filterstr = "(|(%s=*)(%s=*))" % (readAttr, writeAttr)
- # ...taking into account only calendar-enabled records
- enabledAttr = self.rdnSchema["locations"]["calendarEnabledAttr"]
- enabledValue = self.rdnSchema["locations"]["calendarEnabledValue"]
- if enabledAttr and enabledValue:
- filterstr = "(&(%s=%s)%s)" % (enabledAttr, enabledValue, filterstr)
-
- attrlist = [guidAttr, readAttr, writeAttr]
-
- # Query the LDAP server
- self.log.debug(
- "Querying ldap for records matching base {base} and filter "
- "{filter} for attributes {attrs}.",
- base=ldap.dn.dn2str(self.base), filter=filterstr,
- attrs=attrlist
- )
-
- results = self.timedSearch(ldap.dn.dn2str(self.base),
- ldap.SCOPE_SUBTREE, filterstr=filterstr,
- attrlist=attrlist)
-
- for dn, attrs in results:
- dn = normalizeDNstr(dn)
- guid = self._getUniqueLdapAttribute(attrs, guidAttr)
- if guid:
- guid = normalizeUUID(guid)
- readDelegate = self._getUniqueLdapAttribute(attrs, readAttr)
- if readDelegate:
- readDelegate = normalizeUUID(readDelegate)
- assignments.append(
- ("%s#calendar-proxy-read" % (guid,), [readDelegate])
- )
- writeDelegate = self._getUniqueLdapAttribute(attrs, writeAttr)
- if writeDelegate:
- writeDelegate = normalizeUUID(writeDelegate)
- assignments.append(
- ("%s#calendar-proxy-write" % (guid,), [writeDelegate])
- )
-
- return assignments
-
-
- def getLDAPConnection(self):
- if self.ldap is None:
- self.log.info("Connecting to LDAP {uri}", uri=repr(self.uri))
- self.ldap = self.createLDAPConnection()
- self.log.info(
- "Connection established to LDAP {uri}", uri=repr(self.uri)
- )
- if self.credentials.get("dn", ""):
- try:
- self.log.info(
- "Binding to LDAP {dn}",
- dn=repr(self.credentials.get("dn"))
- )
- self.ldap.simple_bind_s(
- self.credentials.get("dn"),
- self.credentials.get("password"),
- )
- self.log.info(
- "Successfully authenticated with LDAP as {dn}",
- dn=repr(self.credentials.get("dn"))
- )
- except ldap.INVALID_CREDENTIALS:
- self.log.error(
- "Can't bind to LDAP {uri}: check credentials",
- uri=self.uri
- )
- raise DirectoryConfigurationError()
-
- return self.ldap
-
-
- def createLDAPConnection(self):
- """
- Create and configure LDAP connection
- """
- cxn = ldap.initialize(self.uri)
-
- if self.tlsCACertFile:
- cxn.set_option(ldap.OPT_X_TLS_CACERTFILE, self.tlsCACertFile)
- if self.tlsCACertDir:
- cxn.set_option(ldap.OPT_X_TLS_CACERTDIR, self.tlsCACertDir)
-
- if self.tlsRequireCert == "never":
- cxn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_NEVER)
- elif self.tlsRequireCert == "allow":
- cxn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_ALLOW)
- elif self.tlsRequireCert == "try":
- cxn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_TRY)
- elif self.tlsRequireCert == "demand":
- cxn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND)
- elif self.tlsRequireCert == "hard":
- cxn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_HARD)
-
- if self.tls:
- cxn.start_tls_s()
-
- return cxn
-
-
- def authenticate(self, dn, password):
- """
- Perform simple bind auth, raising ldap.INVALID_CREDENTIALS if
- bad password
- """
- TRIES = 3
-
- for _ignore_i in xrange(TRIES):
- self.log.debug("Authenticating {dn}", dn=dn)
-
- if self.authLDAP is None:
- self.log.debug("Creating authentication connection to LDAP")
- self.authLDAP = self.createLDAPConnection()
-
- try:
- startTime = time.time()
- self.authLDAP.simple_bind_s(dn, password)
- # Getting here means success, so break the retry loop
- break
-
- except ldap.INAPPROPRIATE_AUTH:
- # Seen when using an empty password, treat as invalid creds
- raise ldap.INVALID_CREDENTIALS()
-
- except ldap.NO_SUCH_OBJECT:
- self.log.error(
- "LDAP Authentication error for {dn}: NO_SUCH_OBJECT",
- dn=dn
- )
- # fall through to try again; could be transient
-
- except ldap.INVALID_CREDENTIALS:
- raise
-
- except ldap.SERVER_DOWN:
- self.log.error("Lost connection to LDAP server.")
- self.authLDAP = None
- # Fall through and retry if TRIES has been reached
-
- except Exception, e:
- self.log.error(
- "LDAP authentication failed with {e}.", e=e
- )
- raise
-
- finally:
- totalTime = time.time() - startTime
- if totalTime > self.warningThresholdSeconds:
- self.log.error(
- "LDAP auth exceeded threshold: {time:.2f} seconds for "
- "{dn}", time=totalTime, dn=dn
- )
-
- else:
- self.log.error(
- "Giving up on LDAP authentication after {count:d} tries. "
- "Responding with 503.", count=TRIES
- )
- raise HTTPError(StatusResponse(
- responsecode.SERVICE_UNAVAILABLE, "LDAP server unavailable"
- ))
-
- self.log.debug("Authentication succeeded for {dn}", dn=dn)
-
-
def timedSearch(
    self, base, scope, filterstr="(objectClass=*)", attrlist=None,
    timeoutSeconds=-1, resultLimit=0
):
    """
    Execute an LDAP query, retrying up to 3 times in case the LDAP server
    has gone down and we need to reconnect. If it takes longer than the
    configured threshold, emit a log error.
    The number of records requested is controlled by resultLimit (0=no
    limit).
    If timeoutSeconds is not -1, the query will abort after the specified
    number of seconds and the results retrieved so far are returned.

    @param base: DN string to use as the search base
    @param scope: one of the ldap.SCOPE_* constants
    @param filterstr: LDAP filter string to apply
    @param attrlist: attributes to return, or None for all
    @param timeoutSeconds: per-query timeout, -1 for no timeout
    @param resultLimit: maximum number of results, 0 for unlimited
    @return: list of (dn, attrs) tuples; may be partial if the size or
        time limit was hit
    @raise HTTPError: 503 if the server stays down for all retries
    """
    TRIES = 3

    for i in xrange(TRIES):
        try:
            s = ldap.async.List(self.getLDAPConnection())
            s.startSearch(
                base, scope, filterstr, attrList=attrlist,
                timeout=timeoutSeconds, sizelimit=resultLimit
            )
            startTime = time.time()
            s.processResults()
        except ldap.NO_SUCH_OBJECT:
            # Search base doesn't exist: treat as an empty result set
            return []
        except ldap.FILTER_ERROR, e:
            self.log.error(
                "LDAP filter error: {e} {filter}", e=e, filter=filterstr
            )
            return []
        except ldap.SIZELIMIT_EXCEEDED, e:
            # Not fatal: fall through and return the partial results
            # accumulated so far
            self.log.debug(
                "LDAP result limit exceeded: {limit:d}", limit=resultLimit
            )
        except ldap.TIMELIMIT_EXCEEDED, e:
            # Not fatal: fall through and return the partial results
            self.log.warn(
                "LDAP timeout exceeded: {t:d} seconds", t=timeoutSeconds
            )
        except ldap.SERVER_DOWN:
            # Drop the broken connection and retry; getLDAPConnection()
            # will reconnect on the next iteration
            self.ldap = None
            self.log.error(
                "LDAP server unavailable (tried {count:d} times)",
                count=(i + 1)
            )
            continue

        # change format, ignoring resultsType
        result = [
            resultItem for _ignore_resultType, resultItem in s.allResults
        ]

        totalTime = time.time() - startTime
        if totalTime > self.warningThresholdSeconds:
            # Truncate very long filters so the log line stays readable
            if filterstr and len(filterstr) > 100:
                filterstr = "%s..." % (filterstr[:100],)
            self.log.error(
                "LDAP query exceeded threshold: {time:.2f} seconds for "
                "{base} {filter} {attrs} (#results={count:d})",
                time=totalTime, base=base, filter=filterstr,
                attrs=attrlist, count=len(result),
            )
        return result

    # All retries exhausted without reaching the server
    raise HTTPError(StatusResponse(
        responsecode.SERVICE_UNAVAILABLE, "LDAP server unavailable"
    ))
-
-
def isAllowedByRestrictToGroup(self, dn, attrs):
    """
    Determine whether the principal identified by the given DN and LDAP
    attributes passes the restrict-to-group check.

    @param dn: an LDAP dn
    @type dn: C{str}
    @param attrs: LDAP attributes
    @type attrs: C{dict}
    @return: True if principal is in the group (or restrictEnabledRecords if turned off).
    @rtype: C{boolean}
    """
    # Restriction disabled entirely: everyone is allowed
    if not self.restrictEnabledRecords:
        return True

    memberIdAttr = self.groupSchema["memberIdAttr"]
    if memberIdAttr:
        # Members are identified by a dedicated attribute value
        identifier = self._getUniqueLdapAttribute(attrs, memberIdAttr)
    else:
        # No memberIdAttr configured implies members are identified by DN
        identifier = dn

    return identifier in self.restrictedPrincipals
-
-
@property
def restrictedPrincipals(self):
    """
    Look up (and cache) the set of guids that are members of the
    restrictToGroup. If restrictToGroup is not set, return None to
    indicate there are no group restrictions.

    The cached set is refreshed when it is older than cacheTimeout
    seconds; membership is expanded recursively through nested groups.
    """
    if self.restrictEnabledRecords:

        if time.time() - self.restrictedTimestamp > self.cacheTimeout:
            # fault in the members of group of name self.restrictToGroup
            recordType = self.recordType_groups
            base = self.typeDNs[recordType]
            # TODO: This shouldn't be hardcoded to cn
            filterstr = "(cn=%s)" % (self.restrictToGroup,)
            self.log.debug(
                "Retrieving ldap record with base {base} and filter "
                "{filter}.",
                base=ldap.dn.dn2str(base), filter=filterstr
            )
            result = self.timedSearch(
                ldap.dn.dn2str(base),
                ldap.SCOPE_SUBTREE,
                filterstr=filterstr,
                attrlist=self.attrlist
            )

            members = []
            nestedGroups = []

            # Expect exactly one group record to match; otherwise the
            # restricted set ends up empty
            if len(result) == 1:
                dn, attrs = result[0]
                dn = normalizeDNstr(dn)
                if self.groupSchema["membersAttr"]:
                    members = self._getMultipleLdapAttributes(
                        attrs,
                        self.groupSchema["membersAttr"]
                    )
                    if not self.groupSchema["memberIdAttr"]:  # DNs
                        members = [normalizeDNstr(m) for m in members]
                    members = set(members)

                if self.groupSchema["nestedGroupsAttr"]:
                    nestedGroups = self._getMultipleLdapAttributes(
                        attrs,
                        self.groupSchema["nestedGroupsAttr"]
                    )
                    if not self.groupSchema["memberIdAttr"]:  # DNs
                        nestedGroups = [
                            normalizeDNstr(g) for g in nestedGroups
                        ]
                    nestedGroups = set(nestedGroups)
                else:
                    # Since all members are lumped into the same attribute,
                    # treat them all as nestedGroups instead
                    nestedGroups = members
                    members = set()

            self._cachedRestrictedPrincipals = set(
                self._expandGroupMembership(members, nestedGroups)
            )
            self.log.info(
                "Got {count} restricted group members",
                count=len(self._cachedRestrictedPrincipals)
            )
            self.restrictedTimestamp = time.time()
        return self._cachedRestrictedPrincipals
    else:
        # No restrictions
        return None
-
-
def _expandGroupMembership(self, members, nestedGroups, processedItems=None):
    """
    A generator which recursively yields principals which are included within nestedGroups

    @param members: If the LDAP service is configured to use different attributes to
        indicate member users and member nested groups, members will include the non-groups.
        Otherwise, members will be empty and only nestedGroups will be used.
    @type members: C{set}
    @param nestedGroups: If the LDAP service is configured to use different attributes to
        indicate member users and member nested groups, nestedGroups will include only
        the groups; otherwise nestedGroups will include all members
    @type nestedGroups: C{set}
    @param processedItems: The set of members that have already been looked up in LDAP
        so the code doesn't have to look up the same member twice or get stuck in a
        membership loop.
    @type processedItems: C{set}
    @return: All members of the group, the values will correspond to memberIdAttr
        if memberIdAttr is set in the group schema, or DNs otherwise.
    @rtype: generator of C{str}
    """

    if processedItems is None:
        processedItems = set()

    # Tolerate a single value where a collection is expected
    if isinstance(members, str):
        members = [members]

    if isinstance(nestedGroups, str):
        nestedGroups = [nestedGroups]

    # Direct (non-group) members are yielded as-is
    for member in members:
        if member not in processedItems:
            processedItems.add(member)
            yield member

    for group in nestedGroups:
        if group in processedItems:
            continue

        recordType = self.recordType_groups
        base = self.typeDNs[recordType]
        if self.groupSchema["memberIdAttr"]:
            # Look the group up by its member-id attribute value
            scope = ldap.SCOPE_SUBTREE
            base = self.typeDNs[recordType]
            filterstr = "(%s=%s)" % (self.groupSchema["memberIdAttr"], group)
        else:  # Use DN
            scope = ldap.SCOPE_BASE
            base = ldap.dn.str2dn(group)
            filterstr = "(objectClass=*)"

        self.log.debug(
            "Retrieving ldap record with base {base} and filter {filter}.",
            base=ldap.dn.dn2str(base), filter=filterstr
        )
        result = self.timedSearch(ldap.dn.dn2str(base),
                                  scope,
                                  filterstr=filterstr,
                                  attrlist=self.attrlist)

        if len(result) == 0:
            continue

        subMembers = set()
        subNestedGroups = set()
        if len(result) == 1:
            dn, attrs = result[0]
            dn = normalizeDNstr(dn)
            if self.groupSchema["membersAttr"]:
                subMembers = self._getMultipleLdapAttributes(
                    attrs,
                    self.groupSchema["membersAttr"]
                )
                if not self.groupSchema["memberIdAttr"]:  # these are DNs
                    subMembers = [normalizeDNstr(m) for m in subMembers]
                subMembers = set(subMembers)

            if self.groupSchema["nestedGroupsAttr"]:
                subNestedGroups = self._getMultipleLdapAttributes(
                    attrs,
                    self.groupSchema["nestedGroupsAttr"]
                )
                if not self.groupSchema["memberIdAttr"]:  # these are DNs
                    subNestedGroups = [normalizeDNstr(g) for g in subNestedGroups]
                subNestedGroups = set(subNestedGroups)

        # Mark the group processed before recursing to avoid loops
        processedItems.add(group)
        yield group

        for item in self._expandGroupMembership(subMembers,
                                                subNestedGroups,
                                                processedItems):
            yield item
-
-
- def _getUniqueLdapAttribute(self, attrs, *keys):
- """
- Get the first value for one or several attributes
- Useful when attributes have aliases (e.g. sn vs. surname)
- """
- for key in keys:
- values = attrs.get(key)
- if values is not None:
- return values[0]
- return None
-
-
- def _getMultipleLdapAttributes(self, attrs, *keys):
- """
- Get all values for one or several attributes
- """
- results = []
- for key in keys:
- if key:
- values = attrs.get(key)
- if values is not None:
- results += values
- return results
-
-
def _ldapResultToRecord(self, dn, attrs, recordType):
    """
    Convert the attrs returned by a LDAP search into a LdapDirectoryRecord
    object.

    If guidAttr was specified in the config but is missing from attrs,
    raises MissingGuidException

    @param dn: the record's LDAP DN
    @param attrs: dict of LDAP attribute name -> list of values
    @param recordType: one of the service's recordType constants
    @raise MissingRecordNameException: when no record name attribute value
        is present
    """

    guid = None
    authIDs = set()
    fullName = None
    firstName = ""
    lastName = ""
    emailAddresses = set()
    enabledForCalendaring = None
    enabledForAddressBooks = None
    uid = None
    enabledForLogin = True
    extras = {}

    # A record name is mandatory; bail out early without one
    shortNames = tuple(self._getMultipleLdapAttributes(attrs, self.rdnSchema[recordType]["mapping"]["recordName"]))
    if not shortNames:
        raise MissingRecordNameException()

    # First check for and add guid
    guidAttr = self.rdnSchema["guidAttr"]
    if guidAttr:
        guid = self._getUniqueLdapAttribute(attrs, guidAttr)
        if not guid:
            self.log.debug(
                "LDAP data for {shortNames} is missing guid attribute "
                "{attr}",
                shortNames=shortNames, attr=guidAttr
            )
            raise MissingGuidException()
        guid = normalizeUUID(guid)

    # Find or build email
    # (The emailAddresses mapping is a list of ldap fields)
    emailAddressesMappedTo = self.rdnSchema[recordType]["mapping"].get("emailAddresses", "")
    # Supporting either string or list for emailAddresses:
    if isinstance(emailAddressesMappedTo, str):
        emailAddresses = set(self._getMultipleLdapAttributes(attrs, self.rdnSchema[recordType]["mapping"].get("emailAddresses", "")))
    else:
        emailAddresses = set(self._getMultipleLdapAttributes(attrs, *self.rdnSchema[recordType]["mapping"]["emailAddresses"]))
    emailSuffix = self.rdnSchema[recordType].get("emailSuffix", None)

    if len(emailAddresses) == 0 and emailSuffix:
        # Synthesize an address from the naming attribute plus suffix
        emailPrefix = self._getUniqueLdapAttribute(
            attrs,
            self.rdnSchema[recordType].get("attr", "cn")
        )
        emailAddresses.add(emailPrefix + emailSuffix)

    proxyGUIDs = ()
    readOnlyProxyGUIDs = ()
    autoSchedule = False
    autoAcceptGroup = ""
    memberGUIDs = []

    # LDAP attribute -> principal matchings
    if recordType == self.recordType_users:
        fullName = self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["mapping"]["fullName"])
        firstName = self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["mapping"]["firstName"])
        lastName = self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["mapping"]["lastName"])
        enabledForCalendaring = True
        enabledForAddressBooks = True

    elif recordType == self.recordType_groups:
        fullName = self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["mapping"]["fullName"])
        enabledForCalendaring = False
        enabledForAddressBooks = False
        enabledForLogin = False

        if self.groupSchema["membersAttr"]:
            members = self._getMultipleLdapAttributes(attrs, self.groupSchema["membersAttr"])
            memberGUIDs.extend(members)
        if self.groupSchema["nestedGroupsAttr"]:
            members = self._getMultipleLdapAttributes(attrs, self.groupSchema["nestedGroupsAttr"])
            memberGUIDs.extend(members)

        # Normalize members if they're in DN form
        if not self.groupSchema["memberIdAttr"]:  # empty = dn
            guids = list(memberGUIDs)
            memberGUIDs = []
            for dnStr in guids:
                try:
                    dnStr = normalizeDNstr(dnStr)
                    memberGUIDs.append(dnStr)
                except Exception, e:
                    # LDAP returned an illegal DN value, log and ignore it
                    self.log.warn("Bad LDAP DN: {dn!r}", dn=dnStr)

    elif recordType in (self.recordType_resources,
                        self.recordType_locations):
        fullName = self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["mapping"]["fullName"])
        enabledForCalendaring = True
        enabledForAddressBooks = False
        enabledForLogin = False
        if self.resourceSchema["resourceInfoAttr"]:
            # A single blob attribute carries all the scheduling info
            resourceInfo = self._getUniqueLdapAttribute(
                attrs,
                self.resourceSchema["resourceInfoAttr"]
            )
            if resourceInfo:
                try:
                    (
                        autoSchedule,
                        proxy,
                        readOnlyProxy,
                        autoAcceptGroup
                    ) = self.parseResourceInfo(
                        resourceInfo,
                        guid,
                        recordType,
                        shortNames[0]
                    )
                    if proxy:
                        proxyGUIDs = (proxy,)
                    if readOnlyProxy:
                        readOnlyProxyGUIDs = (readOnlyProxy,)
                except ValueError, e:
                    self.log.error(
                        "Unable to parse resource info: {e}", e=e
                    )
        else:  # the individual resource attributes might be specified
            if self.resourceSchema["autoScheduleAttr"]:
                autoScheduleValue = self._getUniqueLdapAttribute(
                    attrs,
                    self.resourceSchema["autoScheduleAttr"]
                )
                autoSchedule = (
                    autoScheduleValue == self.resourceSchema["autoScheduleEnabledValue"]
                )
            if self.resourceSchema["proxyAttr"]:
                proxyGUIDs = set(
                    self._getMultipleLdapAttributes(
                        attrs,
                        self.resourceSchema["proxyAttr"]
                    )
                )
            if self.resourceSchema["readOnlyProxyAttr"]:
                readOnlyProxyGUIDs = set(
                    self._getMultipleLdapAttributes(
                        attrs,
                        self.resourceSchema["readOnlyProxyAttr"]
                    )
                )
            if self.resourceSchema["autoAcceptGroupAttr"]:
                autoAcceptGroup = self._getUniqueLdapAttribute(
                    attrs,
                    self.resourceSchema["autoAcceptGroupAttr"]
                )

        if recordType == self.recordType_locations:
            if self.rdnSchema[recordType].get("associatedAddressAttr", ""):
                associatedAddress = self._getUniqueLdapAttribute(
                    attrs,
                    self.rdnSchema[recordType]["associatedAddressAttr"]
                )
                if associatedAddress:
                    extras["associatedAddress"] = associatedAddress

    elif recordType == self.recordType_addresses:
        if self.rdnSchema[recordType].get("geoAttr", ""):
            geo = self._getUniqueLdapAttribute(
                attrs,
                self.rdnSchema[recordType]["geoAttr"]
            )
            if geo:
                extras["geo"] = geo
        if self.rdnSchema[recordType].get("streetAddressAttr", ""):
            street = self._getUniqueLdapAttribute(
                attrs,
                self.rdnSchema[recordType]["streetAddressAttr"]
            )
            if street:
                extras["streetAddress"] = street

    serverID = None
    if self.poddingSchema["serverIdAttr"]:
        serverID = self._getUniqueLdapAttribute(
            attrs,
            self.poddingSchema["serverIdAttr"]
        )

    record = LdapDirectoryRecord(
        service=self,
        recordType=recordType,
        guid=guid,
        shortNames=shortNames,
        authIDs=authIDs,
        fullName=fullName,
        firstName=firstName,
        lastName=lastName,
        emailAddresses=emailAddresses,
        uid=uid,
        dn=dn,
        memberGUIDs=memberGUIDs,
        extProxies=proxyGUIDs,
        extReadOnlyProxies=readOnlyProxyGUIDs,
        attrs=attrs,
        **extras
    )

    if self.augmentService is not None:
        # Look up augment information
        # TODO: this needs to be deferred but for now we hard code
        # the deferred result because we know it is completing
        # immediately.
        d = self.augmentService.getAugmentRecord(record.guid, recordType)
        d.addCallback(lambda x: record.addAugmentInformation(x))

    else:
        # Generate augment record based on information retrieved from LDAP
        augmentRecord = AugmentRecord(
            guid,
            enabled=True,
            serverID=serverID,
            enabledForCalendaring=enabledForCalendaring,
            autoSchedule=autoSchedule,
            autoAcceptGroup=autoAcceptGroup,
            enabledForAddressBooks=enabledForAddressBooks,  # TODO: add to LDAP?
            enabledForLogin=enabledForLogin,
        )
        record.addAugmentInformation(augmentRecord)

    # Override with LDAP login control if attribute specified
    if recordType == self.recordType_users:
        loginEnabledAttr = self.rdnSchema[recordType]["loginEnabledAttr"]
        if loginEnabledAttr:
            loginEnabledValue = self.rdnSchema[recordType]["loginEnabledValue"]
            record.enabledForLogin = self._getUniqueLdapAttribute(
                attrs, loginEnabledAttr
            ) == loginEnabledValue

        # Override with LDAP calendar-enabled control if attribute specified
        calendarEnabledAttr = self.rdnSchema[recordType].get("calendarEnabledAttr", "")
        if calendarEnabledAttr:
            calendarEnabledValue = self.rdnSchema[recordType]["calendarEnabledValue"]
            record.enabledForCalendaring = self._getUniqueLdapAttribute(
                attrs,
                calendarEnabledAttr
            ) == calendarEnabledValue

    return record
-
-
def queryDirectory(
    self, recordTypes, indexType, indexKey, queryMethod=None
):
    """
    Queries the LDAP directory for the record which has an attribute value
    matching the indexType and indexKey parameters.

    recordTypes is a list of record types to limit the search to.
    indexType specifies one of the CachingDirectoryService constants
    identifying which attribute to search on.
    indexKey is the value to search for.

    Nothing is returned -- the resulting record (if any) is placed in
    the cache.

    @param queryMethod: search callable to use; defaults to timedSearch
    """

    if queryMethod is None:
        queryMethod = self.timedSearch

    self.log.debug(
        "LDAP query for types {types}, indexType {indexType} and "
        "indexKey {indexKey}",
        types=recordTypes, indexType=indexType, indexKey=indexKey
    )

    guidAttr = self.rdnSchema["guidAttr"]
    for recordType in recordTypes:
        # Build base for this record Type
        base = self.typeDNs[recordType]

        # Build filter; exclude container entries by default
        filterstr = "(!(objectClass=organizationalUnit))"
        typeFilter = self.rdnSchema[recordType].get("filter", "")
        if typeFilter:
            filterstr = "(&%s%s)" % (filterstr, typeFilter)

        if indexType == self.INDEX_TYPE_GUID:
            # Query on guid only works if guid attribute has been defined.
            # Support for query on guid even if is auto-generated should
            # be added.
            if not guidAttr:
                return
            filterstr = "(&%s(%s=%s))" % (filterstr, guidAttr, indexKey)

        elif indexType == self.INDEX_TYPE_SHORTNAME:
            filterstr = "(&%s(%s=%s))" % (
                filterstr,
                self.rdnSchema[recordType]["mapping"]["recordName"],
                ldapEsc(indexKey)
            )

        elif indexType == self.INDEX_TYPE_CUA:
            # indexKey is of the form "mailto:user@example.net"
            email = indexKey[7:]  # strip "mailto:"
            emailSuffix = self.rdnSchema[recordType].get(
                "emailSuffix", None
            )
            if (
                emailSuffix is not None and
                email.partition("@")[2] == emailSuffix
            ):
                # Address matches the configured suffix: match either the
                # synthesized address (no mail attribute) or a real one
                filterstr = "(&%s(|(&(!(mail=*))(%s=%s))(mail=%s)))" % (
                    filterstr,
                    self.rdnSchema[recordType].get("attr", "cn"),
                    email.partition("@")[0],
                    ldapEsc(email)
                )
            else:
                # emailAddresses can map to multiple LDAP fields
                ldapFields = self.rdnSchema[recordType]["mapping"].get(
                    "emailAddresses", ""
                )
                if isinstance(ldapFields, str):
                    if ldapFields:
                        subfilter = (
                            "(%s=%s)" % (ldapFields, ldapEsc(email))
                        )
                    else:
                        # No LDAP attribute assigned for emailAddresses
                        continue

                else:
                    subfilter = []
                    for ldapField in ldapFields:
                        if ldapField:
                            subfilter.append(
                                "(%s=%s)" % (ldapField, ldapEsc(email))
                            )
                    if not subfilter:
                        # No LDAP attribute assigned for emailAddresses
                        continue

                    subfilter = "(|%s)" % ("".join(subfilter))
                filterstr = "(&%s%s)" % (filterstr, subfilter)

        elif indexType == self.INDEX_TYPE_AUTHID:
            return

        # Query the LDAP server
        # Fixed: this log call previously used old-style "%s" placeholders
        # with keyword arguments, so they were never substituted; use the
        # {name} style the rest of this module uses.
        self.log.debug(
            "Retrieving ldap record with base {base} and filter {filter}.",
            base=ldap.dn.dn2str(base), filter=filterstr,
        )
        result = queryMethod(
            ldap.dn.dn2str(base),
            ldap.SCOPE_SUBTREE,
            filterstr=filterstr,
            attrlist=self.attrlist,
        )

        if result:
            dn, attrs = result.pop()
            dn = normalizeDNstr(dn)

            unrestricted = self.isAllowedByRestrictToGroup(dn, attrs)

            try:
                record = self._ldapResultToRecord(dn, attrs, recordType)
                self.log.debug("Got LDAP record {rec}", rec=record)

                if not unrestricted:
                    self.log.debug(
                        "{dn} is not enabled because it's not a member of "
                        "group {group!r}",
                        dn=dn, group=self.restrictToGroup
                    )
                    record.enabledForCalendaring = False
                    record.enabledForAddressBooks = False

                record.applySACLs()

                self.recordCacheForType(recordType).addRecord(
                    record, indexType, indexKey
                )

                # We got a match, so don't bother checking other types
                break

            except MissingRecordNameException:
                # Fixed: the format string references {recordType}, but the
                # keyword passed was recordTypes; supply the matching name.
                self.log.warn(
                    "Ignoring record missing record name "
                    "attribute: recordType {recordType}, indexType "
                    "{indexType} and indexKey {indexKey}",
                    recordType=recordType, indexType=indexType,
                    indexKey=indexKey,
                )

            except MissingGuidException:
                self.log.warn(
                    "Ignoring record missing guid attribute: "
                    "recordType {recordType}, indexType {indexType} and "
                    "indexKey {indexKey}",
                    recordType=recordType, indexType=indexType,
                    indexKey=indexKey
                )
-
-
def recordsMatchingTokens(self, tokens, context=None, limitResults=50, timeoutSeconds=10):
    """
    # TODO: hook up limitResults to the client limit in the query

    @param tokens: The tokens to search on
    @type tokens: C{list} of C{str} (utf-8 bytes)
    @param context: An indication of what the end user is searching
        for; "attendee", "location", or None
    @type context: C{str}
    @param limitResults: aggregate cap on the number of records returned
    @param timeoutSeconds: per-search timeout passed to timedSearch
    @return: a deferred sequence of L{IDirectoryRecord}s which
        match the given tokens and optional context.

    Each token is searched for within each record's full name and
    email address; if each token is found within a record that
    record is returned in the results.

    If context is None, all record types are considered. If
    context is "location", only locations are considered. If
    context is "attendee", only users, groups, and resources
    are considered.
    """
    # Fixed: "Peforming" typo in the log message.
    self.log.debug(
        "Performing calendar user search for {tokens} ({context})",
        tokens=tokens, context=context
    )
    startTime = time.time()
    records = []
    recordTypes = self.recordTypesForSearchContext(context)
    recordTypes = [r for r in recordTypes if r in self.recordTypes()]

    typeCounts = {}
    for recordType in recordTypes:
        # limitResults is decremented per match; stop once exhausted
        if limitResults == 0:
            self.log.debug("LDAP search aggregate limit reached")
            break
        typeCounts[recordType] = 0
        base = self.typeDNs[recordType]
        scope = ldap.SCOPE_SUBTREE
        extraFilter = self.rdnSchema[recordType].get("filter", "")
        filterstr = buildFilterFromTokens(
            recordType,
            self.rdnSchema[recordType]["mapping"],
            tokens,
            extra=extraFilter
        )

        if filterstr is not None:
            # Query the LDAP server
            self.log.debug(
                "LDAP search {base} {filter} (limit={limit:d})",
                base=ldap.dn.dn2str(base), filter=filterstr,
                limit=limitResults,
            )
            results = self.timedSearch(
                ldap.dn.dn2str(base),
                scope,
                filterstr=filterstr,
                attrlist=self.attrlist,
                timeoutSeconds=timeoutSeconds,
                resultLimit=limitResults
            )
            numMissingGuids = 0
            numMissingRecordNames = 0
            numNotEnabled = 0
            for dn, attrs in results:
                dn = normalizeDNstr(dn)
                # Skip if group restriction is in place and guid is not
                # a member
                if (
                    recordType != self.recordType_groups and
                    not self.isAllowedByRestrictToGroup(dn, attrs)
                ):
                    continue

                try:
                    record = self._ldapResultToRecord(dn, attrs, recordType)

                    # For non-group records, if not enabled for calendaring do
                    # not include in principal property search results
                    if (recordType != self.recordType_groups):
                        if not record.enabledForCalendaring:
                            numNotEnabled += 1
                            continue

                    records.append(record)
                    typeCounts[recordType] += 1
                    limitResults -= 1

                except MissingGuidException:
                    numMissingGuids += 1

                except MissingRecordNameException:
                    numMissingRecordNames += 1

            self.log.debug(
                "LDAP search returned {resultCount:d} results, "
                "{typeCount:d} usable",
                resultCount=len(results), typeCount=typeCounts[recordType]
            )

    typeCountsStr = ", ".join(
        ["%s:%d" % (rt, ct) for (rt, ct) in typeCounts.iteritems()]
    )
    totalTime = time.time() - startTime
    # Fixed: "{time!.2f}" is an invalid conversion spec (only !r/!s are
    # valid conversions); ":.2f" is the correct format spec.
    self.log.info(
        "Calendar user search for {tokens} matched {recordCount:d} "
        "records ({typeCount}) in {time:.2f} seconds",
        tokens=tokens, recordCount=len(records),
        typeCount=typeCountsStr, time=totalTime,
    )
    return succeed(records)
-
-
@inlineCallbacks
def recordsMatchingFields(self, fields, operand="or", recordType=None):
    """
    Carries out the work of a principal-property-search against LDAP
    Returns a deferred list of directory records.

    @param fields: list of (field name, value, caseless, matchType) tuples
    @param operand: "or" or "and", combining the field filters
    @param recordType: restrict the search to one type, or None for all
    """
    records = []

    self.log.debug(
        "Performing principal property search for {fields}", fields=fields
    )

    if recordType is None:
        # Make a copy since we're modifying it
        recordTypes = list(self.recordTypes())

        # principal-property-search syntax doesn't provide a way to ask
        # for 3 of the 4 types (either all types or a single type). This
        # is wasteful in the case of iCal looking for event attendees
        # since it always ignores the locations. This config flag lets
        # you skip querying for locations in this case:
        if not self.queryLocationsImplicitly:
            if self.recordType_locations in recordTypes:
                recordTypes.remove(self.recordType_locations)
    else:
        recordTypes = [recordType]

    guidAttr = self.rdnSchema["guidAttr"]
    for recordType in recordTypes:

        base = self.typeDNs[recordType]

        if fields[0][0] == "dn":
            # DN's are not an attribute that can be searched on by filter
            scope = ldap.SCOPE_BASE
            filterstr = "(objectClass=*)"
            base = ldap.dn.str2dn(fields[0][1])

        else:
            scope = ldap.SCOPE_SUBTREE
            filterstr = buildFilter(
                recordType,
                self.rdnSchema[recordType]["mapping"],
                fields,
                operand=operand,
                optimizeMultiName=self.optimizeMultiName
            )

        if filterstr is not None:
            # Query the LDAP server off the reactor thread since the
            # python-ldap calls block
            self.log.debug(
                "LDAP search {base} {scope} {filter}",
                base=ldap.dn.dn2str(base), scope=scope, filter=filterstr
            )
            results = (yield deferToThread(
                self.timedSearch,
                ldap.dn.dn2str(base),
                scope,
                filterstr=filterstr,
                attrlist=self.attrlist,
                timeoutSeconds=self.requestTimeoutSeconds,
                resultLimit=self.requestResultsLimit)
            )
            self.log.debug(
                "LDAP search returned {count} results", count=len(results)
            )
            numMissingGuids = 0
            numMissingRecordNames = 0
            for dn, attrs in results:
                dn = normalizeDNstr(dn)
                # Skip if group restriction is in place and guid is not
                # a member
                if (
                    recordType != self.recordType_groups and
                    not self.isAllowedByRestrictToGroup(dn, attrs)
                ):
                    continue

                try:
                    record = self._ldapResultToRecord(dn, attrs, recordType)

                    # For non-group records, if not enabled for calendaring do
                    # not include in principal property search results
                    if (recordType != self.recordType_groups):
                        if not record.enabledForCalendaring:
                            continue

                    records.append(record)

                except MissingGuidException:
                    numMissingGuids += 1

                except MissingRecordNameException:
                    numMissingRecordNames += 1

            if numMissingGuids:
                self.log.warn(
                    "{count:d} {type} records are missing {attr}",
                    count=numMissingGuids, type=recordType, attr=guidAttr
                )

            if numMissingRecordNames:
                self.log.warn(
                    "{count:d} {type} records are missing record name",
                    count=numMissingRecordNames, type=recordType,
                )

    self.log.debug(
        "Principal property search matched {count} records",
        count=len(records)
    )
    returnValue(records)
-
-
@inlineCallbacks
def getGroups(self, guids):
    """
    Returns a set of group records for the list of guids passed in. For
    any group that also contains subgroups, those subgroups' records are
    also returned, and so on.

    @param guids: guids of the groups to start from
    @return: deferred firing with the collected group records
    """

    recordsByAlias = {}

    groupsDN = self.typeDNs[self.recordType_groups]
    memberIdAttr = self.groupSchema["memberIdAttr"]

    # First time through the loop we search using the attribute
    # corresponding to guid, since that is what the proxydb uses.
    # Subsequent iterations fault in groups via the attribute
    # used to identify members.
    attributeToSearch = "guid"
    valuesToFetch = guids

    while valuesToFetch:
        results = []

        if attributeToSearch == "dn":
            # Since DN can't be searched on in a filter we have to call
            # recordsMatchingFields for *each* DN.
            for value in valuesToFetch:
                fields = [["dn", value, False, "equals"]]
                result = (
                    yield self.recordsMatchingFields(
                        fields,
                        recordType=self.recordType_groups
                    )
                )
                results.extend(result)
        else:
            # Batch the values to keep each LDAP filter a reasonable size
            for batch in splitIntoBatches(valuesToFetch, self.batchSize):
                fields = []
                for value in batch:
                    fields.append([attributeToSearch, value, False, "equals"])
                result = (
                    yield self.recordsMatchingFields(
                        fields,
                        recordType=self.recordType_groups
                    )
                )
                results.extend(result)

        # Reset values for next iteration
        valuesToFetch = set()

        for record in results:
            alias = record.cachedGroupsAlias()
            if alias not in recordsByAlias:
                recordsByAlias[alias] = record

            # record.memberGUIDs() contains the members of this group,
            # but it might not be in guid form; it will be data from
            # self.groupSchema["memberIdAttr"]
            for memberAlias in record.memberGUIDs():
                if not memberIdAttr:
                    # Members are identified by dn so we can take a short
                    # cut: we know we only need to examine groups, and
                    # those will be children of the groups DN
                    if not dnContainedIn(ldap.dn.str2dn(memberAlias),
                                         groupsDN):
                        continue
                if memberAlias not in recordsByAlias:
                    valuesToFetch.add(memberAlias)

        # Switch to the LDAP attribute used for identifying members
        # for subsequent iterations. If memberIdAttr is not specified
        # in the config, we'll search using dn.
        attributeToSearch = "memberIdAttr" if memberIdAttr else "dn"

    returnValue(recordsByAlias.values())
-
-
def recordTypeForDN(self, dnStr):
    """
    Examine a DN to determine which recordType it belongs to
    @param dnStr: DN to compare
    @type dnStr: string
    @return: recordType string, or None if no match
    """
    parsed = ldap.dn.str2dn(dnStr.lower())
    for candidate in self.recordTypes():
        # typeDNs entries are stored already lowercased
        if dnContainedIn(parsed, self.typeDNs[candidate]):
            return candidate
    return None
-
-
-
def dnContainedIn(child, parent):
    """
    Return True if child dn is contained within parent dn, otherwise False.

    Both arguments are parsed DNs (sequences of RDN components); the child
    is contained in the parent when the parent's components form the tail
    of the child's.
    """
    tailLength = len(parent)
    return child[-tailLength:] == parent
-
-
-
def normalizeDNstr(dnStr):
    """
    Convert to lowercase and remove extra whitespace
    @param dnStr: dn
    @type dnStr: C{str}
    @return: normalized dn C{str}
    """
    # Round-trip through the DN parser to canonicalize the separators,
    # then collapse any remaining whitespace runs to single spaces.
    canonical = ldap.dn.dn2str(ldap.dn.str2dn(dnStr.lower()))
    return ' '.join(canonical.split())
-
-
-
def _convertValue(value, matchType):
    """
    Escape a search value and wrap it with wildcards appropriate to the
    requested match type ("starts-with", "contains", or exact match).
    """
    escaped = ldapEsc(value)
    if matchType == "starts-with":
        return "%s*" % (escaped,)
    if matchType == "contains":
        return "*%s*" % (escaped,)
    # otherwise it's an exact match
    return escaped
-
-
-
def buildFilter(recordType, mapping, fields, operand="or", optimizeMultiName=False):
    """
    Create an LDAP filter string from a list of tuples representing directory
    attributes to search

    mapping is a dict mapping internal directory attribute names to ldap names.
    fields is a list of tuples...
    (directory field name, value to search, caseless (ignored), matchType)
    ...where matchType is one of "starts-with", "contains", "exact"

    @return: the LDAP filter string, or None if no field maps to an LDAP
        attribute (or the multi-name optimization rejects the query)
    """

    converted = []
    # combined groups the raw (value, caseless, matchType) tuples per
    # directory field, so the multi-name optimization below can inspect them
    combined = {}
    for field, value, caseless, matchType in fields:
        ldapField = mapping.get(field, None)
        if ldapField:
            combined.setdefault(field, []).append((value, caseless, matchType))
            value = _convertValue(value, matchType)
            if isinstance(ldapField, str):
                converted.append("(%s=%s)" % (ldapField, value))
            else:
                # A directory field may map to several LDAP attributes;
                # match any of them
                subConverted = []
                for lf in ldapField:
                    subConverted.append("(%s=%s)" % (lf, value))
                converted.append("(|%s)" % "".join(subConverted))

    if len(converted) == 0:
        return None

    if optimizeMultiName and recordType in ("users", "groups"):
        for field in [key for key in combined.keys() if key != "guid"]:
            if len(combined.get(field, [])) > 1:
                # Client is searching on more than one name -- interpret this as the user
                # explicitly looking up a user by name (ignoring other record types), and
                # try the various firstName/lastName permutations:
                if recordType == "users":
                    converted = []
                    for firstName, _ignore_firstCaseless, firstMatchType in combined["firstName"]:
                        for lastName, _ignore_lastCaseless, lastMatchType in combined["lastName"]:
                            if firstName != lastName:
                                firstValue = _convertValue(firstName, firstMatchType)
                                lastValue = _convertValue(lastName, lastMatchType)
                                converted.append(
                                    "(&(%s=%s)(%s=%s))" %
                                    (mapping["firstName"], firstValue,
                                     mapping["lastName"], lastValue)
                                )
                else:
                    # Non-user record types are skipped entirely in this mode
                    return None

    if len(converted) == 1:
        filterstr = converted[0]
    else:
        operand = ("|" if operand == "or" else "&")
        filterstr = "(%s%s)" % (operand, "".join(converted))

    if filterstr:
        # To reduce the amount of records returned, filter out the ones
        # that don't have (possibly) required attribute values (record
        # name, guid)
        additional = []
        for key in ("recordName", "guid"):
            if key in mapping:
                additional.append("(%s=*)" % (mapping.get(key),))
        if additional:
            filterstr = "(&%s%s)" % ("".join(additional), filterstr)

    return filterstr
-
-
-
def buildFilterFromTokens(recordType, mapping, tokens, extra=None):
    """
    Create an LDAP filter string from a list of query tokens. Each token is
    searched for in each LDAP attribute corresponding to "fullName" and
    "emailAddresses" (could be multiple LDAP fields for either).

    @param recordType: The recordType to use to customize the filter
    @param mapping: A dict mapping internal directory attribute names to ldap names.
    @type mapping: C{dict}
    @param tokens: The list of tokens to search for
    @type tokens: C{list}
    @param extra: Extra filter to "and" into the final filter
    @type extra: C{str} or None
    @return: An LDAP filterstr
    @rtype: C{str}
    """

    filterStr = None

    # Eliminate any substring duplicates: a token that is a substring of
    # another contributes nothing, since matching the longer one implies
    # matching the shorter one
    tokenSet = set()
    for token in tokens:
        collision = False
        for existing in tokenSet:
            if token in existing:
                collision = True
                break
            elif existing in token:
                # The new token supersedes this existing one; safe to
                # mutate because we break out of the iteration right away
                tokenSet.remove(existing)
                break
        if not collision:
            tokenSet.add(token)

    tokens = [ldapEsc(t) for t in tokenSet]
    if len(tokens) == 0:
        return None
    tokens.sort()

    # fullName is matched anywhere in the value; email only as a prefix
    attributes = [
        ("fullName", "(%s=*%s*)"),
        ("emailAddresses", "(%s=%s*)"),
    ]

    ldapFields = []
    for attribute, template in attributes:
        ldapField = mapping.get(attribute, None)
        if ldapField:
            if isinstance(ldapField, str):
                ldapFields.append((ldapField, template))
            else:
                for lf in ldapField:
                    ldapFields.append((lf, template))

    if len(ldapFields) == 0:
        return None

    tokenFragments = []
    if extra:
        tokenFragments.append(extra)

    # Each token must match at least one field; tokens are ANDed together
    for token in tokens:
        fragments = []
        for ldapField, template in ldapFields:
            fragments.append(template % (ldapField, token))
        if len(fragments) == 1:
            tokenFragment = fragments[0]
        else:
            tokenFragment = "(|%s)" % ("".join(fragments),)
        tokenFragments.append(tokenFragment)

    if len(tokenFragments) == 1:
        filterStr = tokenFragments[0]
    else:
        filterStr = "(&%s)" % ("".join(tokenFragments),)

    return filterStr
-
-
-
-class LdapDirectoryRecord(CachingDirectoryRecord):
- """
- LDAP implementation of L{IDirectoryRecord}.
- """
- def __init__(
- self, service, recordType,
- guid, shortNames, authIDs, fullName,
- firstName, lastName, emailAddresses,
- uid, dn, memberGUIDs, extProxies, extReadOnlyProxies,
- attrs, **kwargs
- ):
- super(LdapDirectoryRecord, self).__init__(
- service=service,
- recordType=recordType,
- guid=guid,
- shortNames=shortNames,
- authIDs=authIDs,
- fullName=fullName,
- firstName=firstName,
- lastName=lastName,
- emailAddresses=emailAddresses,
- extProxies=extProxies,
- extReadOnlyProxies=extReadOnlyProxies,
- uid=uid,
- **kwargs
- )
-
- # Save attributes of dn and attrs in case you might need them later
- self.dn = dn
- self.attrs = attrs
-
- # Store copy of member guids
- self._memberGUIDs = memberGUIDs
-
- # Identifier of this record as a group member
- memberIdAttr = self.service.groupSchema["memberIdAttr"]
- if memberIdAttr:
- self._memberId = self.service._getUniqueLdapAttribute(
- attrs,
- memberIdAttr
- )
- else:
- self._memberId = normalizeDNstr(self.dn)
-
-
- def members(self):
- """ Return the records representing members of this group """
-
- try:
- return self._members_storage
- except AttributeError:
- self._members_storage = self._members()
- return self._members_storage
-
-
- def _members(self):
- """ Fault in records for the members of this group """
-
- memberIdAttr = self.service.groupSchema["memberIdAttr"]
- results = []
-
- for memberId in self._memberGUIDs:
-
- if memberIdAttr:
-
- base = self.service.base
- filterstr = "(%s=%s)" % (memberIdAttr, ldapEsc(memberId))
- self.log.debug(
- "Retrieving subtree of {base} with filter {filter}",
- base=ldap.dn.dn2str(base), filter=filterstr,
- system="LdapDirectoryService"
- )
- result = self.service.timedSearch(
- ldap.dn.dn2str(base),
- ldap.SCOPE_SUBTREE,
- filterstr=filterstr,
- attrlist=self.service.attrlist
- )
-
- else: # using DN
-
- self.log.debug(
- "Retrieving {id}.",
- id=memberId, system="LdapDirectoryService"
- )
- result = self.service.timedSearch(
- memberId,
- ldap.SCOPE_BASE, attrlist=self.service.attrlist
- )
-
- if result:
-
- dn, attrs = result.pop()
- dn = normalizeDNstr(dn)
- self.log.debug("Retrieved: {dn} {attrs}", dn=dn, attrs=attrs)
- recordType = self.service.recordTypeForDN(dn)
- if recordType is None:
- self.log.error(
- "Unable to map {dn} to a record type", dn=dn
- )
- continue
-
- shortName = self.service._getUniqueLdapAttribute(
- attrs,
- self.service.rdnSchema[recordType]["mapping"]["recordName"]
- )
-
- if shortName:
- record = self.service.recordWithShortName(
- recordType,
- shortName
- )
- if record:
- results.append(record)
-
- return results
-
-
- def groups(self):
- """ Return the records representing groups this record is a member of """
- try:
- return self._groups_storage
- except AttributeError:
- self._groups_storage = self._groups()
- return self._groups_storage
-
-
- def _groups(self):
- """ Fault in the groups of which this record is a member """
-
- recordType = self.service.recordType_groups
- base = self.service.typeDNs[recordType]
-
- membersAttrs = []
- if self.service.groupSchema["membersAttr"]:
- membersAttrs.append(self.service.groupSchema["membersAttr"])
- if self.service.groupSchema["nestedGroupsAttr"]:
- membersAttrs.append(self.service.groupSchema["nestedGroupsAttr"])
-
- if len(membersAttrs) == 1:
- filterstr = "(%s=%s)" % (membersAttrs[0], self._memberId)
- else:
- filterstr = "(|%s)" % (
- "".join(
- ["(%s=%s)" % (a, self._memberId) for a in membersAttrs]
- ),
- )
- self.log.debug("Finding groups containing {id}", id=self._memberId)
- groups = []
-
- try:
- results = self.service.timedSearch(
- ldap.dn.dn2str(base),
- ldap.SCOPE_SUBTREE,
- filterstr=filterstr,
- attrlist=self.service.attrlist
- )
-
- for dn, attrs in results:
- dn = normalizeDNstr(dn)
- shortName = self.service._getUniqueLdapAttribute(attrs, "cn")
- self.log.debug(
- "{id} is a member of {shortName}",
- id=self._memberId, shortName=shortName
- )
- record = self.service.recordWithShortName(recordType, shortName)
- if record is not None:
- groups.append(record)
- except ldap.PROTOCOL_ERROR, e:
- self.log.warn("{e}", e=e)
-
- return groups
-
-
- def cachedGroupsAlias(self):
- """
- See directory.py for full description
-
- LDAP group members can be referred to by attributes other than guid. _memberId
- will be set to the appropriate value to look up group-membership with.
- """
- return self._memberId
-
-
- def memberGUIDs(self):
- return set(self._memberGUIDs)
-
-
- def verifyCredentials(self, credentials):
- """ Supports PAM or simple LDAP bind for username+password """
-
- if isinstance(credentials, UsernamePassword):
-
- # TODO: investigate:
- # Check that the username supplied matches one of the shortNames
- # (The DCS might already enforce this constraint, not sure)
- if credentials.username not in self.shortNames:
- return False
-
- # Check cached password
- try:
- if credentials.password == self.password:
- return True
- except AttributeError:
- pass
-
- if self.service.authMethod.upper() == "PAM":
- # Authenticate against PAM (UNTESTED)
-
- if not pamAvailable:
- self.log.error("PAM module is not installed")
- raise DirectoryConfigurationError()
-
- def pam_conv(auth, query_list, userData):
- return [(credentials.password, 0)]
-
- auth = PAM.pam()
- auth.start("caldav")
- auth.set_item(PAM.PAM_USER, credentials.username)
- auth.set_item(PAM.PAM_CONV, pam_conv)
- try:
- auth.authenticate()
- except PAM.error:
- return False
- else:
- # Cache the password to avoid further LDAP queries
- self.password = credentials.password
- return True
-
- elif self.service.authMethod.upper() == "LDAP":
-
- # Authenticate against LDAP
- try:
- self.service.authenticate(self.dn, credentials.password)
- # Cache the password to avoid further LDAP queries
- self.password = credentials.password
- return True
-
- except ldap.INVALID_CREDENTIALS:
- self.log.info(
- "Invalid credentials for {dn}",
- dn=repr(self.dn), system="LdapDirectoryService"
- )
- return False
-
- else:
- self.log.error(
- "Unknown Authentication Method {method!r}",
- method=self.service.authMethod.upper()
- )
- raise DirectoryConfigurationError()
-
- return super(LdapDirectoryRecord, self).verifyCredentials(credentials)
-
-
-
-class MissingRecordNameException(Exception):
- """ Raised when LDAP record is missing recordName """
- pass
-
-
-
-class MissingGuidException(Exception):
- """ Raised when LDAP record is missing guidAttr and it's required """
- pass
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/principal.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/principal.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/principal.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -30,53 +30,52 @@
from urllib import unquote
from urlparse import urlparse
+import uuid
+from twext.python.log import Logger
from twisted.cred.credentials import UsernamePassword
-from twisted.python.failure import Failure
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.defer import succeed
-from twisted.web.template import XMLFile, Element, renderer, tags
-from twistedcaldav.directory.util import NotFoundResource
-
-from txweb2.auth.digest import DigestedCredentials
-from txweb2 import responsecode
-from txweb2.http import HTTPError
-from txdav.xml import element as davxml
-from txweb2.dav.util import joinURL
-from txweb2.dav.noneprops import NonePropertyStore
-
-from twext.python.log import Logger
-
-
-try:
- from twistedcaldav.authkerb import NegotiateCredentials
- NegotiateCredentials # sigh, pyflakes
-except ImportError:
- NegotiateCredentials = None
from twisted.python.modules import getModule
-
+from twisted.web.template import XMLFile, Element, renderer
from twistedcaldav import caldavxml, customxml
from twistedcaldav.cache import DisabledCacheNotifier, PropfindCacheMixin
from twistedcaldav.config import config
from twistedcaldav.customxml import calendarserver_namespace
from twistedcaldav.directory.augment import allowedAutoScheduleModes
from twistedcaldav.directory.common import uidsResourceName
-from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
-from twistedcaldav.directory.idirectory import IDirectoryService
+from twistedcaldav.directory.util import NotFoundResource
+from twistedcaldav.directory.util import (
+ formatLink, formatLinks, formatPrincipals, formatList
+)
from twistedcaldav.directory.wiki import getWikiACL
+from twistedcaldav.extensions import (
+ ReadOnlyResourceMixIn, DAVPrincipalResource, DAVResourceWithChildrenMixin
+)
from twistedcaldav.extensions import DirectoryElement
-from twistedcaldav.extensions import ReadOnlyResourceMixIn, DAVPrincipalResource, \
- DAVResourceWithChildrenMixin
from twistedcaldav.resource import CalendarPrincipalCollectionResource, CalendarPrincipalResource
from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
+from txdav.who.directory import CalendarDirectoryRecordMixin
+from txdav.xml import element as davxml
+from txweb2 import responsecode
+from txweb2.auth.digest import DigestedCredentials
+from txweb2.dav.noneprops import NonePropertyStore
+from txweb2.dav.util import joinURL
+from txweb2.http import HTTPError
+try:
+ from twistedcaldav.authkerb import NegotiateCredentials
+ NegotiateCredentials # sigh, pyflakes
+except ImportError:
+ NegotiateCredentials = None
+
thisModule = getModule(__name__)
log = Logger()
class PermissionsMixIn (ReadOnlyResourceMixIn):
def defaultAccessControlList(self):
- return authReadACL
+ return succeed(authReadACL)
@inlineCallbacks
@@ -94,7 +93,7 @@
else:
# ...otherwise permissions are fixed, and are not subject to
# inheritance rules, etc.
- returnValue(self.defaultAccessControlList())
+ returnValue((yield self.defaultAccessControlList()))
@@ -108,7 +107,7 @@
def cuTypeConverter(cuType):
""" Converts calendar user types to OD type names """
- return "recordType", DirectoryRecord.fromCUType(cuType)
+ return "recordType", CalendarDirectoryRecordMixin.fromCUType(cuType)
@@ -126,7 +125,7 @@
elif cua.startswith("/") or cua.startswith("http"):
ignored, collection, id = cua.rsplit("/", 2)
if collection == "__uids__":
- return "guid", id
+ return "uid", id
else:
return "recordName", id
@@ -150,18 +149,21 @@
CalendarPrincipalCollectionResource.__init__(self, url)
DAVResourceWithChildrenMixin.__init__(self)
- self.directory = IDirectoryService(directory)
+ # MOVE2WHO
+ # self.directory = IDirectoryService(directory)
+ self.directory = directory
def __repr__(self):
return "<%s: %s %s>" % (self.__class__.__name__, self.directory, self._url)
+ @inlineCallbacks
def locateChild(self, req, segments):
- child = self.getChild(segments[0])
+ child = (yield self.getChild(segments[0]))
if child is not None:
- return (child, segments[1:])
- return (NotFoundResource(principalCollections=self.principalCollections()), ())
+ returnValue((child, segments[1:]))
+ returnValue((NotFoundResource(principalCollections=self.principalCollections()), ()))
def deadProperties(self):
@@ -174,12 +176,14 @@
return succeed(None)
+ @inlineCallbacks
def principalForShortName(self, recordType, name):
- return self.principalForRecord(self.directory.recordWithShortName(recordType, name))
+ record = (yield self.directory.recordWithShortName(recordType, name))
+ returnValue((yield self.principalForRecord(record)))
def principalForUser(self, user):
- return self.principalForShortName(DirectoryService.recordType_users, user)
+ return self.principalForShortName(self.directory.recordType.lookupByName("user"), user)
def principalForAuthID(self, user):
@@ -207,7 +211,7 @@
def principalForRecord(self, record):
if record is None or not record.enabled:
- return None
+ return succeed(None)
return self.principalForUID(record.uid)
##
@@ -217,7 +221,7 @@
_cs_ns = "http://calendarserver.org/ns/"
_fieldMap = {
("DAV:" , "displayname") :
- ("fullName", None, "Display Name", davxml.DisplayName),
+ ("fullNames", None, "Display Name", davxml.DisplayName),
("urn:ietf:params:xml:ns:caldav" , "calendar-user-type") :
("", cuTypeConverter, "Calendar User Type",
caldavxml.CalendarUserType),
@@ -281,16 +285,27 @@
#
# Create children
#
- for recordType in self.directory.recordTypes():
- self.putChild(recordType, DirectoryPrincipalTypeProvisioningResource(self, recordType))
+ for name, recordType in [
+ (self.directory.recordTypeToOldName(r), r)
+ for r in self.directory.recordTypes()
+ ]:
+ self.putChild(
+ name,
+ DirectoryPrincipalTypeProvisioningResource(
+ self, name, recordType
+ )
+ )
self.putChild(uidsResourceName, DirectoryPrincipalUIDProvisioningResource(self))
+ @inlineCallbacks
def principalForUID(self, uid):
- return self.getChild(uidsResourceName).getChild(uid)
+ child = (yield self.getChild(uidsResourceName))
+ returnValue((yield child.getChild(uid)))
+ @inlineCallbacks
def _principalForURI(self, uri):
scheme, netloc, path, _ignore_params, _ignore_query, _ignore_fragment = urlparse(uri)
@@ -312,56 +327,62 @@
if (host != config.ServerHostName and
host not in config.Scheduling.Options.PrincipalHostAliases):
- return None
+ returnValue(None)
if port != {
"http" : config.HTTPPort,
"https": config.SSLPort,
}[scheme]:
- return None
+ returnValue(None)
elif scheme == "urn":
if path.startswith("uuid:"):
- return self.principalForUID(path[5:])
+ returnValue((yield self.principalForUID(path[5:])))
else:
- return None
+ returnValue(None)
else:
- return None
+ returnValue(None)
if not path.startswith(self._url):
- return None
+ returnValue(None)
path = path[len(self._url) - 1:]
segments = [unquote(s) for s in path.rstrip("/").split("/")]
if segments[0] == "" and len(segments) == 3:
- typeResource = self.getChild(segments[1])
+ typeResource = yield self.getChild(segments[1])
if typeResource is not None:
- principalResource = typeResource.getChild(segments[2])
+ principalResource = yield typeResource.getChild(segments[2])
if principalResource:
- return principalResource
+ returnValue(principalResource)
- return None
+ returnValue(None)
+ @inlineCallbacks
def principalForCalendarUserAddress(self, address):
# First see if the address is a principal URI
- principal = self._principalForURI(address)
+ principal = yield self._principalForURI(address)
if principal:
- if isinstance(principal, DirectoryCalendarPrincipalResource) and principal.record.enabledForCalendaring:
- return principal
+ if (
+ isinstance(principal, DirectoryCalendarPrincipalResource) and
+ principal.record.hasCalendars
+ ):
+ returnValue(principal)
else:
# Next try looking it up in the directory
- record = self.directory.recordWithCalendarUserAddress(address)
- if record is not None and record.enabled and record.enabledForCalendaring:
- return self.principalForRecord(record)
+ record = yield self.directory.recordWithCalendarUserAddress(address)
+ if record is not None and record.hasCalendars:
+ returnValue((yield self.principalForRecord(record)))
log.debug("No principal for calendar user address: %r" % (address,))
- return None
+ returnValue(None)
+ @inlineCallbacks
def principalForRecord(self, record):
- return self.getChild(uidsResourceName).principalForRecord(record)
+ child = (yield self.getChild(uidsResourceName))
+ returnValue((yield child.principalForRecord(record)))
##
@@ -375,13 +396,14 @@
def getChild(self, name):
if name == "":
- return self
+ return succeed(self)
else:
- return self.putChildren.get(name, None)
+ return succeed(self.putChildren.get(name, None))
def listChildren(self):
- return self.directory.recordTypes()
+ # MOVE2WHO hack
+ return [r.name + "s" for r in self.directory.recordTypes()]
##
@@ -421,14 +443,14 @@
Collection resource which provisions directory principals of a
specific type as its children, indexed by short name.
"""
- def __init__(self, parent, recordType):
+ def __init__(self, parent, name, recordType):
"""
@param parent: the parent L{DirectoryPrincipalProvisioningResource}.
@param recordType: the directory record type to provision.
"""
DirectoryProvisioningResource.__init__(
self,
- joinURL(parent.principalCollectionURL(), recordType) + "/",
+ joinURL(parent.principalCollectionURL(), name) + "/",
parent.directory
)
@@ -459,22 +481,24 @@
def getChild(self, name):
if name == "":
- return self
+ return succeed(self)
else:
return self.principalForShortName(self.recordType, name)
+ @inlineCallbacks
def listChildren(self):
+ children = []
if config.EnablePrincipalListings:
+ try:
+ for record in (yield self.directory.listRecords(self.recordType)):
+ for shortName in record.shortNames:
+ children.append(shortName)
+ except AttributeError:
+ log.warn("Cannot list children of record type {rt}",
+ rt=self.recordType.name)
+ returnValue(children)
-
- def _recordShortnameExpand():
- for record in self.directory.listRecords(self.recordType):
- if record.enabled:
- for shortName in record.shortNames:
- yield shortName
-
- return _recordShortnameExpand()
else:
# Not a listable collection
raise HTTPError(responsecode.FORBIDDEN)
@@ -517,16 +541,18 @@
def principalForRecord(self, record):
- if record is None or not record.enabled:
- return None
+ # MOVE2WHO
+ if record is None: # or not record.enabled:
+ return succeed(None)
- if record.enabledForCalendaring or record.enabledForAddressBooks:
+ # MOVE2WHO
+ if record.hasCalendars or record.hasContacts:
# XXX these are different features and one should not automatically
# imply the other...
principal = DirectoryCalendarPrincipalResource(self, record)
else:
principal = DirectoryPrincipalResource(self, record)
- return principal
+ return succeed(principal)
##
# Static
@@ -538,9 +564,10 @@
raise HTTPError(responsecode.NOT_FOUND)
+ @inlineCallbacks
def getChild(self, name):
if name == "":
- return self
+ returnValue(self)
if "#" in name:
# This UID belongs to a sub-principal
@@ -549,16 +576,16 @@
primaryUID = name
subType = None
- record = self.directory.recordWithUID(primaryUID)
- primaryPrincipal = self.principalForRecord(record)
+ record = (yield self.directory.recordWithUID(primaryUID))
+ primaryPrincipal = (yield self.principalForRecord(record))
if primaryPrincipal is None:
log.info("No principal found for UID: %s" % (name,))
- return None
+ returnValue(None)
if subType is None:
- return primaryPrincipal
+ returnValue(primaryPrincipal)
else:
- return primaryPrincipal.getChild(subType)
+ returnValue((yield primaryPrincipal.getChild(subType)))
def listChildren(self):
@@ -610,17 +637,31 @@
Top-level renderer in the template.
"""
record = self.resource.record
+ try:
+ if isinstance(record.guid, uuid.UUID):
+ guid = str(record.guid).upper()
+ else:
+ guid = record.guid
+ except AttributeError:
+ guid = ""
+ try:
+ emailAddresses = record.emailAddresses
+ except AttributeError:
+ emailAddresses = []
return tag.fillSlots(
directoryGUID=str(record.service.guid),
realm=str(record.service.realmName),
- principalGUID=str(record.guid),
- recordType=str(record.recordType),
+ principalGUID=guid,
+ recordType=record.recordType.name + "s", # MOVE2WHO need mapping
shortNames=",".join(record.shortNames),
- securityIDs=",".join(record.authIDs),
- fullName=str(record.fullName),
- firstName=str(record.firstName),
- lastName=str(record.lastName),
- emailAddresses=formatList(record.emailAddresses),
+ # MOVE2WHO: need this?
+ # securityIDs=",".join(record.authIDs),
+ fullName=str(record.displayName),
+ # MOVE2WHO: need this?
+ # firstName=str(record.firstName),
+ # MOVE2WHO: need this?
+ # lastName=str(record.lastName),
+ emailAddresses=formatList(emailAddresses),
principalUID=str(self.resource.principalUID()),
principalURL=formatLink(self.resource.principalURL()),
alternateURIs=formatLinks(self.resource.alternateURIs()),
@@ -697,7 +738,7 @@
"""
resource = self.resource
record = resource.record
- if record.enabledForCalendaring:
+ if record.hasCalendars:
return tag.fillSlots(
calendarUserAddresses=formatLinks(
sorted(resource.calendarUserAddresses())
@@ -715,7 +756,7 @@
"""
resource = self.resource
record = resource.record
- if record.enabledForAddressBooks:
+ if record.hasContacts:
return tag.fillSlots(
addressBookHomes=formatLinks(resource.addressBookHomeURLs())
)
@@ -750,7 +791,8 @@
(calendarserver_namespace, "first-name"),
(calendarserver_namespace, "last-name"),
(calendarserver_namespace, "email-address-set"),
- davxml.ResourceID.qname(),
+ # MOVE2WHO
+ # davxml.ResourceID.qname(),
)
cacheNotifierFactory = DisabledCacheNotifier
@@ -778,8 +820,9 @@
url = joinURL(parent.principalCollectionURL(), self.principalUID()) + slash
self._url = url
+ # MOVE2WHO - hack: just adding an "s" using recordType.name (need a mapping)
self._alternate_urls = tuple([
- joinURL(parent.parent.principalCollectionURL(), record.recordType, shortName) + slash for shortName in record.shortNames
+ joinURL(parent.parent.principalCollectionURL(), record.recordType.name+"s", shortName) + slash for shortName in record.shortNames
])
@@ -811,26 +854,33 @@
namespace, name = qname
- if qname == davxml.ResourceID.qname():
- returnValue(davxml.ResourceID(davxml.HRef.fromString("urn:uuid:%s" % (self.record.guid,))))
- elif namespace == calendarserver_namespace:
- if name == "first-name":
- firstName = self.record.firstName
- if firstName is not None:
- returnValue(customxml.FirstNameProperty(firstName))
- else:
- returnValue(None)
+ # MOVE2WHO -- does principal need ResourceID ?
+ # if qname == davxml.ResourceID.qname():
+ # returnValue(davxml.ResourceID(davxml.HRef.fromString("urn:uuid:%s" % (self.record.guid,))))
+ if namespace == calendarserver_namespace:
- elif name == "last-name":
- lastName = self.record.lastName
- if lastName is not None:
- returnValue(customxml.LastNameProperty(lastName))
- else:
- returnValue(None)
+ # MOVE2WHO
+ # if name == "first-name":
+ # firstName = self.record.firstName
+ # if firstName is not None:
+ # returnValue(customxml.FirstNameProperty(firstName))
+ # else:
+ # returnValue(None)
- elif name == "email-address-set":
+ # elif name == "last-name":
+ # lastName = self.record.lastName
+ # if lastName is not None:
+ # returnValue(customxml.LastNameProperty(lastName))
+ # else:
+ # returnValue(None)
+
+ if name == "email-address-set":
+ try:
+ emails = self.record.emailAddresses
+ except AttributeError:
+ emails = []
returnValue(customxml.EmailAddressSet(
- *[customxml.EmailAddressProperty(addr) for addr in sorted(self.record.emailAddresses)]
+ *[customxml.EmailAddressProperty(addr) for addr in sorted(emails)]
))
result = (yield super(DirectoryPrincipalResource, self).readProperty(property, request))
@@ -867,7 +917,7 @@
def displayName(self):
- return self.record.displayName()
+ return self.record.displayName
##
# ACL
@@ -939,51 +989,40 @@
@inlineCallbacks
- def proxyFor(self, read_write, resolve_memberships=True):
+ def proxyFor(self, readWrite):
+ """
+ Returns the set of principals currently delegating to this principal
+ with the access indicated by the readWrite argument. If readWrite is
+ True, then write-access delegators are returned, otherwise the read-
+ only-access delegators are returned.
+ @param readWrite: Whether to look up read-write delegators, or
+ read-only delegators
+ @type readWrite: C{bool}
+
+ @return: A Deferred firing with a set of principals
+ """
proxyFors = set()
- if resolve_memberships:
- cache = getattr(self.record.service, "groupMembershipCache", None)
- if cache:
- log.debug("proxyFor is using groupMembershipCache")
- guids = (yield self.record.cachedGroups())
- memberships = set()
- for guid in guids:
- principal = self.parent.principalForUID(guid)
- if principal:
- memberships.add(principal)
- else:
- memberships = self._getRelatives("groups", infinity=True)
-
- for membership in memberships:
- results = (yield membership.proxyFor(read_write, False))
- proxyFors.update(results)
-
if config.EnableProxyPrincipals:
- # Get proxy group UIDs and map to principal resources
- proxies = []
- memberships = (yield self._calendar_user_proxy_index().getMemberships(self.principalUID()))
- for uid in memberships:
- subprincipal = self.parent.principalForUID(uid)
- if subprincipal:
- if subprincipal.isProxyType(read_write):
- proxies.append(subprincipal.parent)
- else:
- yield self._calendar_user_proxy_index().removeGroup(uid)
+ childName = "calendar-proxy-{rw}-for".format(
+ rw=("write" if readWrite else "read")
+ )
+ proxyForGroup = yield self.getChild(childName)
+ if proxyForGroup:
+ proxyFors = yield proxyForGroup.groupMembers()
- proxyFors.update(proxies)
+ uids = set()
+ for principal in tuple(proxyFors):
+ if principal.principalUID() in uids:
+ proxyFors.remove(principal)
+ else:
+ uids.add(principal.principalUID())
- uids = set()
- for principal in tuple(proxyFors):
- if principal.principalUID() in uids:
- proxyFors.remove(principal)
- else:
- uids.add(principal.principalUID())
-
returnValue(proxyFors)
+ @inlineCallbacks
def _getRelatives(self, method, record=None, relatives=None, records=None, proxy=None, infinity=False):
if record is None:
record = self.record
@@ -994,61 +1033,62 @@
if record not in records:
records.add(record)
- for relative in getattr(record, method)():
+ for relative in (yield getattr(record, method)()):
if relative not in records:
- found = self.parent.principalForRecord(relative)
+ found = (yield self.parent.principalForRecord(relative))
if found is None:
log.error("No principal found for directory record: %r" % (relative,))
else:
if proxy:
if proxy == "read-write":
- found = found.getChild("calendar-proxy-write")
+ found = (yield found.getChild("calendar-proxy-write"))
else:
- found = found.getChild("calendar-proxy-read")
+ found = (yield found.getChild("calendar-proxy-read"))
relatives.add(found)
if infinity:
- self._getRelatives(method, relative, relatives, records,
+ yield self._getRelatives(method, relative, relatives, records,
infinity=infinity)
- return relatives
+ returnValue(relatives)
def groupMembers(self):
- return succeed(self._getRelatives("members"))
+ return self._getRelatives("members")
def expandedGroupMembers(self):
- return succeed(self._getRelatives("members", infinity=True))
+ return self._getRelatives("members", infinity=True)
@inlineCallbacks
def groupMemberships(self, infinity=False):
- cache = getattr(self.record.service, "groupMembershipCache", None)
- if cache:
- log.debug("groupMemberships is using groupMembershipCache")
- guids = (yield self.record.cachedGroups())
- groups = set()
- for guid in guids:
- principal = self.parent.principalForUID(guid)
- if principal:
- groups.add(principal)
- else:
- groups = self._getRelatives("groups", infinity=infinity)
+ # cache = getattr(self.record.service, "groupMembershipCache", None)
+ # if cache:
+ # log.debug("groupMemberships is using groupMembershipCache")
+ # guids = (yield self.record.cachedGroups())
+ # groups = set()
+ # for guid in guids:
+ # principal = yield self.parent.principalForUID(guid)
+ # if principal:
+ # groups.add(principal)
+ # else:
+ groups = yield self._getRelatives("groups", infinity=infinity)
- if config.EnableProxyPrincipals:
- # Get proxy group UIDs and map to principal resources
- proxies = []
- memberships = (yield self._calendar_user_proxy_index().getMemberships(self.principalUID()))
- for uid in memberships:
- subprincipal = self.parent.principalForUID(uid)
- if subprincipal:
- proxies.append(subprincipal)
- else:
- yield self._calendar_user_proxy_index().removeGroup(uid)
+ # MOVE2WHO
+ # if config.EnableProxyPrincipals:
+ # # Get proxy group UIDs and map to principal resources
+ # proxies = []
+ # memberships = (yield self._calendar_user_proxy_index().getMemberships(self.principalUID()))
+ # for uid in memberships:
+ # subprincipal = yield self.parent.principalForUID(uid)
+ # if subprincipal:
+ # proxies.append(subprincipal)
+ # else:
+ # yield self._calendar_user_proxy_index().removeGroup(uid)
- groups.update(proxies)
+ # groups.update(proxies)
returnValue(groups)
@@ -1099,7 +1139,9 @@
def getAutoSchedule(self):
- return self.record.autoSchedule
+ # MOVE2WHO
+ return True
+ # return self.record.autoSchedule
def canAutoSchedule(self, organizer=None):
@@ -1187,18 +1229,19 @@
raise HTTPError(responsecode.NOT_FOUND)
+ @inlineCallbacks
def locateChild(self, req, segments):
- child = self.getChild(segments[0])
+ child = (yield self.getChild(segments[0]))
if child is not None:
- return (child, segments[1:])
- return (None, ())
+ returnValue((child, segments[1:]))
+ returnValue((None, ()))
def getChild(self, name):
if name == "":
- return self
+ return succeed(self)
- return None
+ return succeed(None)
def listChildren(self):
@@ -1221,7 +1264,7 @@
def addressBooksEnabled(self):
- return config.EnableCardDAV and self.record.enabledForAddressBooks
+ return config.EnableCardDAV and self.record.hasContacts
@inlineCallbacks
@@ -1288,7 +1331,7 @@
def calendarHomeURLs(self):
- if self.record.enabledForCalendaring:
+ if self.record.hasCalendars:
homeURL = self._homeChildURL(None)
else:
homeURL = ""
@@ -1318,7 +1361,7 @@
def addressBookHomeURLs(self):
- if self.record.enabledForAddressBooks:
+ if self.record.hasContacts:
homeURL = self._addressBookHomeChildURL(None)
else:
homeURL = ""
@@ -1391,22 +1434,27 @@
def getChild(self, name):
if name == "":
- return self
+ return succeed(self)
- if config.EnableProxyPrincipals and name in ("calendar-proxy-read",
- "calendar-proxy-write"):
+ if config.EnableProxyPrincipals and name in (
+ "calendar-proxy-read", "calendar-proxy-write",
+ "calendar-proxy-read-for", "calendar-proxy-write-for",
+ ):
# name is required to be str
from twistedcaldav.directory.calendaruserproxy import (
CalendarUserProxyPrincipalResource
)
- return CalendarUserProxyPrincipalResource(self, str(name))
+ return succeed(CalendarUserProxyPrincipalResource(self, str(name)))
else:
- return None
+ return succeed(None)
def listChildren(self):
if config.EnableProxyPrincipals:
- return ("calendar-proxy-read", "calendar-proxy-write")
+ return (
+ "calendar-proxy-read", "calendar-proxy-write",
+ "calendar-proxy-read-for", "calendar-proxy-write-for",
+ )
else:
return ()
@@ -1425,71 +1473,3 @@
-def formatPrincipals(principals):
- """
- Format a list of principals into some twisted.web.template DOM objects.
- """
- def recordKey(principal):
- try:
- record = principal.record
- except AttributeError:
- try:
- record = principal.parent.record
- except:
- return None
- return (record.recordType, record.shortNames[0])
-
-
- def describe(principal):
- if hasattr(principal, "record"):
- return " - %s" % (principal.record.fullName,)
- else:
- return ""
-
- return formatList(
- tags.a(href=principal.principalURL())(
- str(principal), describe(principal)
- )
- for principal in sorted(principals, key=recordKey)
- )
-
-
-
-def formatList(iterable):
- """
- Format a list of stuff as an interable.
- """
- thereAreAny = False
- try:
- item = None
- for item in iterable:
- thereAreAny = True
- yield " -> "
- if item is None:
- yield "None"
- else:
- yield item
- yield "\n"
- except Exception, e:
- log.error("Exception while rendering: %s" % (e,))
- Failure().printTraceback()
- yield " ** %s **: %s\n" % (e.__class__.__name__, e)
- if not thereAreAny:
- yield " '()\n"
-
-
-
-def formatLink(url):
- """
- Convert a URL string into some twisted.web.template DOM objects for
- rendering as a link to itself.
- """
- return tags.a(href=url)(url)
-
-
-
-def formatLinks(urls):
- """
- Format a list of URL strings as a list of twisted.web.template DOM links.
- """
- return formatList(formatLink(link) for link in urls)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/accounts.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/accounts.xml 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/accounts.xml 2014-03-13 20:40:10 UTC (rev 12899)
@@ -18,279 +18,297 @@
<!DOCTYPE accounts SYSTEM "../../../conf/auth/accounts.dtd">
-<accounts realm="Test">
- <user>
- <uid>admin</uid>
- <guid>D11F03A0-97EA-48AF-9A6C-FAC7F3975766</guid>
+<directory realm="Test">
+ <record type="user">
+ <short-name>admin</short-name>
+ <uid>D11F03A0-97EA-48AF-9A6C-FAC7F3975766</uid>
<password>nimda</password>
- <name>Administrators</name>
- </user>
- <user>
- <uid>wsanchez</uid>
- <guid>6423F94A-6B76-4A3A-815B-D52CFD77935D</guid>
+ <full-name>Administrators</full-name>
+ </record>
+ <record type="user">
+ <short-name>wsanchez</short-name>
+ <uid>6423F94A-6B76-4A3A-815B-D52CFD77935D</uid>
<password>zehcnasw</password>
- <name>Wilfredo Sanchez</name>
- <email-address>wsanchez at example.com</email-address>
- </user>
- <user>
- <uid>cdaboo</uid>
- <guid>5A985493-EE2C-4665-94CF-4DFEA3A89500</guid>
+ <full-name>Wilfredo Sanchez</full-name>
+ <email>wsanchez at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>cdaboo</short-name>
+ <uid>5A985493-EE2C-4665-94CF-4DFEA3A89500</uid>
<password>oobadc</password>
- <name>Cyrus Daboo</name>
- <email-address>cdaboo at example.com</email-address>
- </user>
- <user>
- <uid>lecroy</uid>
- <guid>8B4288F6-CC82-491D-8EF9-642EF4F3E7D0</guid>
+ <full-name>Cyrus Daboo</full-name>
+ <email>cdaboo at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>lecroy</short-name>
+ <uid>8B4288F6-CC82-491D-8EF9-642EF4F3E7D0</uid>
<password>yorcel</password>
- <name>Chris Lecroy</name>
- <email-address>lecroy at example.com</email-address>
- </user>
- <user>
- <uid>dreid</uid>
- <guid>5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1</guid>
+ <full-name>Chris Lecroy</full-name>
+ <email>lecroy at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>dreid</short-name>
+ <uid>5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1</uid>
<password>dierd</password>
- <name>David Reid</name>
- <email-address>dreid at example.com</email-address>
- </user>
- <user>
- <uid>doublequotes</uid>
- <guid>8E04787E-336D-41ED-A70B-D233AD0DCE6F</guid>
+ <full-name>David Reid</full-name>
+ <email>dreid at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>doublequotes</short-name>
+ <uid>8E04787E-336D-41ED-A70B-D233AD0DCE6F</uid>
<password>setouqelbuod</password>
- <name>Double "quotey" Quotes</name>
- <email-address>doublequotes at example.com</email-address>
- </user>
- <user>
- <uid>nocalendar</uid>
- <guid>543D28BA-F74F-4D5F-9243-B3E3A61171E5</guid>
+ <full-name>Double "quotey" Quotes</full-name>
+ <email>doublequotes at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>nocalendar</short-name>
+ <uid>543D28BA-F74F-4D5F-9243-B3E3A61171E5</uid>
<password>radnelacon</password>
- <name>No Calendar</name>
- <email-address>nocalendar at example.com</email-address>
- </user>
- <user>
- <uid>usera</uid>
- <guid>7423F94A-6B76-4A3A-815B-D52CFD77935D</guid>
+ <full-name>No Calendar</full-name>
+ <email>nocalendar at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>usera</short-name>
+ <uid>7423F94A-6B76-4A3A-815B-D52CFD77935D</uid>
<password>a</password>
- <name>a</name>
- <email-address>a at example.com</email-address>
- </user>
- <user>
- <uid>userb</uid>
- <guid>8A985493-EE2C-4665-94CF-4DFEA3A89500</guid>
+ <full-name>a</full-name>
+ <email>a at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>userb</short-name>
+ <uid>8A985493-EE2C-4665-94CF-4DFEA3A89500</uid>
<password>b</password>
- <name>b</name>
- <email-address>b at example.com</email-address>
- </user>
- <user>
- <uid>userc</uid>
- <guid>9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD2</guid>
+ <full-name>b</full-name>
+ <email>b at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>userc</short-name>
+ <uid>9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD2</uid>
<password>c</password>
- <name>c</name>
- <email-address>c at example.com</email-address>
- </user>
- <user>
- <uid>usercalonly</uid>
- <guid>9E1FFAC4-3CCD-45A1-8272-D161C92D2EEE</guid>
+ <full-name>c</full-name>
+ <email>c at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>usercalonly</short-name>
+ <uid>9E1FFAC4-3CCD-45A1-8272-D161C92D2EEE</uid>
<password>a</password>
- <name>a calonly</name>
- <email-address>a-calonly at example.com</email-address>
- </user>
- <user>
- <uid>useradbkonly</uid>
- <guid>7678EC8A-A069-4E82-9066-7279C6718507</guid>
+ <full-name>a calonly</full-name>
+ <email>a-calonly at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>useradbkonly</short-name>
+ <uid>7678EC8A-A069-4E82-9066-7279C6718507</uid>
<password>a</password>
- <name>a adbkonly</name>
- <email-address>a-adbkonly at example.com</email-address>
- </user>
- <user>
- <uid>nonascii</uid>
- <uid>nonascii佐藤</uid>
- <guid>320B73A1-46E2-4180-9563-782DFDBE1F63</guid>
+ <full-name>a adbkonly</full-name>
+ <email>a-adbkonly at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>nonascii</short-name>
+ <short-name>nonascii佐藤</short-name>
+ <uid>320B73A1-46E2-4180-9563-782DFDBE1F63</uid>
<password>a</password>
- <name>佐藤佐藤佐藤</name>
- <email-address>nonascii at example.com</email-address>
- </user>
- <user>
- <uid>delegator</uid>
- <guid>FC465590-E9E9-4746-ACE8-6C756A49FE4D</guid>
+ <full-name>佐藤佐藤佐藤</full-name>
+ <email>nonascii at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>delegator</short-name>
+ <uid>FC465590-E9E9-4746-ACE8-6C756A49FE4D</uid>
<password>a</password>
- <name>Calendar Delegator</name>
- <email-address>calendardelegator at example.com</email-address>
- </user>
- <user>
- <uid>occasionaldelegate</uid>
- <guid>EC465590-E9E9-4746-ACE8-6C756A49FE4D</guid>
+ <full-name>Calendar Delegator</full-name>
+ <email>calendardelegator at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>occasionaldelegate</short-name>
+ <uid>EC465590-E9E9-4746-ACE8-6C756A49FE4D</uid>
<password>a</password>
- <name>Occasional Delegate</name>
- <email-address>occasional at example.com</email-address>
- </user>
- <user>
- <uid>delegateviagroup</uid>
- <guid>46D9D716-CBEE-490F-907A-66FA6C3767FF</guid>
+ <full-name>Occasional Delegate</full-name>
+ <email>occasional at example.com</email>
+ </record>
+ <record type="user">
+ <short-name>delegateviagroup</short-name>
+ <uid>46D9D716-CBEE-490F-907A-66FA6C3767FF</uid>
<password>a</password>
- <name>Delegate Via Group</name>
- <email-address>delegateviagroup at example.com</email-address>
- </user>
- <group>
- <uid>delegategroup</uid>
- <guid>00599DAF-3E75-42DD-9DB7-52617E79943F</guid>
- <name>Delegate Group</name>
- <members>
- <member type="users">delegateviagroup</member>
- </members>
- </group>
+ <full-name>Delegate Via Group</full-name>
+ <email>delegateviagroup at example.com</email>
+ </record>
+ <record type="group">
+ <short-name>delegategroup</short-name>
+ <uid>00599DAF-3E75-42DD-9DB7-52617E79943F</uid>
+ <full-name>Delegate Group</full-name>
+ <member-uid>delegateviagroup</member-uid>
+ </record>
+
+ <record type="user">
+ <short-name>user01</short-name>
+ <uid>user01</uid>
+ <password>user01</password>
+ <full-name>User 01</full-name>
+ <email>user01 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user02</short-name>
+ <uid>user02</uid>
+ <password>user02</password>
+ <full-name>User 02</full-name>
+ <email>user02 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user03</short-name>
+ <uid>user03</uid>
+ <password>user03</password>
+ <full-name>User 03</full-name>
+ <email>user03 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user04</short-name>
+ <uid>user04</uid>
+ <password>user04</password>
+ <full-name>User 04</full-name>
+ <email>user04 at example.com</email>
+ </record>
+
+ <!-- Repeat is not (yet?) supported in twext.who.xml
<user repeat="100">
+ <short-name>user%02d</short-name>
<uid>user%02d</uid>
- <guid>user%02d</guid>
<password>%02duser</password>
- <name>~35 User %02d</name>
+ <full-name>~35 User %02d</full-name>
<first-name>~5</first-name>
<last-name>~9 User %02d</last-name>
- <email-address>~10 at example.com</email-address>
+ <email>~10 at example.com</email>
</user>
- <group>
- <uid>managers</uid>
- <guid>9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1</guid>
+ -->
+
+ <record type="group">
+ <short-name>managers</short-name>
+ <uid>9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1</uid>
<password>managers</password>
- <name>Managers</name>
- <members>
- <member type="users">lecroy</member>
- </members>
- </group>
- <group>
+ <full-name>Managers</full-name>
+ <member-uid>lecroy</member-uid>
+ </record>
+ <record type="group">
+ <short-name>admin</short-name>
<uid>admin</uid>
- <guid>admin</guid>
<password>admin</password>
- <name>Administrators</name>
- <members>
- <member type="groups">managers</member>
- </members>
- </group>
- <group>
+ <full-name>Administrators</full-name>
+ <member-uid>managers</member-uid>
+ </record>
+ <record type="group">
+ <short-name>grunts</short-name>
<uid>grunts</uid>
- <guid>grunts</guid>
<password>grunts</password>
- <name>We do all the work</name>
- <members>
- <member>wsanchez</member>
- <member>cdaboo</member>
- <member>dreid</member>
- </members>
- </group>
- <group>
+ <full-name>We do all the work</full-name>
+ <member-uid>wsanchez</member-uid>
+ <member-uid>cdaboo</member-uid>
+ <member-uid>dreid</member-uid>
+ </record>
+ <record type="group">
+ <short-name>right_coast</short-name>
<uid>right_coast</uid>
- <guid>right_coast</guid>
<password>right_coast</password>
- <name>East Coast</name>
- <members>
- <member>cdaboo</member>
- </members>
- </group>
- <group>
+ <full-name>East Coast</full-name>
+ <member-uid>cdaboo</member-uid>
+ </record>
+ <record type="group">
+ <short-name>left_coast</short-name>
<uid>left_coast</uid>
- <guid>left_coast</guid>
<password>left_coast</password>
- <name>West Coast</name>
- <members>
- <member>wsanchez</member>
- <member>lecroy</member>
- <member>dreid</member>
- </members>
- </group>
- <group>
+ <full-name>West Coast</full-name>
+ <member-uid>wsanchez</member-uid>
+ <member-uid>lecroy</member-uid>
+ <member-uid>dreid</member-uid>
+ </record>
+ <record type="group">
+ <short-name>both_coasts</short-name>
<uid>both_coasts</uid>
- <guid>both_coasts</guid>
<password>both_coasts</password>
- <name>Both Coasts</name>
- <members>
- <member type="groups">right_coast</member>
- <member type="groups">left_coast</member>
- </members>
- </group>
- <group>
+ <full-name>Both Coasts</full-name>
+ <member-uid>right_coast</member-uid>
+ <member-uid>left_coast</member-uid>
+ </record>
+ <record type="group">
+ <short-name>recursive1_coasts</short-name>
<uid>recursive1_coasts</uid>
- <guid>recursive1_coasts</guid>
<password>recursive1_coasts</password>
- <name>Recursive1 Coasts</name>
- <members>
- <member type="groups">recursive2_coasts</member>
- <member>wsanchez</member>
- </members>
- </group>
- <group>
+ <full-name>Recursive1 Coasts</full-name>
+ <member-uid>recursive2_coasts</member-uid>
+ <member-uid>wsanchez</member-uid>
+ </record>
+ <record type="group">
+ <short-name>recursive2_coasts</short-name>
<uid>recursive2_coasts</uid>
- <guid>recursive2_coasts</guid>
<password>recursive2_coasts</password>
- <name>Recursive2 Coasts</name>
- <members>
- <member type="groups">recursive1_coasts</member>
- <member>cdaboo</member>
- </members>
- </group>
- <group>
+ <full-name>Recursive2 Coasts</full-name>
+ <member-uid>recursive1_coasts</member-uid>
+ <member-uid>cdaboo</member-uid>
+ </record>
+ <record type="group">
+ <short-name>non_calendar_group</short-name>
<uid>non_calendar_group</uid>
- <guid>non_calendar_group</guid>
<password>non_calendar_group</password>
- <name>Non-calendar group</name>
- <members>
- <member>cdaboo</member>
- <member>lecroy</member>
- </members>
- </group>
+ <full-name>Non-calendar group</full-name>
+ <member-uid>cdaboo</member-uid>
+ <member-uid>lecroy</member-uid>
+ </record>
+ <!--
<location>
<uid>mercury</uid>
- <guid>mercury</guid>
+ <uid>mercury</uid>
<password>mercury</password>
<name>Mercury Seven</name>
<email-address>mercury at example.com</email-address>
</location>
<location>
<uid>gemini</uid>
- <guid>gemini</guid>
+ <uid>gemini</uid>
<password>gemini</password>
<name>Gemini Twelve</name>
<email-address>gemini at example.com</email-address>
</location>
<location>
<uid>apollo</uid>
- <guid>apollo</guid>
+ <uid>apollo</uid>
<password>apollo</password>
<name>Apollo Eleven</name>
<email-address>apollo at example.com</email-address>
</location>
<location>
<uid>orion</uid>
- <guid>orion</guid>
+ <uid>orion</uid>
<password>orion</password>
<name>Orion</name>
<email-address>orion at example.com</email-address>
</location>
<resource>
<uid>transporter</uid>
- <guid>transporter</guid>
+ <uid>transporter</uid>
<password>transporter</password>
<name>Mass Transporter</name>
<email-address>transporter at example.com</email-address>
</resource>
<resource>
<uid>ftlcpu</uid>
- <guid>ftlcpu</guid>
+ <uid>ftlcpu</uid>
<password>ftlcpu</password>
<name>Faster-Than-Light Microprocessor</name>
<email-address>ftlcpu at example.com</email-address>
</resource>
<resource>
<uid>non_calendar_proxy</uid>
- <guid>non_calendar_proxy</guid>
+ <uid>non_calendar_proxy</uid>
<password>non_calendar_proxy</password>
<name>Non-calendar proxy</name>
<email-address>non_calendar_proxy at example.com</email-address>
</resource>
<resource>
<uid>disabled</uid>
- <guid>disabled</guid>
+ <uid>disabled</uid>
<password>disabled</password>
<name>Disabled Record</name>
<email-address>disabled at example.com</email-address>
</resource>
-</accounts>
+-->
+</directory>
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_aggregate.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_aggregate.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_aggregate.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,87 +0,0 @@
-##
-# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from twistedcaldav.directory.xmlfile import XMLDirectoryService
-from twistedcaldav.directory.aggregate import AggregateDirectoryService
-
-from twistedcaldav.directory.test.test_xmlfile import xmlFile, augmentsFile
-
-import twistedcaldav.directory.test.util
-from twistedcaldav.directory import augment
-
-xml_prefix = "xml:"
-
-testServices = (
- (xml_prefix , twistedcaldav.directory.test.test_xmlfile.XMLFile),
-)
-
-class AggregatedDirectories (twistedcaldav.directory.test.util.DirectoryTestCase):
- def _recordTypes(self):
- recordTypes = set()
- for prefix, testClass in testServices:
- for recordType in testClass.recordTypes:
- recordTypes.add(prefix + recordType)
- return recordTypes
-
-
- def _records(key): #@NoSelf
- def get(self):
- records = {}
- for prefix, testClass in testServices:
- for record, info in getattr(testClass, key).iteritems():
- info = dict(info)
- info["prefix"] = prefix
- info["members"] = tuple(
- (t, prefix + s) for t, s in info.get("members", {})
- )
- records[prefix + record] = info
- return records
- return get
-
- recordTypes = property(_recordTypes)
- users = property(_records("users"))
- groups = property(_records("groups"))
- locations = property(_records("locations"))
- resources = property(_records("resources"))
- addresses = property(_records("addresses"))
-
- recordTypePrefixes = tuple(s[0] for s in testServices)
-
-
- def service(self):
- """
- Returns an IDirectoryService.
- """
- xmlService = XMLDirectoryService(
- {
- 'xmlFile' : xmlFile,
- 'augmentService' :
- augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
- }
- )
- xmlService.recordTypePrefix = xml_prefix
-
- return AggregateDirectoryService((xmlService,), None)
-
-
- def test_setRealm(self):
- """
- setRealm gets propagated to nested services
- """
- aggregatedService = self.service()
- aggregatedService.setRealm("foo.example.com")
- for service in aggregatedService._recordTypes.values():
- self.assertEquals("foo.example.com", service.realmName)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_augment.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_augment.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_augment.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -17,7 +17,6 @@
from twistedcaldav.test.util import TestCase
from twistedcaldav.directory.augment import AugmentXMLDB, AugmentSqliteDB, \
AugmentPostgreSQLDB, AugmentRecord
-from twistedcaldav.directory.directory import DirectoryService
from twisted.internet.defer import inlineCallbacks
from twistedcaldav.directory.xmlaugmentsparser import XMLAugmentsParser
import cStringIO
@@ -78,7 +77,7 @@
class AugmentTests(TestCase):
@inlineCallbacks
- def _checkRecord(self, db, items, recordType=DirectoryService.recordType_users):
+ def _checkRecord(self, db, items, recordType="users"):
record = (yield db.getAugmentRecord(items["uid"], recordType))
self.assertTrue(record is not None, "Failed record uid: %s" % (items["uid"],))
@@ -88,7 +87,7 @@
@inlineCallbacks
- def _checkRecordExists(self, db, uid, recordType=DirectoryService.recordType_users):
+ def _checkRecordExists(self, db, uid, recordType="users"):
record = (yield db.getAugmentRecord(uid, recordType))
self.assertTrue(record is not None, "Failed record uid: %s" % (uid,))
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_buildquery.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_buildquery.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_buildquery.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,160 +0,0 @@
-##
-# Copyright (c) 2009-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-try:
- from calendarserver.platform.darwin.od import dsattributes
-except ImportError:
- pass
-else:
- from twistedcaldav.test.util import TestCase
- from twistedcaldav.directory.appleopendirectory import (buildQueries,
- buildLocalQueriesFromTokens, OpenDirectoryService, buildNestedQueryFromTokens)
-
- class BuildQueryTests(TestCase):
-
- def test_buildQuery(self):
- self.assertEquals(
- buildQueries(
- [dsattributes.kDSStdRecordTypeUsers],
- (
- ("firstName", "morgen", True, "starts-with"),
- ("lastName", "sagen", True, "starts-with"),
- ),
- OpenDirectoryService._ODFields
- ),
- {
- ('dsAttrTypeStandard:FirstName', 'morgen', True, 'starts-with') : [dsattributes.kDSStdRecordTypeUsers],
- ('dsAttrTypeStandard:LastName', 'sagen', True, 'starts-with') : [dsattributes.kDSStdRecordTypeUsers],
- }
- )
- self.assertEquals(
- buildQueries(
- [
- dsattributes.kDSStdRecordTypeUsers,
- ],
- (
- ("firstName", "morgen", True, "starts-with"),
- ("emailAddresses", "morgen", True, "contains"),
- ),
- OpenDirectoryService._ODFields
- ),
- {
- ('dsAttrTypeStandard:FirstName', 'morgen', True, 'starts-with') : [dsattributes.kDSStdRecordTypeUsers],
- ('dsAttrTypeStandard:EMailAddress', 'morgen', True, 'contains') : [dsattributes.kDSStdRecordTypeUsers],
- }
- )
- self.assertEquals(
- buildQueries(
- [
- dsattributes.kDSStdRecordTypeGroups,
- ],
- (
- ("firstName", "morgen", True, "starts-with"),
- ("lastName", "morgen", True, "starts-with"),
- ("fullName", "morgen", True, "starts-with"),
- ("emailAddresses", "morgen", True, "contains"),
- ),
- OpenDirectoryService._ODFields
- ),
- {
- ('dsAttrTypeStandard:RealName', 'morgen', True, 'starts-with') : [dsattributes.kDSStdRecordTypeGroups],
- ('dsAttrTypeStandard:EMailAddress', 'morgen', True, 'contains') : [dsattributes.kDSStdRecordTypeGroups],
- }
- )
- self.assertEquals(
- buildQueries(
- [
- dsattributes.kDSStdRecordTypeUsers,
- dsattributes.kDSStdRecordTypeGroups,
- ],
- (
- ("firstName", "morgen", True, "starts-with"),
- ("lastName", "morgen", True, "starts-with"),
- ("fullName", "morgen", True, "starts-with"),
- ("emailAddresses", "morgen", True, "contains"),
- ),
- OpenDirectoryService._ODFields
- ),
- {
- ('dsAttrTypeStandard:RealName', 'morgen', True, 'starts-with') : [dsattributes.kDSStdRecordTypeUsers, dsattributes.kDSStdRecordTypeGroups],
- ('dsAttrTypeStandard:EMailAddress', 'morgen', True, 'contains') : [dsattributes.kDSStdRecordTypeUsers, dsattributes.kDSStdRecordTypeGroups],
- ('dsAttrTypeStandard:FirstName', 'morgen', True, 'starts-with') : [dsattributes.kDSStdRecordTypeUsers],
- ('dsAttrTypeStandard:LastName', 'morgen', True, 'starts-with') : [dsattributes.kDSStdRecordTypeUsers],
- }
- )
- self.assertEquals(
- buildQueries(
- [
- dsattributes.kDSStdRecordTypeGroups,
- ],
- (
- ("firstName", "morgen", True, "starts-with"),
- ),
- OpenDirectoryService._ODFields
- ),
- {
- }
- )
-
-
- def test_buildLocalQueryFromTokens(self):
- """
- Verify the generating of the simpler queries passed to /Local/Default
- """
- results = buildLocalQueriesFromTokens([], OpenDirectoryService._ODFields)
- self.assertEquals(results, None)
-
- results = buildLocalQueriesFromTokens(["foo"], OpenDirectoryService._ODFields)
- self.assertEquals(
- results[0].generate(),
- "(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*))"
- )
-
- results = buildLocalQueriesFromTokens(["foo", "bar"], OpenDirectoryService._ODFields)
- self.assertEquals(
- results[0].generate(),
- "(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*))"
- )
- self.assertEquals(
- results[1].generate(),
- "(|(dsAttrTypeStandard:RealName=*bar*)(dsAttrTypeStandard:EMailAddress=bar*))"
- )
-
-
- def test_buildNestedQueryFromTokens(self):
- """
- Verify the generating of the complex nested queries
- """
- query = buildNestedQueryFromTokens([], OpenDirectoryService._ODFields)
- self.assertEquals(query, None)
-
- query = buildNestedQueryFromTokens(["foo"], OpenDirectoryService._ODFields)
- self.assertEquals(
- query.generate(),
- "(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*)(dsAttrTypeStandard:RecordName=foo*))"
- )
-
- query = buildNestedQueryFromTokens(["foo", "bar"], OpenDirectoryService._ODFields)
- self.assertEquals(
- query.generate(),
- "(&(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*)(dsAttrTypeStandard:RecordName=foo*))(|(dsAttrTypeStandard:RealName=*bar*)(dsAttrTypeStandard:EMailAddress=bar*)(dsAttrTypeStandard:RecordName=bar*)))"
- )
-
- query = buildNestedQueryFromTokens(["foo", "bar", "baz"], OpenDirectoryService._ODFields)
- self.assertEquals(
- query.generate(),
- "(&(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*)(dsAttrTypeStandard:RecordName=foo*))(|(dsAttrTypeStandard:RealName=*bar*)(dsAttrTypeStandard:EMailAddress=bar*)(dsAttrTypeStandard:RecordName=bar*))(|(dsAttrTypeStandard:RealName=*baz*)(dsAttrTypeStandard:EMailAddress=baz*)(dsAttrTypeStandard:RecordName=baz*)))"
- )
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_cachedirectory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_cachedirectory.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_cachedirectory.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,405 +0,0 @@
-#
-# Copyright (c) 2009-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from uuid import uuid4
-
-from twistedcaldav.directory.cachingdirectory import CachingDirectoryService
-from twistedcaldav.directory.cachingdirectory import CachingDirectoryRecord
-from twistedcaldav.directory.directory import DirectoryService
-from twistedcaldav.directory.util import uuidFromName
-from twistedcaldav.directory.augment import AugmentRecord
-from twistedcaldav.test.util import TestCase
-from twistedcaldav.config import config
-
-
-class TestDirectoryService (CachingDirectoryService):
-
- realmName = "Dummy Realm"
- baseGUID = "20CB1593-DE3F-4422-A7D7-BA9C2099B317"
-
- def recordTypes(self):
- return (
- DirectoryService.recordType_users,
- DirectoryService.recordType_groups,
- DirectoryService.recordType_locations,
- DirectoryService.recordType_resources,
- )
-
-
- def queryDirectory(self, recordTypes, indexType, indexKey):
-
- self.queried = True
-
- for recordType in recordTypes:
- for record in self.fakerecords[recordType]:
- cacheIt = False
- if indexType in (
- CachingDirectoryService.INDEX_TYPE_SHORTNAME,
- CachingDirectoryService.INDEX_TYPE_CUA,
- CachingDirectoryService.INDEX_TYPE_AUTHID,
- ):
- if indexKey in record[indexType]:
- cacheIt = True
- else:
- if indexKey == record[indexType]:
- cacheIt = True
-
- if cacheIt:
- cacheRecord = CachingDirectoryRecord(
- service=self,
- recordType=recordType,
- guid=record.get("guid"),
- shortNames=record.get("shortname"),
- authIDs=record.get("authid"),
- fullName=record.get("fullName"),
- firstName="",
- lastName="",
- emailAddresses=record.get("email"),
- )
-
- augmentRecord = AugmentRecord(
- uid=cacheRecord.guid,
- enabled=True,
- enabledForCalendaring=True,
- )
-
- cacheRecord.addAugmentInformation(augmentRecord)
-
- self.recordCacheForType(recordType).addRecord(cacheRecord,
- indexType, indexKey)
-
-
-
-class CachingDirectoryTest(TestCase):
-
- baseGUID = str(uuid4())
-
-
- def setUp(self):
- super(CachingDirectoryTest, self).setUp()
- self.service = TestDirectoryService()
- self.service.queried = False
-
-
- def loadRecords(self, records):
- self.service._initCaches()
- self.service.fakerecords = records
- self.service.queried = False
-
-
- def fakeRecord(
- self,
- fullName,
- recordType,
- shortNames=None,
- guid=None,
- emails=None,
- members=None,
- resourceInfo=None,
- multinames=False
- ):
- if shortNames is None:
- shortNames = (self.shortNameForFullName(fullName),)
- if multinames:
- shortNames += (fullName,)
-
- if guid is None:
- guid = self.guidForShortName(shortNames[0], recordType=recordType)
- else:
- guid = guid.lower()
-
- if emails is None:
- emails = ("%s at example.com" % (shortNames[0],),)
-
- attrs = {
- "fullName": fullName,
- "guid": guid,
- "shortname": shortNames,
- "email": emails,
- "cua": tuple(["mailto:%s" % email for email in emails]),
- "authid": tuple(["Kerberos:%s" % email for email in emails])
- }
-
- if members:
- attrs["members"] = members
-
- if resourceInfo:
- attrs["resourceInfo"] = resourceInfo
-
- return attrs
-
-
- def shortNameForFullName(self, fullName):
- return fullName.lower().replace(" ", "")
-
-
- def guidForShortName(self, shortName, recordType=""):
- return uuidFromName(self.baseGUID, "%s%s" % (recordType, shortName))
-
-
- def dummyRecords(self):
- SIZE = 10
- records = {
- DirectoryService.recordType_users: [
- self.fakeRecord("User %02d" % x, DirectoryService.recordType_users, multinames=(x > 5)) for x in range(1, SIZE + 1)
- ],
- DirectoryService.recordType_groups: [
- self.fakeRecord("Group %02d" % x, DirectoryService.recordType_groups) for x in range(1, SIZE + 1)
- ],
- DirectoryService.recordType_resources: [
- self.fakeRecord("Resource %02d" % x, DirectoryService.recordType_resources) for x in range(1, SIZE + 1)
- ],
- DirectoryService.recordType_locations: [
- self.fakeRecord("Location %02d" % x, DirectoryService.recordType_locations) for x in range(1, SIZE + 1)
- ],
- }
- # Add duplicate shortnames
- records[DirectoryService.recordType_users].append(self.fakeRecord("Duplicate", DirectoryService.recordType_users, multinames=True))
- records[DirectoryService.recordType_groups].append(self.fakeRecord("Duplicate", DirectoryService.recordType_groups, multinames=True))
- records[DirectoryService.recordType_resources].append(self.fakeRecord("Duplicate", DirectoryService.recordType_resources, multinames=True))
- records[DirectoryService.recordType_locations].append(self.fakeRecord("Duplicate", DirectoryService.recordType_locations, multinames=True))
-
- self.loadRecords(records)
-
-
- def verifyRecords(self, recordType, expectedGUIDs):
-
- records = self.service.listRecords(recordType)
- recordGUIDs = set([record.guid for record in records])
- self.assertEqual(recordGUIDs, expectedGUIDs)
-
-
-
-class GUIDLookups(CachingDirectoryTest):
-
- def test_emptylist(self):
- self.dummyRecords()
-
- self.verifyRecords(DirectoryService.recordType_users, set())
- self.verifyRecords(DirectoryService.recordType_groups, set())
- self.verifyRecords(DirectoryService.recordType_resources, set())
- self.verifyRecords(DirectoryService.recordType_locations, set())
-
-
- def test_cacheoneguid(self):
- self.dummyRecords()
-
- self.assertTrue(self.service.recordWithGUID(self.guidForShortName("user01", recordType=DirectoryService.recordType_users)) is not None)
- self.assertTrue(self.service.queried)
- self.verifyRecords(DirectoryService.recordType_users, set((
- self.guidForShortName("user01", recordType=DirectoryService.recordType_users),
- )))
- self.verifyRecords(DirectoryService.recordType_groups, set())
- self.verifyRecords(DirectoryService.recordType_resources, set())
- self.verifyRecords(DirectoryService.recordType_locations, set())
-
- # Make sure it really is cached and won't cause another query
- self.service.queried = False
- self.assertTrue(self.service.recordWithGUID(self.guidForShortName("user01", recordType=DirectoryService.recordType_users)) is not None)
- self.assertFalse(self.service.queried)
-
- # Make sure guid is case-insensitive
- self.assertTrue(self.service.recordWithGUID(self.guidForShortName("user01", recordType=DirectoryService.recordType_users).lower()) is not None)
-
-
- def test_cacheoneshortname(self):
- self.dummyRecords()
-
- self.assertTrue(self.service.recordWithShortName(
- DirectoryService.recordType_users,
- "user02"
- ) is not None)
- self.assertTrue(self.service.queried)
- self.verifyRecords(DirectoryService.recordType_users, set((
- self.guidForShortName("user02", recordType=DirectoryService.recordType_users),
- )))
- self.verifyRecords(DirectoryService.recordType_groups, set())
- self.verifyRecords(DirectoryService.recordType_resources, set())
- self.verifyRecords(DirectoryService.recordType_locations, set())
-
- # Make sure it really is cached and won't cause another query
- self.service.queried = False
- self.assertTrue(self.service.recordWithShortName(
- DirectoryService.recordType_users,
- "user02"
- ) is not None)
- self.assertFalse(self.service.queried)
-
-
- def test_cacheoneemail(self):
- self.dummyRecords()
-
- self.assertTrue(self.service.recordWithCalendarUserAddress(
- "mailto:user03 at example.com"
- ) is not None)
- self.assertTrue(self.service.queried)
- self.verifyRecords(DirectoryService.recordType_users, set((
- self.guidForShortName("user03", recordType=DirectoryService.recordType_users),
- )))
- self.verifyRecords(DirectoryService.recordType_groups, set())
- self.verifyRecords(DirectoryService.recordType_resources, set())
- self.verifyRecords(DirectoryService.recordType_locations, set())
-
- # Make sure it really is cached and won't cause another query
- self.service.queried = False
- self.assertTrue(self.service.recordWithCalendarUserAddress(
- "mailto:user03 at example.com"
- ) is not None)
- self.assertFalse(self.service.queried)
-
-
- def test_cacheonePrincipalsURLWithUIDS(self):
- self.dummyRecords()
-
- guid = self.guidForShortName("user03", "users")
- self.assertTrue(self.service.recordWithCalendarUserAddress(
- "/principals/__uids__/%s" % (guid,)
- ) is not None)
- self.assertTrue(self.service.queried)
- self.verifyRecords(DirectoryService.recordType_users, set((
- self.guidForShortName("user03", recordType=DirectoryService.recordType_users),
- )))
- self.verifyRecords(DirectoryService.recordType_groups, set())
- self.verifyRecords(DirectoryService.recordType_resources, set())
- self.verifyRecords(DirectoryService.recordType_locations, set())
-
- # Make sure it really is cached and won't cause another query
- self.service.queried = False
- self.assertTrue(self.service.recordWithCalendarUserAddress(
- "/principals/__uids__/%s" % (guid,)
- ) is not None)
- self.assertFalse(self.service.queried)
-
-
- def test_cacheonePrincipalsURLWithUsers(self):
- self.dummyRecords()
-
- self.assertTrue(self.service.recordWithCalendarUserAddress(
- "/principals/users/user03"
- ) is not None)
- self.assertTrue(self.service.queried)
- self.verifyRecords(DirectoryService.recordType_users, set((
- self.guidForShortName("user03", recordType=DirectoryService.recordType_users),
- )))
- self.verifyRecords(DirectoryService.recordType_groups, set())
- self.verifyRecords(DirectoryService.recordType_resources, set())
- self.verifyRecords(DirectoryService.recordType_locations, set())
-
- # Make sure it really is cached and won't cause another query
- self.service.queried = False
- self.assertTrue(self.service.recordWithCalendarUserAddress(
- "/principals/users/user03"
- ) is not None)
- self.assertFalse(self.service.queried)
-
-
- def test_cacheoneauthid(self):
- self.dummyRecords()
-
- self.assertTrue(self.service.recordWithAuthID(
- "Kerberos:user03 at example.com"
- ) is not None)
- self.assertTrue(self.service.queried)
- self.verifyRecords(DirectoryService.recordType_users, set((
- self.guidForShortName("user03", recordType=DirectoryService.recordType_users),
- )))
- self.verifyRecords(DirectoryService.recordType_groups, set())
- self.verifyRecords(DirectoryService.recordType_resources, set())
- self.verifyRecords(DirectoryService.recordType_locations, set())
-
- # Make sure it really is cached and won't cause another query
- self.service.queried = False
- self.assertTrue(self.service.recordWithAuthID(
- "Kerberos:user03 at example.com"
- ) is not None)
- self.assertFalse(self.service.queried)
-
-
- def test_negativeCaching(self):
- self.dummyRecords()
-
- # If negativeCaching is off, each miss will result in a call to
- # queryDirectory( )
- self.service.negativeCaching = False
-
- self.service.queried = False
- self.assertEquals(self.service.recordWithGUID(self.guidForShortName("missing")), None)
- self.assertTrue(self.service.queried)
-
- self.service.queried = False
- self.assertEquals(self.service.recordWithGUID(self.guidForShortName("missing")), None)
- self.assertTrue(self.service.queried)
-
- # However, if negativeCaching is on, a miss is recorded as such,
- # preventing a similar queryDirectory( ) until cacheTimeout passes
- self.service.negativeCaching = True
-
- self.service.queried = False
- self.assertEquals(self.service.recordWithGUID(self.guidForShortName("missing")), None)
- self.assertTrue(self.service.queried)
-
- self.service.queried = False
- self.assertEquals(self.service.recordWithGUID(self.guidForShortName("missing")), None)
- self.assertFalse(self.service.queried)
-
- # Simulate time passing by clearing the negative timestamp for this
- # entry, then try again, this time queryDirectory( ) is called
- self.service._disabledKeys[self.service.INDEX_TYPE_GUID][self.guidForShortName("missing")] = 0
-
- self.service.queried = False
- self.assertEquals(self.service.recordWithGUID(self.guidForShortName("missing")), None)
- self.assertTrue(self.service.queried)
-
-
- def test_duplicateShortNames(self):
- """
- Verify that when looking up records having duplicate short-names, the record of the
- proper type is returned
- """
-
- self.patch(config.Memcached.Pools.Default, "ClientEnabled", True)
- self.dummyRecords()
-
- record = self.service.recordWithShortName(DirectoryService.recordType_users,
- "Duplicate")
- self.assertEquals(record.recordType, DirectoryService.recordType_users)
-
- record = self.service.recordWithShortName(DirectoryService.recordType_groups,
- "Duplicate")
- self.assertEquals(record.recordType, DirectoryService.recordType_groups)
-
- record = self.service.recordWithShortName(DirectoryService.recordType_resources,
- "Duplicate")
- self.assertEquals(record.recordType, DirectoryService.recordType_resources)
-
- record = self.service.recordWithShortName(DirectoryService.recordType_locations,
- "Duplicate")
- self.assertEquals(record.recordType, DirectoryService.recordType_locations)
-
-
- def test_generateMemcacheKey(self):
- """
- Verify keys are correctly generated based on the index type -- if index type is
- short-name, then the recordtype is encoded into the key.
- """
- self.assertEquals(
- self.service.generateMemcacheKey(self.service.INDEX_TYPE_GUID, "foo", "users"),
- "dir|v2|20CB1593-DE3F-4422-A7D7-BA9C2099B317|guid|foo",
- )
- self.assertEquals(
- self.service.generateMemcacheKey(self.service.INDEX_TYPE_SHORTNAME, "foo", "users"),
- "dir|v2|20CB1593-DE3F-4422-A7D7-BA9C2099B317|users|shortname|foo",
- )
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_directory.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_directory.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_directory.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,1201 +0,0 @@
-##
-# Copyright (c) 2011-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from twisted.internet.defer import inlineCallbacks
-from twisted.python.filepath import FilePath
-
-from twistedcaldav.test.util import TestCase
-from twistedcaldav.test.util import xmlFile, augmentsFile, proxiesFile, dirTest
-from twistedcaldav.config import config
-from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord, GroupMembershipCache, GroupMembershipCacheUpdater, diffAssignments
-from twistedcaldav.directory.xmlfile import XMLDirectoryService
-from twistedcaldav.directory.calendaruserproxyloader import XMLCalendarUserProxyLoader
-from twistedcaldav.directory import augment, calendaruserproxy
-from twistedcaldav.directory.util import normalizeUUID
-from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
-
-import cPickle as pickle
-import uuid
-
-def StubCheckSACL(cls, username, service):
- services = {
- "calendar" : ["amanda", "betty"],
- "addressbook" : ["amanda", "carlene"],
- }
- if username in services[service]:
- return 0
- return 1
-
-
-
-class SACLTests(TestCase):
-
- def setUp(self):
- self.patch(DirectoryRecord, "CheckSACL", StubCheckSACL)
- self.patch(config, "EnableSACLs", True)
- self.service = DirectoryService()
- self.service.setRealm("test")
- self.service.baseGUID = "0E8E6EC2-8E52-4FF3-8F62-6F398B08A498"
-
-
- def test_applySACLs(self):
- """
- Users not in calendar SACL will have enabledForCalendaring set to
- False.
- Users not in addressbook SACL will have enabledForAddressBooks set to
- False.
- """
-
- data = [
- ("amanda", True, True,),
- ("betty", True, False,),
- ("carlene", False, True,),
- ("daniel", False, False,),
- ]
- for username, cal, ab in data:
- record = DirectoryRecord(self.service, "users", None, (username,),
- enabledForCalendaring=True, enabledForAddressBooks=True)
- record.applySACLs()
- self.assertEquals(record.enabledForCalendaring, cal)
- self.assertEquals(record.enabledForAddressBooks, ab)
-
-
-
-class GroupMembershipTests (TestCase):
-
- @inlineCallbacks
- def setUp(self):
- super(GroupMembershipTests, self).setUp()
-
- self.directoryFixture.addDirectoryService(XMLDirectoryService(
- {
- 'xmlFile' : xmlFile,
- 'augmentService' :
- augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
- }
- ))
- calendaruserproxy.ProxyDBService = calendaruserproxy.ProxySqliteDB("proxies.sqlite")
-
- # Set up a principals hierarchy for each service we're testing with
- self.principalRootResources = {}
- name = self.directoryService.__class__.__name__
- url = "/" + name + "/"
-
- provisioningResource = DirectoryPrincipalProvisioningResource(url, self.directoryService)
-
- self.site.resource.putChild(name, provisioningResource)
-
- self.principalRootResources[self.directoryService.__class__.__name__] = provisioningResource
-
- yield XMLCalendarUserProxyLoader(proxiesFile.path).updateProxyDB()
-
-
- def tearDown(self):
- """ Empty the proxy db between tests """
- return calendaruserproxy.ProxyDBService.clean() #@UndefinedVariable
-
-
- def _getPrincipalByShortName(self, type, name):
- provisioningResource = self.principalRootResources[self.directoryService.__class__.__name__]
- return provisioningResource.principalForShortName(type, name)
-
-
- def _updateMethod(self):
- """
- Update a counter in the following test
- """
- self.count += 1
-
-
- def test_expandedMembers(self):
- """
- Make sure expandedMembers( ) returns a complete, flattened set of
- members of a group, including all sub-groups.
- """
- bothCoasts = self.directoryService.recordWithShortName(
- DirectoryService.recordType_groups, "both_coasts")
- self.assertEquals(
- set([r.guid for r in bothCoasts.expandedMembers()]),
- set(['8B4288F6-CC82-491D-8EF9-642EF4F3E7D0',
- '6423F94A-6B76-4A3A-815B-D52CFD77935D',
- '5A985493-EE2C-4665-94CF-4DFEA3A89500',
- '5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1',
- 'left_coast',
- 'right_coast'])
- )
-
-
- @inlineCallbacks
- def test_groupMembershipCache(self):
- """
- Ensure we get back what we put in
- """
- cache = GroupMembershipCache("ProxyDB", expireSeconds=10)
-
- yield cache.setGroupsFor("a", set(["b", "c", "d"])) # a is in b, c, d
- members = (yield cache.getGroupsFor("a"))
- self.assertEquals(members, set(["b", "c", "d"]))
-
- yield cache.setGroupsFor("b", set()) # b not in any groups
- members = (yield cache.getGroupsFor("b"))
- self.assertEquals(members, set())
-
- cache._memcacheProtocol.advanceClock(10)
-
- members = (yield cache.getGroupsFor("a")) # has expired
- self.assertEquals(members, set())
-
-
- @inlineCallbacks
- def test_groupMembershipCacheUpdater(self):
- """
- Let the GroupMembershipCacheUpdater populate the cache, then make
- sure proxyFor( ) and groupMemberships( ) work from the cache
- """
- cache = GroupMembershipCache("ProxyDB", expireSeconds=60)
- # Having a groupMembershipCache assigned to the directory service is the
- # trigger to use such a cache:
- self.directoryService.groupMembershipCache = cache
-
- updater = GroupMembershipCacheUpdater(
- calendaruserproxy.ProxyDBService, self.directoryService, 30, 30, 30,
- cache=cache, useExternalProxies=False)
-
- # Exercise getGroups()
- groups, aliases = (yield updater.getGroups())
- self.assertEquals(
- groups,
- {
- '00599DAF-3E75-42DD-9DB7-52617E79943F':
- set(['46D9D716-CBEE-490F-907A-66FA6C3767FF']),
- '9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1':
- set(['8B4288F6-CC82-491D-8EF9-642EF4F3E7D0']),
- 'admin':
- set(['9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1']),
- 'both_coasts':
- set(['left_coast', 'right_coast']),
- 'grunts':
- set(['5A985493-EE2C-4665-94CF-4DFEA3A89500',
- '5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1',
- '6423F94A-6B76-4A3A-815B-D52CFD77935D']),
- 'left_coast':
- set(['5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1',
- '6423F94A-6B76-4A3A-815B-D52CFD77935D',
- '8B4288F6-CC82-491D-8EF9-642EF4F3E7D0']),
- 'non_calendar_group':
- set(['5A985493-EE2C-4665-94CF-4DFEA3A89500',
- '8B4288F6-CC82-491D-8EF9-642EF4F3E7D0']),
- 'recursive1_coasts':
- set(['6423F94A-6B76-4A3A-815B-D52CFD77935D',
- 'recursive2_coasts']),
- 'recursive2_coasts':
- set(['5A985493-EE2C-4665-94CF-4DFEA3A89500',
- 'recursive1_coasts']),
- 'right_coast':
- set(['5A985493-EE2C-4665-94CF-4DFEA3A89500'])
- }
- )
- self.assertEquals(
- aliases,
- {
- '00599DAF-3E75-42DD-9DB7-52617E79943F':
- '00599DAF-3E75-42DD-9DB7-52617E79943F',
- '9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1':
- '9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1',
- 'admin': 'admin',
- 'both_coasts': 'both_coasts',
- 'grunts': 'grunts',
- 'left_coast': 'left_coast',
- 'non_calendar_group': 'non_calendar_group',
- 'recursive1_coasts': 'recursive1_coasts',
- 'recursive2_coasts': 'recursive2_coasts',
- 'right_coast': 'right_coast'
- }
- )
-
- # Exercise expandedMembers()
- self.assertEquals(
- updater.expandedMembers(groups, "both_coasts"),
- set(['5A985493-EE2C-4665-94CF-4DFEA3A89500',
- '5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1',
- '6423F94A-6B76-4A3A-815B-D52CFD77935D',
- '8B4288F6-CC82-491D-8EF9-642EF4F3E7D0',
- 'left_coast',
- 'right_coast']
- )
- )
-
- # Prevent an update by locking the cache
- acquiredLock = (yield cache.acquireLock())
- self.assertTrue(acquiredLock)
- self.assertEquals((False, 0, 0), (yield updater.updateCache()))
-
- # You can't lock when already locked:
- acquiredLockAgain = (yield cache.acquireLock())
- self.assertFalse(acquiredLockAgain)
-
- # Allow an update by unlocking the cache
- yield cache.releaseLock()
-
- self.assertEquals((False, 9, 9), (yield updater.updateCache()))
-
- # Verify cache is populated:
- self.assertTrue((yield cache.isPopulated()))
-
- delegates = (
-
- # record name
- # read-write delegators
- # read-only delegators
- # groups delegate is in (restricted to only those groups
- # participating in delegation)
-
- ("wsanchez",
- set(["mercury", "apollo", "orion", "gemini"]),
- set(["non_calendar_proxy"]),
- set(['left_coast',
- 'both_coasts',
- 'recursive1_coasts',
- 'recursive2_coasts',
- 'gemini#calendar-proxy-write',
- ]),
- ),
- ("cdaboo",
- set(["apollo", "orion", "non_calendar_proxy"]),
- set(["non_calendar_proxy"]),
- set(['both_coasts',
- 'non_calendar_group',
- 'recursive1_coasts',
- 'recursive2_coasts',
- ]),
- ),
- ("lecroy",
- set(["apollo", "mercury", "non_calendar_proxy"]),
- set(),
- set(['both_coasts',
- 'left_coast',
- 'non_calendar_group',
- ]),
- ),
- ("usera",
- set(),
- set(),
- set(),
- ),
- ("userb",
- set(['7423F94A-6B76-4A3A-815B-D52CFD77935D']),
- set(),
- set(['7423F94A-6B76-4A3A-815B-D52CFD77935D#calendar-proxy-write']),
- ),
- ("userc",
- set(['7423F94A-6B76-4A3A-815B-D52CFD77935D']),
- set(),
- set(['7423F94A-6B76-4A3A-815B-D52CFD77935D#calendar-proxy-write']),
- ),
- )
-
- for name, write, read, groups in delegates:
- delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
-
- proxyFor = (yield delegate.proxyFor(True))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- write,
- )
- proxyFor = (yield delegate.proxyFor(False))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- read,
- )
- groupsIn = (yield delegate.groupMemberships())
- uids = set()
- for group in groupsIn:
- try:
- uid = group.uid # a sub-principal
- except AttributeError:
- uid = group.record.guid # a regular group
- uids.add(uid)
- self.assertEquals(
- set(uids),
- groups,
- )
-
- # Verify CalendarUserProxyPrincipalResource.containsPrincipal( ) works
- delegator = self._getPrincipalByShortName(DirectoryService.recordType_locations, "mercury")
- proxyPrincipal = delegator.getChild("calendar-proxy-write")
- for expected, name in [(True, "wsanchez"), (False, "cdaboo")]:
- delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
- self.assertEquals(expected, (yield proxyPrincipal.containsPrincipal(delegate)))
-
- # Verify that principals who were previously members of delegated-to groups but
- # are no longer members have their proxyFor info cleaned out of the cache:
- # Remove wsanchez from all groups in the directory, run the updater, then check
- # that wsanchez is only a proxy for gemini (since that assignment does not involve groups)
- self.directoryService.xmlFile = dirTest.child("accounts-modified.xml")
- self.directoryService._alwaysStat = True
- self.assertEquals((False, 8, 1), (yield updater.updateCache()))
- delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, "wsanchez")
- proxyFor = (yield delegate.proxyFor(True))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- set(['gemini'])
- )
-
-
- @inlineCallbacks
- def test_groupMembershipCacheUpdaterExternalProxies(self):
- """
- Exercise external proxy assignment support (assignments come from the
- directory service itself)
- """
- cache = GroupMembershipCache("ProxyDB", expireSeconds=60)
- # Having a groupMembershipCache assigned to the directory service is the
- # trigger to use such a cache:
- self.directoryService.groupMembershipCache = cache
-
- # This time, we're setting some external proxy assignments for the
- # "transporter" resource...
- def fakeExternalProxies():
- return [
- (
- "transporter#calendar-proxy-write",
- set(["6423F94A-6B76-4A3A-815B-D52CFD77935D",
- "8B4288F6-CC82-491D-8EF9-642EF4F3E7D0"])
- ),
- (
- "transporter#calendar-proxy-read",
- set(["5A985493-EE2C-4665-94CF-4DFEA3A89500"])
- ),
- ]
-
- updater = GroupMembershipCacheUpdater(
- calendaruserproxy.ProxyDBService, self.directoryService, 30, 30, 30,
- cache=cache, useExternalProxies=True,
- externalProxiesSource=fakeExternalProxies)
-
- yield updater.updateCache()
-
- delegates = (
-
- # record name
- # read-write delegators
- # read-only delegators
- # groups delegate is in (restricted to only those groups
- # participating in delegation)
-
- ("wsanchez",
- set(["mercury", "apollo", "orion", "gemini", "transporter"]),
- set(["non_calendar_proxy"]),
- set(['left_coast',
- 'both_coasts',
- 'recursive1_coasts',
- 'recursive2_coasts',
- 'gemini#calendar-proxy-write',
- 'transporter#calendar-proxy-write',
- ]),
- ),
- ("cdaboo",
- set(["apollo", "orion", "non_calendar_proxy"]),
- set(["non_calendar_proxy", "transporter"]),
- set(['both_coasts',
- 'non_calendar_group',
- 'recursive1_coasts',
- 'recursive2_coasts',
- 'transporter#calendar-proxy-read',
- ]),
- ),
- ("lecroy",
- set(["apollo", "mercury", "non_calendar_proxy", "transporter"]),
- set(),
- set(['both_coasts',
- 'left_coast',
- 'non_calendar_group',
- 'transporter#calendar-proxy-write',
- ]),
- ),
- )
-
- for name, write, read, groups in delegates:
- delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
-
- proxyFor = (yield delegate.proxyFor(True))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- write,
- )
- proxyFor = (yield delegate.proxyFor(False))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- read,
- )
- groupsIn = (yield delegate.groupMemberships())
- uids = set()
- for group in groupsIn:
- try:
- uid = group.uid # a sub-principal
- except AttributeError:
- uid = group.record.guid # a regular group
- uids.add(uid)
- self.assertEquals(
- set(uids),
- groups,
- )
-
- #
- # Now remove two external assignments, and those should take effect.
- #
- def fakeExternalProxiesRemoved():
- return [
- (
- "transporter#calendar-proxy-write",
- set(["8B4288F6-CC82-491D-8EF9-642EF4F3E7D0"])
- ),
- ]
-
- updater = GroupMembershipCacheUpdater(
- calendaruserproxy.ProxyDBService, self.directoryService, 30, 30, 30,
- cache=cache, useExternalProxies=True,
- externalProxiesSource=fakeExternalProxiesRemoved)
-
- yield updater.updateCache()
-
- delegates = (
-
- # record name
- # read-write delegators
- # read-only delegators
- # groups delegate is in (restricted to only those groups
- # participating in delegation)
-
- # Note: "transporter" is now gone for wsanchez and cdaboo
-
- ("wsanchez",
- set(["mercury", "apollo", "orion", "gemini"]),
- set(["non_calendar_proxy"]),
- set(['left_coast',
- 'both_coasts',
- 'recursive1_coasts',
- 'recursive2_coasts',
- 'gemini#calendar-proxy-write',
- ]),
- ),
- ("cdaboo",
- set(["apollo", "orion", "non_calendar_proxy"]),
- set(["non_calendar_proxy"]),
- set(['both_coasts',
- 'non_calendar_group',
- 'recursive1_coasts',
- 'recursive2_coasts',
- ]),
- ),
- ("lecroy",
- set(["apollo", "mercury", "non_calendar_proxy", "transporter"]),
- set(),
- set(['both_coasts',
- 'left_coast',
- 'non_calendar_group',
- 'transporter#calendar-proxy-write',
- ]),
- ),
- )
-
- for name, write, read, groups in delegates:
- delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
-
- proxyFor = (yield delegate.proxyFor(True))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- write,
- )
- proxyFor = (yield delegate.proxyFor(False))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- read,
- )
- groupsIn = (yield delegate.groupMemberships())
- uids = set()
- for group in groupsIn:
- try:
- uid = group.uid # a sub-principal
- except AttributeError:
- uid = group.record.guid # a regular group
- uids.add(uid)
- self.assertEquals(
- set(uids),
- groups,
- )
-
- #
- # Now remove all external assignments, and those should take effect.
- #
- def fakeExternalProxiesEmpty():
- return []
-
- updater = GroupMembershipCacheUpdater(
- calendaruserproxy.ProxyDBService, self.directoryService, 30, 30, 30,
- cache=cache, useExternalProxies=True,
- externalProxiesSource=fakeExternalProxiesEmpty)
-
- yield updater.updateCache()
-
- delegates = (
-
- # record name
- # read-write delegators
- # read-only delegators
- # groups delegate is in (restricted to only those groups
- # participating in delegation)
-
- # Note: "transporter" is now gone for everyone
-
- ("wsanchez",
- set(["mercury", "apollo", "orion", "gemini"]),
- set(["non_calendar_proxy"]),
- set(['left_coast',
- 'both_coasts',
- 'recursive1_coasts',
- 'recursive2_coasts',
- 'gemini#calendar-proxy-write',
- ]),
- ),
- ("cdaboo",
- set(["apollo", "orion", "non_calendar_proxy"]),
- set(["non_calendar_proxy"]),
- set(['both_coasts',
- 'non_calendar_group',
- 'recursive1_coasts',
- 'recursive2_coasts',
- ]),
- ),
- ("lecroy",
- set(["apollo", "mercury", "non_calendar_proxy"]),
- set(),
- set(['both_coasts',
- 'left_coast',
- 'non_calendar_group',
- ]),
- ),
- )
-
- for name, write, read, groups in delegates:
- delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
-
- proxyFor = (yield delegate.proxyFor(True))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- write,
- )
- proxyFor = (yield delegate.proxyFor(False))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- read,
- )
- groupsIn = (yield delegate.groupMemberships())
- uids = set()
- for group in groupsIn:
- try:
- uid = group.uid # a sub-principal
- except AttributeError:
- uid = group.record.guid # a regular group
- uids.add(uid)
- self.assertEquals(
- set(uids),
- groups,
- )
-
- #
- # Now add back an external assignments, and those should take effect.
- #
- def fakeExternalProxiesAdded():
- return [
- (
- "transporter#calendar-proxy-write",
- set(["8B4288F6-CC82-491D-8EF9-642EF4F3E7D0"])
- ),
- ]
-
- updater = GroupMembershipCacheUpdater(
- calendaruserproxy.ProxyDBService, self.directoryService, 30, 30, 30,
- cache=cache, useExternalProxies=True,
- externalProxiesSource=fakeExternalProxiesAdded)
-
- yield updater.updateCache()
-
- delegates = (
-
- # record name
- # read-write delegators
- # read-only delegators
- # groups delegate is in (restricted to only those groups
- # participating in delegation)
-
- ("wsanchez",
- set(["mercury", "apollo", "orion", "gemini"]),
- set(["non_calendar_proxy"]),
- set(['left_coast',
- 'both_coasts',
- 'recursive1_coasts',
- 'recursive2_coasts',
- 'gemini#calendar-proxy-write',
- ]),
- ),
- ("cdaboo",
- set(["apollo", "orion", "non_calendar_proxy"]),
- set(["non_calendar_proxy"]),
- set(['both_coasts',
- 'non_calendar_group',
- 'recursive1_coasts',
- 'recursive2_coasts',
- ]),
- ),
- ("lecroy",
- set(["apollo", "mercury", "non_calendar_proxy", "transporter"]),
- set(),
- set(['both_coasts',
- 'left_coast',
- 'non_calendar_group',
- 'transporter#calendar-proxy-write',
- ]),
- ),
- )
-
- for name, write, read, groups in delegates:
- delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
-
- proxyFor = (yield delegate.proxyFor(True))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- write,
- )
- proxyFor = (yield delegate.proxyFor(False))
- self.assertEquals(
- set([p.record.guid for p in proxyFor]),
- read,
- )
- groupsIn = (yield delegate.groupMemberships())
- uids = set()
- for group in groupsIn:
- try:
- uid = group.uid # a sub-principal
- except AttributeError:
- uid = group.record.guid # a regular group
- uids.add(uid)
- self.assertEquals(
- set(uids),
- groups,
- )
-
-
- def test_diffAssignments(self):
- """
- Ensure external proxy assignment diffing works
- """
-
- self.assertEquals(
- (
- # changed
- [],
- # removed
- [],
- ),
- diffAssignments(
- # old
- [],
- # new
- [],
- )
- )
-
- self.assertEquals(
- (
- # changed
- [],
- # removed
- [],
- ),
- diffAssignments(
- # old
- [("B", set(["3"])), ("A", set(["1", "2"])), ],
- # new
- [("A", set(["1", "2"])), ("B", set(["3"])), ],
- )
- )
-
- self.assertEquals(
- (
- # changed
- [("A", set(["1", "2"])), ("B", set(["3"])), ],
- # removed
- [],
- ),
- diffAssignments(
- # old
- [],
- # new
- [("A", set(["1", "2"])), ("B", set(["3"])), ],
- )
- )
-
- self.assertEquals(
- (
- # changed
- [],
- # removed
- ["A", "B"],
- ),
- diffAssignments(
- # old
- [("A", set(["1", "2"])), ("B", set(["3"])), ],
- # new
- [],
- )
- )
-
- self.assertEquals(
- (
- # changed
- [("A", set(["2"])), ("C", set(["4", "5"])), ("D", set(["6"])), ],
- # removed
- ["B"],
- ),
- diffAssignments(
- # old
- [("A", set(["1", "2"])), ("B", set(["3"])), ("C", set(["4"])), ],
- # new
- [("D", set(["6"])), ("C", set(["4", "5"])), ("A", set(["2"])), ],
- )
- )
-
-
- @inlineCallbacks
- def test_groupMembershipCacheSnapshot(self):
- """
- The group membership cache creates a snapshot (a pickle file) of
- the member -> groups dictionary, and can quickly refresh memcached
- from that snapshot when restarting the server.
- """
- cache = GroupMembershipCache("ProxyDB", expireSeconds=60)
- # Having a groupMembershipCache assigned to the directory service is the
- # trigger to use such a cache:
- self.directoryService.groupMembershipCache = cache
-
- updater = GroupMembershipCacheUpdater(
- calendaruserproxy.ProxyDBService, self.directoryService, 30, 30, 30,
- cache=cache)
-
- dataRoot = FilePath(config.DataRoot)
- snapshotFile = dataRoot.child("memberships_cache")
-
- # Snapshot doesn't exist initially
- self.assertFalse(snapshotFile.exists())
-
- # Try a fast update (as when the server starts up for the very first
- # time), but since the snapshot doesn't exist we fault in from the
- # directory (fast now is False), and snapshot will get created
-
- # Note that because fast=True and isPopulated() is False, locking is
- # ignored:
- yield cache.acquireLock()
-
- self.assertFalse((yield cache.isPopulated()))
- fast, numMembers, numChanged = (yield updater.updateCache(fast=True))
- self.assertEquals(fast, False)
- self.assertEquals(numMembers, 9)
- self.assertEquals(numChanged, 9)
- self.assertTrue(snapshotFile.exists())
- self.assertTrue((yield cache.isPopulated()))
-
- yield cache.releaseLock()
-
- # Try another fast update where the snapshot already exists (as in a
- # server-restart scenario), which will only read from the snapshot
- # as indicated by the return value for "fast". Note that the cache
- # is already populated so updateCache( ) in fast mode will not do
- # anything, and numMembers will be 0.
- fast, numMembers, numChanged = (yield updater.updateCache(fast=True))
- self.assertEquals(fast, True)
- self.assertEquals(numMembers, 0)
-
- # Try an update which faults in from the directory (fast=False)
- fast, numMembers, numChanged = (yield updater.updateCache(fast=False))
- self.assertEquals(fast, False)
- self.assertEquals(numMembers, 9)
- self.assertEquals(numChanged, 0)
-
- # Verify the snapshot contains the pickled dictionary we expect
- expected = {
- "46D9D716-CBEE-490F-907A-66FA6C3767FF":
- set([
- u"00599DAF-3E75-42DD-9DB7-52617E79943F",
- ]),
- "5A985493-EE2C-4665-94CF-4DFEA3A89500":
- set([
- u"non_calendar_group",
- u"recursive1_coasts",
- u"recursive2_coasts",
- u"both_coasts"
- ]),
- "6423F94A-6B76-4A3A-815B-D52CFD77935D":
- set([
- u"left_coast",
- u"recursive1_coasts",
- u"recursive2_coasts",
- u"both_coasts"
- ]),
- "5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1":
- set([
- u"left_coast",
- u"both_coasts"
- ]),
- "8B4288F6-CC82-491D-8EF9-642EF4F3E7D0":
- set([
- u"non_calendar_group",
- u"left_coast",
- u"both_coasts"
- ]),
- "left_coast":
- set([
- u"both_coasts"
- ]),
- "recursive1_coasts":
- set([
- u"recursive1_coasts",
- u"recursive2_coasts"
- ]),
- "recursive2_coasts":
- set([
- u"recursive1_coasts",
- u"recursive2_coasts"
- ]),
- "right_coast":
- set([
- u"both_coasts"
- ])
- }
- members = pickle.loads(snapshotFile.getContent())
- self.assertEquals(members, expected)
-
- # "Corrupt" the snapshot and verify it is regenerated properly
- snapshotFile.setContent("xyzzy")
- cache.delete("group-cacher-populated")
- fast, numMembers, numChanged = (yield updater.updateCache(fast=True))
- self.assertEquals(fast, False)
- self.assertEquals(numMembers, 9)
- self.assertEquals(numChanged, 9)
- self.assertTrue(snapshotFile.exists())
- members = pickle.loads(snapshotFile.getContent())
- self.assertEquals(members, expected)
-
-
- def test_autoAcceptMembers(self):
- """
- autoAcceptMembers( ) returns an empty list if no autoAcceptGroup is
- assigned, or the expanded membership if assigned.
- """
-
- # No auto-accept-group for "orion" in augments.xml
- orion = self.directoryService.recordWithGUID("orion")
- self.assertEquals(orion.autoAcceptMembers(), [])
-
- # "both_coasts" group assigned to "apollo" in augments.xml
- apollo = self.directoryService.recordWithGUID("apollo")
- self.assertEquals(
- set(apollo.autoAcceptMembers()),
- set([
- "8B4288F6-CC82-491D-8EF9-642EF4F3E7D0",
- "5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1",
- "5A985493-EE2C-4665-94CF-4DFEA3A89500",
- "6423F94A-6B76-4A3A-815B-D52CFD77935D",
- "right_coast",
- "left_coast",
- ])
- )
-
-
- # @inlineCallbacks
- # def testScheduling(self):
- # """
- # Exercise schedulePolledGroupCachingUpdate
- # """
-
- # groupCacher = StubGroupCacher()
-
-
- # def decorateTransaction(txn):
- # txn._groupCacher = groupCacher
-
- # store = yield buildStore(self, None)
- # store.callWithNewTransactions(decorateTransaction)
- # wp = (yield schedulePolledGroupCachingUpdate(store))
- # yield wp.whenExecuted()
- # self.assertTrue(groupCacher.called)
-
- # testScheduling.skip = "Fix WorkProposal to track delayed calls and cancel them"
-
-
-
-class StubGroupCacher(object):
- def __init__(self):
- self.called = False
- self.updateSeconds = 99
-
-
- def updateCache(self):
- self.called = True
-
-
-
-class RecordsMatchingTokensTests(TestCase):
-
- @inlineCallbacks
- def setUp(self):
- super(RecordsMatchingTokensTests, self).setUp()
-
- self.directoryFixture.addDirectoryService(XMLDirectoryService(
- {
- 'xmlFile' : xmlFile,
- 'augmentService' :
- augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
- }
- ))
- calendaruserproxy.ProxyDBService = calendaruserproxy.ProxySqliteDB("proxies.sqlite")
-
- # Set up a principals hierarchy for each service we're testing with
- self.principalRootResources = {}
- name = self.directoryService.__class__.__name__
- url = "/" + name + "/"
-
- provisioningResource = DirectoryPrincipalProvisioningResource(url, self.directoryService)
-
- self.site.resource.putChild(name, provisioningResource)
-
- self.principalRootResources[self.directoryService.__class__.__name__] = provisioningResource
-
- yield XMLCalendarUserProxyLoader(proxiesFile.path).updateProxyDB()
-
-
- def tearDown(self):
- """ Empty the proxy db between tests """
- return calendaruserproxy.ProxyDBService.clean() #@UndefinedVariable
-
-
- @inlineCallbacks
- def test_recordsMatchingTokens(self):
- """
- Exercise the default recordsMatchingTokens implementation
- """
- records = list((yield self.directoryService.recordsMatchingTokens(["Use", "01"])))
- self.assertNotEquals(len(records), 0)
- shorts = [record.shortNames[0] for record in records]
- self.assertTrue("user01" in shorts)
-
- records = list((yield self.directoryService.recordsMatchingTokens(['"quotey"'],
- context=self.directoryService.searchContext_attendee)))
- self.assertEquals(len(records), 1)
- self.assertEquals(records[0].shortNames[0], "doublequotes")
-
- records = list((yield self.directoryService.recordsMatchingTokens(["coast"])))
- self.assertEquals(len(records), 5)
-
- records = list((yield self.directoryService.recordsMatchingTokens(["poll"],
- context=self.directoryService.searchContext_location)))
- self.assertEquals(len(records), 1)
- self.assertEquals(records[0].shortNames[0], "apollo")
-
-
- def test_recordTypesForSearchContext(self):
- self.assertEquals(
- [self.directoryService.recordType_locations],
- self.directoryService.recordTypesForSearchContext("location")
- )
- self.assertEquals(
- [self.directoryService.recordType_resources],
- self.directoryService.recordTypesForSearchContext("resource")
- )
- self.assertEquals(
- [self.directoryService.recordType_users],
- self.directoryService.recordTypesForSearchContext("user")
- )
- self.assertEquals(
- [self.directoryService.recordType_groups],
- self.directoryService.recordTypesForSearchContext("group")
- )
- self.assertEquals(
- set([
- self.directoryService.recordType_resources,
- self.directoryService.recordType_users,
- self.directoryService.recordType_groups
- ]),
- set(self.directoryService.recordTypesForSearchContext("attendee"))
- )
-
-
-
-class GUIDTests(TestCase):
-
- def setUp(self):
- self.service = DirectoryService()
- self.service.setRealm("test")
- self.service.baseGUID = "0E8E6EC2-8E52-4FF3-8F62-6F398B08A498"
-
-
- def test_normalizeUUID(self):
-
- # Ensure that record.guid automatically gets normalized to
- # uppercase+hyphenated form if the value is one that uuid.UUID( )
- # recognizes.
-
- data = (
- (
- "0543A85A-D446-4CF6-80AE-6579FA60957F",
- "0543A85A-D446-4CF6-80AE-6579FA60957F"
- ),
- (
- "0543a85a-d446-4cf6-80ae-6579fa60957f",
- "0543A85A-D446-4CF6-80AE-6579FA60957F"
- ),
- (
- "0543A85AD4464CF680AE-6579FA60957F",
- "0543A85A-D446-4CF6-80AE-6579FA60957F"
- ),
- (
- "0543a85ad4464cf680ae6579fa60957f",
- "0543A85A-D446-4CF6-80AE-6579FA60957F"
- ),
- (
- "foo",
- "foo"
- ),
- (
- None,
- None
- ),
- )
- for original, expected in data:
- self.assertEquals(expected, normalizeUUID(original))
- record = DirectoryRecord(self.service, "users", original,
- shortNames=("testing",))
- self.assertEquals(expected, record.guid)
-
-
-
-class DirectoryServiceTests(TestCase):
- """
- Test L{DirectoryService} apis.
- """
-
- class StubDirectoryService(DirectoryService):
-
- def __init__(self):
- self._records = {}
-
-
- def createRecord(self, recordType, guid=None, shortNames=(), authIDs=set(),
- fullName=None, firstName=None, lastName=None, emailAddresses=set(),
- uid=None, password=None, **kwargs):
- """
- Create/persist a directory record based on the given values
- """
-
- record = DirectoryRecord(
- self,
- recordType,
- guid=guid,
- shortNames=shortNames,
- authIDs=authIDs,
- fullName=fullName,
- firstName=firstName,
- lastName=lastName,
- emailAddresses=emailAddresses,
- uid=uid,
- password=password,
- **kwargs
- )
- self._records.setdefault(recordType, []).append(record)
-
-
- def recordTypes(self):
- return self._records.keys()
-
-
- def listRecords(self, recordType):
- return self._records[recordType]
-
-
- def setUp(self):
- self.service = self.StubDirectoryService()
- self.service.setRealm("test")
- self.service.baseGUID = "0E8E6EC2-8E52-4FF3-8F62-6F398B08A498"
-
-
- def test_recordWithCalendarUserAddress_principal_uris(self):
- """
- Make sure that recordWithCalendarUserAddress handles percent-encoded
- principal URIs.
- """
-
- self.service.createRecord(
- DirectoryService.recordType_users,
- guid="user01",
- shortNames=("user 01", "User 01"),
- fullName="User 01",
- enabledForCalendaring=True,
- )
- self.service.createRecord(
- DirectoryService.recordType_users,
- guid="user02",
- shortNames=("user02", "User 02"),
- fullName="User 02",
- enabledForCalendaring=True,
- )
-
- record = self.service.recordWithCalendarUserAddress("/principals/users/user%2001")
- self.assertTrue(record is not None)
- record = self.service.recordWithCalendarUserAddress("/principals/users/user02")
- self.assertTrue(record is not None)
- record = self.service.recordWithCalendarUserAddress("/principals/users/user%0202")
- self.assertTrue(record is None)
-
-
-
-class DirectoryRecordTests(TestCase):
- """
- Test L{DirectoryRecord} apis.
- """
-
- def setUp(self):
- self.service = DirectoryService()
- self.service.setRealm("test")
- self.service.baseGUID = "0E8E6EC2-8E52-4FF3-8F62-6F398B08A498"
-
-
- def test_cacheToken(self):
- """
- Test that DirectoryRecord.cacheToken is different for different records, and its value changes
- as attributes on the record change.
- """
-
- record1 = DirectoryRecord(self.service, "users", str(uuid.uuid4()), shortNames=("testing1",))
- record2 = DirectoryRecord(self.service, "users", str(uuid.uuid4()), shortNames=("testing2",))
- self.assertNotEquals(record1.cacheToken(), record2.cacheToken())
-
- cache1 = record1.cacheToken()
- record1.enabled = True
- self.assertNotEquals(cache1, record1.cacheToken())
-
- cache1 = record1.cacheToken()
- record1.enabledForCalendaring = True
- self.assertNotEquals(cache1, record1.cacheToken())
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_modify.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_modify.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_modify.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,159 +0,0 @@
-##
-# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-import os
-from twistedcaldav.config import config
-from twistedcaldav.test.util import TestCase
-from calendarserver.tools.util import getDirectory
-from twext.python.filepath import CachingFilePath as FilePath
-from twistedcaldav.directory.directory import DirectoryError
-
-
-class ModificationTestCase(TestCase):
-
- def setUp(self):
- super(ModificationTestCase, self).setUp()
-
- testRoot = os.path.join(os.path.dirname(__file__), "modify")
- #configFileName = os.path.join(testRoot, "caldavd.plist")
- #config.load(configFileName)
-
- usersFile = os.path.join(testRoot, "users-groups.xml")
- config.DirectoryService.params.xmlFile = usersFile
-
- # Copy xml file containing locations/resources to a temp file because
- # we're going to be modifying it during testing
-
- origResourcesFile = FilePath(os.path.join(os.path.dirname(__file__),
- "modify", "resources-locations.xml"))
- copyResourcesFile = FilePath(self.mktemp())
- origResourcesFile.copyTo(copyResourcesFile)
- config.ResourceService.params.xmlFile = copyResourcesFile
- config.ResourceService.Enabled = True
-
- augmentsFile = os.path.join(testRoot, "augments.xml")
- config.AugmentService.params.xmlFiles = (augmentsFile,)
-
-
- def test_createRecord(self):
- directory = getDirectory()
-
- record = directory.recordWithUID("resource01")
- self.assertEquals(record, None)
-
- directory.createRecord("resources", guid="resource01",
- shortNames=("resource01",), uid="resource01",
- emailAddresses=("res1 at example.com", "res2 at example.com"),
- comment="Test Comment")
-
- record = directory.recordWithUID("resource01")
- self.assertNotEquals(record, None)
-
- self.assertEquals(len(record.emailAddresses), 2)
- self.assertEquals(record.extras['comment'], "Test Comment")
-
- directory.createRecord("resources", guid="resource02", shortNames=("resource02",), uid="resource02")
-
- record = directory.recordWithUID("resource02")
- self.assertNotEquals(record, None)
-
- # Make sure old records are still there:
- record = directory.recordWithUID("resource01")
- self.assertNotEquals(record, None)
- record = directory.recordWithUID("location01")
- self.assertNotEquals(record, None)
-
-
- def test_destroyRecord(self):
- directory = getDirectory()
-
- record = directory.recordWithUID("resource01")
- self.assertEquals(record, None)
-
- directory.createRecord("resources", guid="resource01", shortNames=("resource01",), uid="resource01")
-
- record = directory.recordWithUID("resource01")
- self.assertNotEquals(record, None)
-
- directory.destroyRecord("resources", guid="resource01")
-
- record = directory.recordWithUID("resource01")
- self.assertEquals(record, None)
-
- # Make sure old records are still there:
- record = directory.recordWithUID("location01")
- self.assertNotEquals(record, None)
-
-
- def test_updateRecord(self):
- directory = getDirectory()
-
- directory.createRecord("resources", guid="resource01",
- shortNames=("resource01",), uid="resource01",
- fullName="Resource number 1")
-
- record = directory.recordWithUID("resource01")
- self.assertEquals(record.fullName, "Resource number 1")
-
- directory.updateRecord("resources", guid="resource01",
- shortNames=("resource01", "r01"), uid="resource01",
- fullName="Resource #1", firstName="First", lastName="Last",
- emailAddresses=("resource01 at example.com", "r01 at example.com"),
- comment="Test Comment")
-
- record = directory.recordWithUID("resource01")
- self.assertEquals(record.fullName, "Resource #1")
- self.assertEquals(record.firstName, "First")
- self.assertEquals(record.lastName, "Last")
- self.assertEquals(set(record.shortNames), set(["resource01", "r01"]))
- self.assertEquals(record.emailAddresses,
- set(["resource01 at example.com", "r01 at example.com"]))
- self.assertEquals(record.extras['comment'], "Test Comment")
-
- # Make sure old records are still there:
- record = directory.recordWithUID("location01")
- self.assertNotEquals(record, None)
-
-
- def test_createDuplicateRecord(self):
- directory = getDirectory()
-
- directory.createRecord("resources", guid="resource01", shortNames=("resource01",), uid="resource01")
- self.assertRaises(DirectoryError, directory.createRecord, "resources", guid="resource01", shortNames=("resource01",), uid="resource01")
-
-
- def test_missingShortNames(self):
- directory = getDirectory()
-
- directory.createRecord("resources", guid="resource01")
-
- record = directory.recordWithUID("resource01")
- self.assertEquals(record.shortNames[0], "resource01")
-
- directory.updateRecord("resources", guid="resource01",
- fullName="Resource #1")
-
- record = directory.recordWithUID("resource01")
- self.assertEquals(record.shortNames[0], "resource01")
- self.assertEquals(record.fullName, "Resource #1")
-
-
- def test_missingGUID(self):
- directory = getDirectory()
-
- record = directory.createRecord("resources")
-
- self.assertEquals(record.shortNames[0], record.guid)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_principal.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_principal.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_principal.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -16,68 +16,63 @@
from __future__ import print_function
import os
+from urllib import quote
from twisted.cred.credentials import UsernamePassword
from twisted.internet.defer import inlineCallbacks
-from txdav.xml import element as davxml
-from txweb2.dav.fileop import rmdir
-from txweb2.dav.resource import AccessDeniedError
-from txweb2.http import HTTPError
-from txweb2.test.test_server import SimpleRequest
-
+from twistedcaldav import carddavxml
from twistedcaldav.cache import DisabledCacheNotifier
from twistedcaldav.caldavxml import caldav_namespace
from twistedcaldav.config import config
from twistedcaldav.customxml import calendarserver_namespace
-from twistedcaldav.directory import augment, calendaruserproxy
from twistedcaldav.directory.addressbook import DirectoryAddressBookHomeProvisioningResource
from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
-from twistedcaldav.directory.directory import DirectoryService
-from twistedcaldav.directory.xmlfile import XMLDirectoryService
-from twistedcaldav.directory.test.test_xmlfile import xmlFile, augmentsFile
+from twistedcaldav.directory.principal import DirectoryCalendarPrincipalResource
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
-from twistedcaldav.directory.principal import DirectoryPrincipalTypeProvisioningResource
from twistedcaldav.directory.principal import DirectoryPrincipalResource
-from twistedcaldav.directory.principal import DirectoryCalendarPrincipalResource
-from twistedcaldav import carddavxml
-import twistedcaldav.test.util
-
+from twistedcaldav.directory.principal import DirectoryPrincipalTypeProvisioningResource
+from twistedcaldav.test.util import StoreTestCase
from txdav.common.datastore.file import CommonDataStore
-from urllib import quote
+from txdav.xml import element as davxml
+from txweb2.dav.fileop import rmdir
+from txweb2.dav.resource import AccessDeniedError
+from txweb2.http import HTTPError
+from txweb2.test.test_server import SimpleRequest
-class ProvisionedPrincipals (twistedcaldav.test.util.TestCase):
+
+class ProvisionedPrincipals(StoreTestCase): # twistedcaldav.test.util.TestCase):
"""
Directory service provisioned principals.
"""
- def setUp(self):
- super(ProvisionedPrincipals, self).setUp()
+ # def setUp(self):
+ # super(ProvisionedPrincipals, self).setUp()
- self.directoryServices = (
- XMLDirectoryService(
- {
- 'xmlFile' : xmlFile,
- 'augmentService' :
- augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
- }
- ),
- )
+ # self.directoryServices = (
+ # XMLDirectoryService(
+ # {
+ # 'xmlFile' : xmlFile,
+ # 'augmentService' :
+ # augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
+ # }
+ # ),
+ # )
- # Set up a principals hierarchy for each service we're testing with
- self.principalRootResources = {}
- for directory in self.directoryServices:
- name = directory.__class__.__name__
- url = "/" + name + "/"
+ # # Set up a principals hierarchy for each service we're testing with
+ # self.principalRootResources = {}
+ # for directory in self.directoryServices:
+ # name = directory.__class__.__name__
+ # url = "/" + name + "/"
- provisioningResource = DirectoryPrincipalProvisioningResource(url, directory)
- directory.setPrincipalCollection(provisioningResource)
+ # provisioningResource = DirectoryPrincipalProvisioningResource(url, directory)
+ # directory.setPrincipalCollection(provisioningResource)
- self.site.resource.putChild(name, provisioningResource)
+ # self.site.resource.putChild(name, provisioningResource)
- self.principalRootResources[directory.__class__.__name__] = provisioningResource
+ # self.principalRootResources[directory.__class__.__name__] = provisioningResource
- calendaruserproxy.ProxyDBService = calendaruserproxy.ProxySqliteDB(os.path.abspath(self.mktemp()))
+ # calendaruserproxy.ProxyDBService = calendaruserproxy.ProxySqliteDB(os.path.abspath(self.mktemp()))
@inlineCallbacks
@@ -95,7 +90,8 @@
DirectoryPrincipalResource.principalURL(),
"""
- for directory in self.directoryServices:
+ directory = self.directory
+ if True:
#print("\n -> %s" % (directory.__class__.__name__,))
provisioningResource = self.principalRootResources[directory.__class__.__name__]
@@ -170,33 +166,33 @@
"""
DirectoryPrincipalProvisioningResource.principalForUser()
"""
- for directory in self.directoryServices:
- provisioningResource = self.principalRootResources[directory.__class__.__name__]
+ directory = self.directory
+ provisioningResource = self.principalRootResources[directory.__class__.__name__]
- for user in directory.listRecords(DirectoryService.recordType_users):
- userResource = provisioningResource.principalForUser(user.shortNames[0])
- if user.enabled:
- self.failIf(userResource is None)
- self.assertEquals(user, userResource.record)
- else:
- self.failIf(userResource is not None)
+ for user in directory.listRecords(DirectoryService.recordType_users):
+ userResource = provisioningResource.principalForUser(user.shortNames[0])
+ if user.enabled:
+ self.failIf(userResource is None)
+ self.assertEquals(user, userResource.record)
+ else:
+ self.failIf(userResource is not None)
def test_principalForAuthID(self):
"""
DirectoryPrincipalProvisioningResource.principalForAuthID()
"""
- for directory in self.directoryServices:
- provisioningResource = self.principalRootResources[directory.__class__.__name__]
+ directory = self.directory
+ provisioningResource = self.principalRootResources[directory.__class__.__name__]
- for user in directory.listRecords(DirectoryService.recordType_users):
- creds = UsernamePassword(user.shortNames[0], "bogus")
- userResource = provisioningResource.principalForAuthID(creds)
- if user.enabled:
- self.failIf(userResource is None)
- self.assertEquals(user, userResource.record)
- else:
- self.failIf(userResource is not None)
+ for user in directory.listRecords(DirectoryService.recordType_users):
+ creds = UsernamePassword(user.shortNames[0], "bogus")
+ userResource = provisioningResource.principalForAuthID(creds)
+ if user.enabled:
+ self.failIf(userResource is None)
+ self.assertEquals(user, userResource.record)
+ else:
+ self.failIf(userResource is not None)
def test_principalForUID(self):
@@ -465,23 +461,23 @@
# Need to create a addressbook home provisioner for each service.
addressBookRootResources = {}
- for directory in self.directoryServices:
- path = os.path.join(self.docroot, directory.__class__.__name__)
+ directory = self.directory
+ path = os.path.join(self.docroot, directory.__class__.__name__)
- if os.path.exists(path):
- rmdir(path)
- os.mkdir(path)
+ if os.path.exists(path):
+ rmdir(path)
+ os.mkdir(path)
- # Need a data store
- _newStore = CommonDataStore(path, None, None, True, False)
+ # need a data store
+ _newstore = commondatastore(path, none, none, true, false)
- provisioningResource = DirectoryAddressBookHomeProvisioningResource(
- directory,
- "/addressbooks/",
- _newStore
- )
+ provisioningresource = directoryaddressbookhomeprovisioningresource(
+ directory,
+ "/addressbooks/",
+ _newstore
+ )
- addressBookRootResources[directory.__class__.__name__] = provisioningResource
+ addressbookrootresources[directory.__class__.__name__] = provisioningResource
# AddressBook home provisioners should result in addressBook homes.
for provisioningResource, _ignore_recordType, recordResource, record in self._allRecords():
@@ -517,23 +513,23 @@
# Need to create a calendar home provisioner for each service.
calendarRootResources = {}
- for directory in self.directoryServices:
- path = os.path.join(self.docroot, directory.__class__.__name__)
+ directory = self.directory
+ path = os.path.join(self.docroot, directory.__class__.__name__)
- if os.path.exists(path):
- rmdir(path)
- os.mkdir(path)
+ if os.path.exists(path):
+ rmdir(path)
+ os.mkdir(path)
- # Need a data store
- _newStore = CommonDataStore(path, None, None, True, False)
+ # Need a data store
+ _newStore = CommonDataStore(path, None, None, True, False)
- provisioningResource = DirectoryCalendarHomeProvisioningResource(
- directory,
- "/calendars/",
- _newStore
- )
+ provisioningResource = DirectoryCalendarHomeProvisioningResource(
+ directory,
+ "/calendars/",
+ _newStore
+ )
- calendarRootResources[directory.__class__.__name__] = provisioningResource
+ calendarRootResources[directory.__class__.__name__] = provisioningResource
# Calendar home provisioners should result in calendar homes.
for provisioningResource, _ignore_recordType, recordResource, record in self._allRecords():
@@ -643,19 +639,19 @@
"""
Default access controls for principal provisioning resources.
"""
- for directory in self.directoryServices:
- #print("\n -> %s" % (directory.__class__.__name__,))
- provisioningResource = self.principalRootResources[directory.__class__.__name__]
+ directory = self.directory
+ #print("\n -> %s" % (directory.__class__.__name__,))
+ provisioningResource = self.principalRootResources[directory.__class__.__name__]
- for args in _authReadOnlyPrivileges(self, provisioningResource, provisioningResource.principalCollectionURL()):
- yield self._checkPrivileges(*args)
+ for args in _authReadOnlyPrivileges(self, provisioningResource, provisioningResource.principalCollectionURL()):
+ yield self._checkPrivileges(*args)
- for recordType in (yield provisioningResource.listChildren()):
- #print(" -> %s" % (recordType,))
- typeResource = provisioningResource.getChild(recordType)
+ for recordType in (yield provisioningResource.listChildren()):
+ #print(" -> %s" % (recordType,))
+ typeResource = provisioningResource.getChild(recordType)
- for args in _authReadOnlyPrivileges(self, typeResource, typeResource.principalCollectionURL()):
- yield self._checkPrivileges(*args)
+ for args in _authReadOnlyPrivileges(self, typeResource, typeResource.principalCollectionURL()):
+ yield self._checkPrivileges(*args)
def test_propertyToField(self):
@@ -705,14 +701,14 @@
C{record} is the directory service record
for each record in each directory in C{directoryServices}.
"""
- for directory in self.directoryServices:
- provisioningResource = self.principalRootResources[
- directory.__class__.__name__
- ]
- for recordType in directory.recordTypes():
- for record in directory.listRecords(recordType):
- recordResource = provisioningResource.principalForRecord(record)
- yield provisioningResource, recordType, recordResource, record
+ directory = self.directory
+ provisioningResource = self.principalRootResources[
+ directory.__class__.__name__
+ ]
+ for recordType in directory.recordTypes():
+ for record in directory.listRecords(recordType):
+ recordResource = provisioningResource.principalForRecord(record)
+ yield provisioningResource, recordType, recordResource, record
def _checkPrivileges(self, resource, url, principal, privilege, allowed):
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_proxyprincipalmembers.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_proxyprincipalmembers.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_proxyprincipalmembers.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,506 +0,0 @@
-##
-# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from twisted.internet.defer import DeferredList, inlineCallbacks, succeed
-from txdav.xml import element as davxml
-
-from twistedcaldav.directory.directory import DirectoryService
-from twistedcaldav.test.util import xmlFile, augmentsFile, proxiesFile
-from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource, \
- DirectoryPrincipalResource
-from twistedcaldav.directory.xmlfile import XMLDirectoryService
-
-import twistedcaldav.test.util
-from twistedcaldav.directory import augment, calendaruserproxy
-from twistedcaldav.directory.calendaruserproxyloader import XMLCalendarUserProxyLoader
-
-
-class ProxyPrincipals (twistedcaldav.test.util.TestCase):
- """
- Directory service provisioned principals.
- """
-
- @inlineCallbacks
- def setUp(self):
- super(ProxyPrincipals, self).setUp()
-
- self.directoryFixture.addDirectoryService(XMLDirectoryService(
- {
- 'xmlFile' : xmlFile,
- 'augmentService' :
- augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
- }
- ))
- calendaruserproxy.ProxyDBService = calendaruserproxy.ProxySqliteDB("proxies.sqlite")
-
- # Set up a principals hierarchy for each service we're testing with
- self.principalRootResources = {}
- name = self.directoryService.__class__.__name__
- url = "/" + name + "/"
-
- provisioningResource = DirectoryPrincipalProvisioningResource(url, self.directoryService)
-
- self.site.resource.putChild(name, provisioningResource)
-
- self.principalRootResources[self.directoryService.__class__.__name__] = provisioningResource
-
- yield XMLCalendarUserProxyLoader(proxiesFile.path).updateProxyDB()
-
-
- def tearDown(self):
- """ Empty the proxy db between tests """
- return calendaruserproxy.ProxyDBService.clean() #@UndefinedVariable
-
-
- def _getPrincipalByShortName(self, type, name):
- provisioningResource = self.principalRootResources[self.directoryService.__class__.__name__]
- return provisioningResource.principalForShortName(type, name)
-
-
- def _groupMembersTest(self, recordType, recordName, subPrincipalName, expectedMembers):
- def gotMembers(members):
- memberNames = set([p.displayName() for p in members])
- self.assertEquals(memberNames, set(expectedMembers))
-
- principal = self._getPrincipalByShortName(recordType, recordName)
- if subPrincipalName is not None:
- principal = principal.getChild(subPrincipalName)
-
- d = principal.expandedGroupMembers()
- d.addCallback(gotMembers)
- return d
-
-
- def _groupMembershipsTest(self, recordType, recordName, subPrincipalName, expectedMemberships):
- def gotMemberships(memberships):
- uids = set([p.principalUID() for p in memberships])
- self.assertEquals(uids, set(expectedMemberships))
-
- principal = self._getPrincipalByShortName(recordType, recordName)
- if subPrincipalName is not None:
- principal = principal.getChild(subPrincipalName)
-
- d = principal.groupMemberships()
- d.addCallback(gotMemberships)
- return d
-
-
- @inlineCallbacks
- def _addProxy(self, principal, subPrincipalName, proxyPrincipal):
-
- if isinstance(principal, tuple):
- principal = self._getPrincipalByShortName(principal[0], principal[1])
- principal = principal.getChild(subPrincipalName)
- members = (yield principal.groupMembers())
-
- if isinstance(proxyPrincipal, tuple):
- proxyPrincipal = self._getPrincipalByShortName(proxyPrincipal[0], proxyPrincipal[1])
- members.add(proxyPrincipal)
-
- yield principal.setGroupMemberSetPrincipals(members)
-
-
- @inlineCallbacks
- def _removeProxy(self, recordType, recordName, subPrincipalName, proxyRecordType, proxyRecordName):
-
- principal = self._getPrincipalByShortName(recordType, recordName)
- principal = principal.getChild(subPrincipalName)
- members = (yield principal.groupMembers())
-
- proxyPrincipal = self._getPrincipalByShortName(proxyRecordType, proxyRecordName)
- for p in members:
- if p.principalUID() == proxyPrincipal.principalUID():
- members.remove(p)
- break
-
- yield principal.setGroupMemberSetPrincipals(members)
-
-
- @inlineCallbacks
- def _clearProxy(self, principal, subPrincipalName):
-
- if isinstance(principal, tuple):
- principal = self._getPrincipalByShortName(principal[0], principal[1])
- principal = principal.getChild(subPrincipalName)
- yield principal.setGroupMemberSetPrincipals(set())
-
-
- @inlineCallbacks
- def _proxyForTest(self, recordType, recordName, expectedProxies, read_write):
- principal = self._getPrincipalByShortName(recordType, recordName)
- proxies = (yield principal.proxyFor(read_write))
- proxies = sorted([_principal.displayName() for _principal in proxies])
- self.assertEquals(proxies, sorted(expectedProxies))
-
-
- @inlineCallbacks
- def test_multipleProxyAssignmentsAtOnce(self):
- yield self._proxyForTest(
- DirectoryService.recordType_users, "userb",
- ('a',),
- True
- )
- yield self._proxyForTest(
- DirectoryService.recordType_users, "userc",
- ('a',),
- True
- )
-
-
- def test_groupMembersRegular(self):
- """
- DirectoryPrincipalResource.expandedGroupMembers()
- """
- return self._groupMembersTest(
- DirectoryService.recordType_groups, "both_coasts", None,
- ("Chris Lecroy", "David Reid", "Wilfredo Sanchez", "West Coast", "East Coast", "Cyrus Daboo",),
- )
-
-
- def test_groupMembersRecursive(self):
- """
- DirectoryPrincipalResource.expandedGroupMembers()
- """
- return self._groupMembersTest(
- DirectoryService.recordType_groups, "recursive1_coasts", None,
- ("Wilfredo Sanchez", "Recursive2 Coasts", "Cyrus Daboo",),
- )
-
-
- def test_groupMembersProxySingleUser(self):
- """
- DirectoryPrincipalResource.expandedGroupMembers()
- """
- return self._groupMembersTest(
- DirectoryService.recordType_locations, "gemini", "calendar-proxy-write",
- ("Wilfredo Sanchez",),
- )
-
-
- def test_groupMembersProxySingleGroup(self):
- """
- DirectoryPrincipalResource.expandedGroupMembers()
- """
- return self._groupMembersTest(
- DirectoryService.recordType_locations, "mercury", "calendar-proxy-write",
- ("Chris Lecroy", "David Reid", "Wilfredo Sanchez", "West Coast",),
- )
-
-
- def test_groupMembersProxySingleGroupWithNestedGroups(self):
- """
- DirectoryPrincipalResource.expandedGroupMembers()
- """
- return self._groupMembersTest(
- DirectoryService.recordType_locations, "apollo", "calendar-proxy-write",
- ("Chris Lecroy", "David Reid", "Wilfredo Sanchez", "West Coast", "East Coast", "Cyrus Daboo", "Both Coasts",),
- )
-
-
- def test_groupMembersProxySingleGroupWithNestedRecursiveGroups(self):
- """
- DirectoryPrincipalResource.expandedGroupMembers()
- """
- return self._groupMembersTest(
- DirectoryService.recordType_locations, "orion", "calendar-proxy-write",
- ("Wilfredo Sanchez", "Cyrus Daboo", "Recursive1 Coasts", "Recursive2 Coasts",),
- )
-
-
- def test_groupMembersProxySingleGroupWithNonCalendarGroup(self):
- """
- DirectoryPrincipalResource.expandedGroupMembers()
- """
- ds = []
-
- ds.append(self._groupMembersTest(
- DirectoryService.recordType_resources, "non_calendar_proxy", "calendar-proxy-write",
- ("Chris Lecroy", "Cyrus Daboo", "Non-calendar group"),
- ))
-
- ds.append(self._groupMembershipsTest(
- DirectoryService.recordType_groups, "non_calendar_group", None,
- ("non_calendar_proxy#calendar-proxy-write",),
- ))
-
- return DeferredList(ds)
-
-
- def test_groupMembersProxyMissingUser(self):
- """
- DirectoryPrincipalResource.expandedGroupMembers()
- """
- proxy = self._getPrincipalByShortName(DirectoryService.recordType_users, "cdaboo")
- proxyGroup = proxy.getChild("calendar-proxy-write")
-
- def gotMembers(members):
- members.add("12345")
- return proxyGroup._index().setGroupMembers("%s#calendar-proxy-write" % (proxy.principalUID(),), members)
-
- def check(_):
- return self._groupMembersTest(
- DirectoryService.recordType_users, "cdaboo", "calendar-proxy-write",
- (),
- )
-
- # Setup the fake entry in the DB
- d = proxyGroup._index().getMembers("%s#calendar-proxy-write" % (proxy.principalUID(),))
- d.addCallback(gotMembers)
- d.addCallback(check)
- return d
-
-
- def test_groupMembershipsMissingUser(self):
- """
- DirectoryPrincipalResource.expandedGroupMembers()
- """
- # Setup the fake entry in the DB
- fake_uid = "12345"
- proxy = self._getPrincipalByShortName(DirectoryService.recordType_users, "cdaboo")
- proxyGroup = proxy.getChild("calendar-proxy-write")
-
- def gotMembers(members):
- members.add("%s#calendar-proxy-write" % (proxy.principalUID(),))
- return proxyGroup._index().setGroupMembers("%s#calendar-proxy-write" % (fake_uid,), members)
-
- def check(_):
- return self._groupMembershipsTest(
- DirectoryService.recordType_users, "cdaboo", "calendar-proxy-write",
- (),
- )
-
- d = proxyGroup._index().getMembers("%s#calendar-proxy-write" % (fake_uid,))
- d.addCallback(gotMembers)
- d.addCallback(check)
- return d
-
-
- @inlineCallbacks
- def test_setGroupMemberSet(self):
- class StubMemberDB(object):
- def __init__(self):
- self.members = set()
-
- def setGroupMembers(self, uid, members):
- self.members = members
- return succeed(None)
-
- def getMembers(self, uid):
- return succeed(self.members)
-
- user = self._getPrincipalByShortName(self.directoryService.recordType_users,
- "cdaboo")
-
- proxyGroup = user.getChild("calendar-proxy-write")
-
- memberdb = StubMemberDB()
-
- proxyGroup._index = (lambda: memberdb)
-
- new_members = davxml.GroupMemberSet(
- davxml.HRef.fromString(
- "/XMLDirectoryService/__uids__/8B4288F6-CC82-491D-8EF9-642EF4F3E7D0/"),
- davxml.HRef.fromString(
- "/XMLDirectoryService/__uids__/5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1/"))
-
- yield proxyGroup.setGroupMemberSet(new_members, None)
-
- self.assertEquals(
- set([str(p) for p in memberdb.members]),
- set(["5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1",
- "8B4288F6-CC82-491D-8EF9-642EF4F3E7D0"]))
-
-
- @inlineCallbacks
- def test_setGroupMemberSetNotifiesPrincipalCaches(self):
- class StubCacheNotifier(object):
- changedCount = 0
- def changed(self):
- self.changedCount += 1
- return succeed(None)
-
- user = self._getPrincipalByShortName(self.directoryService.recordType_users, "cdaboo")
-
- proxyGroup = user.getChild("calendar-proxy-write")
-
- notifier = StubCacheNotifier()
-
- oldCacheNotifier = DirectoryPrincipalResource.cacheNotifierFactory
-
- try:
- DirectoryPrincipalResource.cacheNotifierFactory = (lambda _1, _2, **kwargs: notifier)
-
- self.assertEquals(notifier.changedCount, 0)
-
- yield proxyGroup.setGroupMemberSet(
- davxml.GroupMemberSet(
- davxml.HRef.fromString(
- "/XMLDirectoryService/__uids__/5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1/")),
- None)
-
- self.assertEquals(notifier.changedCount, 1)
- finally:
- DirectoryPrincipalResource.cacheNotifierFactory = oldCacheNotifier
-
-
- def test_proxyFor(self):
-
- return self._proxyForTest(
- DirectoryService.recordType_users, "wsanchez",
- ("Mercury Seven", "Gemini Twelve", "Apollo Eleven", "Orion",),
- True
- )
-
-
- @inlineCallbacks
- def test_proxyForDuplicates(self):
-
- yield self._addProxy(
- (DirectoryService.recordType_locations, "gemini",),
- "calendar-proxy-write",
- (DirectoryService.recordType_groups, "grunts",),
- )
-
- yield self._proxyForTest(
- DirectoryService.recordType_users, "wsanchez",
- ("Mercury Seven", "Gemini Twelve", "Apollo Eleven", "Orion",),
- True
- )
-
-
- def test_readOnlyProxyFor(self):
-
- return self._proxyForTest(
- DirectoryService.recordType_users, "wsanchez",
- ("Non-calendar proxy",),
- False
- )
-
-
- @inlineCallbacks
- def test_UserProxy(self):
-
- for proxyType in ("calendar-proxy-read", "calendar-proxy-write"):
-
- yield self._addProxy(
- (DirectoryService.recordType_users, "wsanchez",),
- proxyType,
- (DirectoryService.recordType_users, "cdaboo",),
- )
-
- yield self._groupMembersTest(
- DirectoryService.recordType_users, "wsanchez",
- proxyType,
- ("Cyrus Daboo",),
- )
-
- yield self._addProxy(
- (DirectoryService.recordType_users, "wsanchez",),
- proxyType,
- (DirectoryService.recordType_users, "lecroy",),
- )
-
- yield self._groupMembersTest(
- DirectoryService.recordType_users, "wsanchez",
- proxyType,
- ("Cyrus Daboo", "Chris Lecroy",),
- )
-
- yield self._removeProxy(
- DirectoryService.recordType_users, "wsanchez",
- proxyType,
- DirectoryService.recordType_users, "cdaboo",
- )
-
- yield self._groupMembersTest(
- DirectoryService.recordType_users, "wsanchez",
- proxyType,
- ("Chris Lecroy",),
- )
-
-
- @inlineCallbacks
- def test_NonAsciiProxy(self):
- """
- Ensure that principalURLs with non-ascii don't cause problems
- within CalendarUserProxyPrincipalResource
- """
-
- recordType = DirectoryService.recordType_users
- proxyType = "calendar-proxy-read"
-
- record = self.directoryService.recordWithGUID("320B73A1-46E2-4180-9563-782DFDBE1F63")
- provisioningResource = self.principalRootResources[self.directoryService.__class__.__name__]
- principal = provisioningResource.principalForRecord(record)
- proxyPrincipal = provisioningResource.principalForShortName(recordType,
- "wsanchez")
-
- yield self._addProxy(principal, proxyType, proxyPrincipal)
- memberships = yield proxyPrincipal._calendar_user_proxy_index().getMemberships(proxyPrincipal.principalUID())
- for uid in memberships:
- provisioningResource.principalForUID(uid)
-
-
- @inlineCallbacks
- def test_getAllMembers(self):
- """
- getAllMembers( ) returns the unique set of guids that have been
- delegated-to directly
- """
- self.assertEquals(
- set((yield calendaruserproxy.ProxyDBService.getAllMembers())), #@UndefinedVariable
- set([
- u'00599DAF-3E75-42DD-9DB7-52617E79943F',
- u'6423F94A-6B76-4A3A-815B-D52CFD77935D',
- u'8A985493-EE2C-4665-94CF-4DFEA3A89500',
- u'9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD2',
- u'both_coasts',
- u'left_coast',
- u'non_calendar_group',
- u'recursive1_coasts',
- u'recursive2_coasts',
- u'EC465590-E9E9-4746-ACE8-6C756A49FE4D'])
- )
-
-
- @inlineCallbacks
- def test_hideDisabledDelegates(self):
- """
- Delegates who are not enabledForLogin are "hidden" from the delegate lists
- (but groups *are* allowed)
- """
-
- record = self.directoryService.recordWithGUID("EC465590-E9E9-4746-ACE8-6C756A49FE4D")
-
- record.enabledForLogin = True
- yield self._groupMembersTest(
- DirectoryService.recordType_users, "delegator", "calendar-proxy-write",
- ("Occasional Delegate", "Delegate Via Group", "Delegate Group"),
- )
-
- # Login disabled -- no longer shown as a delegate
- record.enabledForLogin = False
- yield self._groupMembersTest(
- DirectoryService.recordType_users, "delegator", "calendar-proxy-write",
- ("Delegate Via Group", "Delegate Group"),
- )
-
- # Login re-enabled -- once again a delegate (it wasn't removed from proxydb)
- record.enabledForLogin = True
- yield self._groupMembersTest(
- DirectoryService.recordType_users, "delegator", "calendar-proxy-write",
- ("Occasional Delegate", "Delegate Via Group", "Delegate Group"),
- )
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_resources.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_resources.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_resources.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,80 +0,0 @@
-##
-# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-import os
-from twistedcaldav.config import config
-from twistedcaldav.test.util import TestCase
-from calendarserver.tools.util import getDirectory
-
-class ResourcesTestCase(TestCase):
-
- def setUp(self):
- super(ResourcesTestCase, self).setUp()
-
- testRoot = os.path.join(".", os.path.dirname(__file__), "resources")
-
- xmlFile = os.path.join(testRoot, "users-groups.xml")
- config.DirectoryService.params.xmlFile = xmlFile
-
- xmlFile = os.path.join(testRoot, "resources-locations.xml")
- config.ResourceService.params.xmlFile = xmlFile
- config.ResourceService.Enabled = True
-
- xmlFile = os.path.join(testRoot, "augments.xml")
- config.AugmentService.type = "twistedcaldav.directory.augment.AugmentXMLDB"
- config.AugmentService.params.xmlFiles = (xmlFile,)
-
-# Uh, what's this testing?
-# def test_loadConfig(self):
-# directory = getDirectory()
-
-
- def test_recordInPrimaryDirectory(self):
- directory = getDirectory()
-
- # Look up a user, which comes out of primary directory service
- record = directory.recordWithUID("user01")
- self.assertNotEquals(record, None)
-
-
- def test_recordInSupplementalDirectory(self):
- directory = getDirectory()
-
- # Look up a resource, which comes out of locations/resources service
- record = directory.recordWithUID("resource01")
- self.assertNotEquals(record, None)
-
-
- def test_augments(self):
- directory = getDirectory()
-
- # Primary directory
- record = directory.recordWithUID("user01")
- self.assertEquals(record.enabled, True)
- self.assertEquals(record.enabledForCalendaring, True)
- record = directory.recordWithUID("user02")
- self.assertEquals(record.enabled, False)
- self.assertEquals(record.enabledForCalendaring, False)
-
- # Supplemental directory
- record = directory.recordWithUID("resource01")
- self.assertEquals(record.enabled, True)
- self.assertEquals(record.enabledForCalendaring, True)
- self.assertEquals(record.autoSchedule, True)
- record = directory.recordWithUID("resource02")
- self.assertEquals(record.enabled, False)
- self.assertEquals(record.enabledForCalendaring, False)
- self.assertEquals(record.autoSchedule, False)
Deleted: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_xmlfile.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_xmlfile.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/test/test_xmlfile.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,375 +0,0 @@
-##
-# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from twext.python.filepath import CachingFilePath as FilePath
-
-from twistedcaldav.directory import augment
-from twistedcaldav.directory.directory import DirectoryService
-import twistedcaldav.directory.test.util
-from twistedcaldav.directory.xmlfile import XMLDirectoryService
-from twistedcaldav.test.util import TestCase, xmlFile, augmentsFile
-
-# FIXME: Add tests for GUID hooey, once we figure out what that means here
-
-class XMLFileBase(object):
- """
- L{XMLFileBase} is a base/mix-in object for testing L{XMLDirectoryService}
- (or things that depend on L{IDirectoryService} and need a simple
- implementation to use).
- """
- recordTypes = set((
- DirectoryService.recordType_users,
- DirectoryService.recordType_groups,
- DirectoryService.recordType_locations,
- DirectoryService.recordType_resources,
- DirectoryService.recordType_addresses,
- ))
-
- users = {
- "admin" : {"password": "nimda", "guid": "D11F03A0-97EA-48AF-9A6C-FAC7F3975766", "addresses": ()},
- "wsanchez" : {"password": "zehcnasw", "guid": "6423F94A-6B76-4A3A-815B-D52CFD77935D", "addresses": ("mailto:wsanchez at example.com",)},
- "cdaboo" : {"password": "oobadc", "guid": "5A985493-EE2C-4665-94CF-4DFEA3A89500", "addresses": ("mailto:cdaboo at example.com",) },
- "lecroy" : {"password": "yorcel", "guid": "8B4288F6-CC82-491D-8EF9-642EF4F3E7D0", "addresses": ("mailto:lecroy at example.com",) },
- "dreid" : {"password": "dierd", "guid": "5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1", "addresses": ("mailto:dreid at example.com",) },
- "nocalendar" : {"password": "radnelacon", "guid": "543D28BA-F74F-4D5F-9243-B3E3A61171E5", "addresses": ()},
- "user01" : {"password": "01user", "guid": None, "addresses": ("mailto:c4ca4238a0 at example.com",)},
- "user02" : {"password": "02user", "guid": None, "addresses": ("mailto:c81e728d9d at example.com",)},
- }
-
- groups = {
- "admin" : {"password": "admin", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_groups, "managers"),)},
- "managers" : {"password": "managers", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "lecroy"),)},
- "grunts" : {"password": "grunts", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "wsanchez"),
- (DirectoryService.recordType_users , "cdaboo"),
- (DirectoryService.recordType_users , "dreid"))},
- "right_coast": {"password": "right_coast", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "cdaboo"),)},
- "left_coast" : {"password": "left_coast", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "wsanchez"),
- (DirectoryService.recordType_users , "dreid"),
- (DirectoryService.recordType_users , "lecroy"))},
- "both_coasts": {"password": "both_coasts", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_groups, "right_coast"),
- (DirectoryService.recordType_groups, "left_coast"))},
- "recursive1_coasts": {"password": "recursive1_coasts", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_groups, "recursive2_coasts"),
- (DirectoryService.recordType_users, "wsanchez"))},
- "recursive2_coasts": {"password": "recursive2_coasts", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_groups, "recursive1_coasts"),
- (DirectoryService.recordType_users, "cdaboo"))},
- "non_calendar_group": {"password": "non_calendar_group", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "cdaboo"),
- (DirectoryService.recordType_users , "lecroy"))},
- }
-
- locations = {
- "mercury": {"password": "mercury", "guid": None, "addresses": ("mailto:mercury at example.com",)},
- "gemini" : {"password": "gemini", "guid": None, "addresses": ("mailto:gemini at example.com",)},
- "apollo" : {"password": "apollo", "guid": None, "addresses": ("mailto:apollo at example.com",)},
- "orion" : {"password": "orion", "guid": None, "addresses": ("mailto:orion at example.com",)},
- }
-
- resources = {
- "transporter" : {"password": "transporter", "guid": None, "addresses": ("mailto:transporter at example.com",) },
- "ftlcpu" : {"password": "ftlcpu", "guid": None, "addresses": ("mailto:ftlcpu at example.com",) },
- "non_calendar_proxy" : {"password": "non_calendar_proxy", "guid": "non_calendar_proxy", "addresses": ("mailto:non_calendar_proxy at example.com",)},
- }
-
-
- def xmlFile(self):
- """
- Create a L{FilePath} that points to a temporary file containing a copy
- of C{twistedcaldav/directory/test/accounts.xml}.
-
- @see: L{xmlFile}
-
- @rtype: L{FilePath}
- """
- if not hasattr(self, "_xmlFile"):
- self._xmlFile = FilePath(self.mktemp())
- xmlFile.copyTo(self._xmlFile)
- return self._xmlFile
-
-
- def augmentsFile(self):
- """
- Create a L{FilePath} that points to a temporary file containing a copy
- of C{twistedcaldav/directory/test/augments.xml}.
-
- @see: L{augmentsFile}
-
- @rtype: L{FilePath}
- """
- if not hasattr(self, "_augmentsFile"):
- self._augmentsFile = FilePath(self.mktemp())
- augmentsFile.copyTo(self._augmentsFile)
- return self._augmentsFile
-
-
- def service(self):
- """
- Create an L{XMLDirectoryService} based on the contents of the paths
- returned by L{XMLFileBase.augmentsFile} and L{XMLFileBase.xmlFile}.
-
- @rtype: L{XMLDirectoryService}
- """
- return XMLDirectoryService(
- {
- 'xmlFile': self.xmlFile(),
- 'augmentService':
- augment.AugmentXMLDB(xmlFiles=(self.augmentsFile().path,)),
- },
- alwaysStat=True
- )
-
-
-
-class XMLFile (
- XMLFileBase,
- twistedcaldav.directory.test.util.BasicTestCase,
- twistedcaldav.directory.test.util.DigestTestCase
-):
- """
- Test XML file based directory implementation.
- """
-
- def test_changedXML(self):
- service = self.service()
-
- self.xmlFile().open("w").write(
-"""<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE accounts SYSTEM "accounts.dtd">
-<accounts realm="Test Realm">
- <user>
- <uid>admin</uid>
- <guid>admin</guid>
- <password>nimda</password>
- <name>Super User</name>
- </user>
-</accounts>
-"""
- )
- for recordType, expectedRecords in (
- (DirectoryService.recordType_users , ("admin",)),
- (DirectoryService.recordType_groups , ()),
- (DirectoryService.recordType_locations , ()),
- (DirectoryService.recordType_resources , ()),
- ):
- # Fault records in
- for name in expectedRecords:
- service.recordWithShortName(recordType, name)
-
- self.assertEquals(
- set(r.shortNames[0] for r in service.listRecords(recordType)),
- set(expectedRecords)
- )
-
-
- def test_okAutoSchedule(self):
- service = self.service()
-
- self.xmlFile().open("w").write(
-"""<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE accounts SYSTEM "accounts.dtd">
-<accounts realm="Test Realm">
- <location>
- <uid>my office</uid>
- <guid>myoffice</guid>
- <password>nimda</password>
- <name>Super User</name>
- </location>
-</accounts>
-"""
- )
- self.augmentsFile().open("w").write(
-"""<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE accounts SYSTEM "accounts.dtd">
-<augments>
- <record>
- <uid>myoffice</uid>
- <enable>true</enable>
- <enable-calendar>true</enable-calendar>
- <auto-schedule>true</auto-schedule>
- </record>
-</augments>
-"""
- )
- service.augmentService.refresh()
-
- for recordType, expectedRecords in (
- (DirectoryService.recordType_users , ()),
- (DirectoryService.recordType_groups , ()),
- (DirectoryService.recordType_locations , ("my office",)),
- (DirectoryService.recordType_resources , ()),
- ):
- # Fault records in
- for name in expectedRecords:
- service.recordWithShortName(recordType, name)
-
- self.assertEquals(
- set(r.shortNames[0] for r in service.listRecords(recordType)),
- set(expectedRecords)
- )
- self.assertTrue(service.recordWithShortName(DirectoryService.recordType_locations, "my office").autoSchedule)
-
-
- def test_okDisableCalendar(self):
- service = self.service()
-
- self.xmlFile().open("w").write(
-"""<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE accounts SYSTEM "accounts.dtd">
-<accounts realm="Test Realm">
- <group>
- <uid>enabled</uid>
- <guid>enabled</guid>
- <password>enabled</password>
- <name>Enabled</name>
- </group>
- <group>
- <uid>disabled</uid>
- <guid>disabled</guid>
- <password>disabled</password>
- <name>Disabled</name>
- </group>
-</accounts>
-"""
- )
-
- for recordType, expectedRecords in (
- (DirectoryService.recordType_users , ()),
- (DirectoryService.recordType_groups , ("enabled", "disabled")),
- (DirectoryService.recordType_locations , ()),
- (DirectoryService.recordType_resources , ()),
- ):
- # Fault records in
- for name in expectedRecords:
- service.recordWithShortName(recordType, name)
-
- self.assertEquals(
- set(r.shortNames[0] for r in service.listRecords(recordType)),
- set(expectedRecords)
- )
-
- # All groups are disabled
- self.assertFalse(service.recordWithShortName(DirectoryService.recordType_groups, "enabled").enabledForCalendaring)
- self.assertFalse(service.recordWithShortName(DirectoryService.recordType_groups, "disabled").enabledForCalendaring)
-
-
- def test_readExtras(self):
- service = self.service()
-
- self.xmlFile().open("w").write(
-"""<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE accounts SYSTEM "accounts.dtd">
-<accounts realm="Test Realm">
- <location>
- <uid>my office</uid>
- <guid>myoffice</guid>
- <name>My Office</name>
- <extras>
- <comment>This is the comment</comment>
- <capacity>40</capacity>
- </extras>
- </location>
-</accounts>
-"""
- )
-
- record = service.recordWithShortName(
- DirectoryService.recordType_locations, "my office")
- self.assertEquals(record.guid, "myoffice")
- self.assertEquals(record.extras["comment"], "This is the comment")
- self.assertEquals(record.extras["capacity"], "40")
-
-
- def test_writeExtras(self):
- service = self.service()
-
- service.createRecord(DirectoryService.recordType_locations, "newguid",
- shortNames=("New office",),
- fullName="My New Office",
- address="1 Infinite Loop, Cupertino, CA",
- capacity="10",
- comment="Test comment",
- )
-
- record = service.recordWithShortName(
- DirectoryService.recordType_locations, "New office")
- self.assertEquals(record.extras["comment"], "Test comment")
- self.assertEquals(record.extras["capacity"], "10")
-
- service.updateRecord(DirectoryService.recordType_locations, "newguid",
- shortNames=("New office",),
- fullName="My Newer Office",
- address="2 Infinite Loop, Cupertino, CA",
- capacity="20",
- comment="Test comment updated",
- )
-
- record = service.recordWithShortName(
- DirectoryService.recordType_locations, "New office")
- self.assertEquals(record.fullName, "My Newer Office")
- self.assertEquals(record.extras["address"], "2 Infinite Loop, Cupertino, CA")
- self.assertEquals(record.extras["comment"], "Test comment updated")
- self.assertEquals(record.extras["capacity"], "20")
-
- service.destroyRecord(DirectoryService.recordType_locations, "newguid")
-
- record = service.recordWithShortName(
- DirectoryService.recordType_locations, "New office")
- self.assertEquals(record, None)
-
-
- def test_indexing(self):
- service = self.service()
- self.assertNotEquals(None, service._lookupInIndex(service.recordType_users, service.INDEX_TYPE_SHORTNAME, "usera"))
- self.assertNotEquals(None, service._lookupInIndex(service.recordType_users, service.INDEX_TYPE_CUA, "mailto:wsanchez at example.com"))
- self.assertNotEquals(None, service._lookupInIndex(service.recordType_users, service.INDEX_TYPE_GUID, "9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD2"))
- self.assertNotEquals(None, service._lookupInIndex(service.recordType_locations, service.INDEX_TYPE_SHORTNAME, "orion"))
- self.assertEquals(None, service._lookupInIndex(service.recordType_users, service.INDEX_TYPE_CUA, "mailto:nobody at example.com"))
-
-
- def test_repeat(self):
- service = self.service()
- record = service.recordWithShortName(
- DirectoryService.recordType_users, "user01")
- self.assertEquals(record.fullName, "c4ca4238a0b923820dcc509a6f75849bc4c User 01")
- self.assertEquals(record.firstName, "c4ca4")
- self.assertEquals(record.lastName, "c4ca4238a User 01")
- self.assertEquals(record.emailAddresses, set(['c4ca4238a0 at example.com']))
-
-
-
-class XMLFileSubset (XMLFileBase, TestCase):
- """
- Test the recordTypes subset feature of XMLFile service.
- """
- recordTypes = set((
- DirectoryService.recordType_users,
- DirectoryService.recordType_groups,
- ))
-
-
- def test_recordTypesSubset(self):
- directory = XMLDirectoryService(
- {
- 'xmlFile' : self.xmlFile(),
- 'augmentService' :
- augment.AugmentXMLDB(xmlFiles=(self.augmentsFile().path,)),
- 'recordTypes' :
- (
- DirectoryService.recordType_users,
- DirectoryService.recordType_groups
- ),
- },
- alwaysStat=True
- )
- self.assertEquals(set(("users", "groups")), set(directory.recordTypes()))
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/util.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -34,7 +34,10 @@
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.xml import element as davxml
from uuid import UUID, uuid5
+from twisted.python.failure import Failure
+from twisted.web.template import tags
+
log = Logger()
def uuidFromName(namespace, name):
@@ -148,3 +151,76 @@
else:
response = StatusResponse(responsecode.NOT_FOUND, "Resource not found")
returnValue(response)
+
+
+
+
+def formatLink(url):
+ """
+ Convert a URL string into some twisted.web.template DOM objects for
+ rendering as a link to itself.
+ """
+ return tags.a(href=url)(url)
+
+
+
+def formatLinks(urls):
+ """
+ Format a list of URL strings as a list of twisted.web.template DOM links.
+ """
+ return formatList(formatLink(link) for link in urls)
+
+
+def formatPrincipals(principals):
+ """
+ Format a list of principals into some twisted.web.template DOM objects.
+ """
+ def recordKey(principal):
+ try:
+ record = principal.record
+ except AttributeError:
+ try:
+ record = principal.parent.record
+ except:
+ return None
+ return (record.recordType, record.shortNames[0])
+
+
+ def describe(principal):
+ if hasattr(principal, "record"):
+ return " - %s" % (principal.record.displayName,)
+ else:
+ return ""
+
+ return formatList(
+ tags.a(href=principal.principalURL())(
+ str(principal), describe(principal)
+ )
+ for principal in sorted(principals, key=recordKey)
+ )
+
+
+
+def formatList(iterable):
+ """
+    Format a list of items as an iterable of rendered lines.
+ """
+ thereAreAny = False
+ try:
+ item = None
+ for item in iterable:
+ thereAreAny = True
+ yield " -> "
+ if item is None:
+ yield "None"
+ else:
+ yield item
+ yield "\n"
+ except Exception, e:
+ log.error("Exception while rendering: %s" % (e,))
+ Failure().printTraceback()
+ yield " ** %s **: %s\n" % (e.__class__.__name__, e)
+ if not thereAreAny:
+ yield " '()\n"
+
+
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/wiki.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/wiki.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directory/wiki.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -19,32 +19,32 @@
as other principals.
"""
-__all__ = [
- "WikiDirectoryService",
-]
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+from twistedcaldav.config import config
+from twisted.web.xmlrpc import Proxy, Fault
from calendarserver.platform.darwin.wiki import accessForUserToWiki
+from twext.python.log import Logger
from twext.internet.gaiendpoint import MultiFailure
-from twext.python.log import Logger
from txweb2 import responsecode
-from txweb2.auth.wrapper import UnauthorizedResponse
-from txweb2.dav.resource import TwistedACLInheritable
+# from txweb2.auth.wrapper import UnauthorizedResponse
+# from txweb2.dav.resource import TwistedACLInheritable
from txweb2.http import HTTPError, StatusResponse
-from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.error import Error as WebError
-from twisted.web.xmlrpc import Proxy, Fault
-from twistedcaldav.config import config
-from twistedcaldav.directory.directory import DirectoryService, \
- DirectoryRecord, UnknownRecordTypeError
+# from twistedcaldav.directory.directory import DirectoryService, \
+# DirectoryRecord, UnknownRecordTypeError
-from txdav.xml import element as davxml
+# from txdav.xml import element as davxml
log = Logger()
-class WikiDirectoryService(DirectoryService):
+# class WikiDirectoryService(DirectoryService):
+
+
+class WikiDirectoryService(object):
"""
L{IDirectoryService} implementation for Wikis.
"""
@@ -57,81 +57,81 @@
UIDPrefix = "wiki-"
- def __repr__(self):
- return "<%s %r>" % (self.__class__.__name__, self.realmName)
+# def __repr__(self):
+# return "<%s %r>" % (self.__class__.__name__, self.realmName)
- def __init__(self):
- super(WikiDirectoryService, self).__init__()
- self.byUID = {}
- self.byShortName = {}
+# def __init__(self):
+# super(WikiDirectoryService, self).__init__()
+# self.byUID = {}
+# self.byShortName = {}
- def recordTypes(self):
- return (WikiDirectoryService.recordType_wikis,)
+# def recordTypes(self):
+# return (WikiDirectoryService.recordType_wikis,)
- def listRecords(self, recordType):
- return ()
+# def listRecords(self, recordType):
+# return ()
- def recordWithShortName(self, recordType, shortName):
- if recordType != WikiDirectoryService.recordType_wikis:
- raise UnknownRecordTypeError(recordType)
+# def recordWithShortName(self, recordType, shortName):
+# if recordType != WikiDirectoryService.recordType_wikis:
+# raise UnknownRecordTypeError(recordType)
- if shortName in self.byShortName:
- record = self.byShortName[shortName]
- return record
+# if shortName in self.byShortName:
+# record = self.byShortName[shortName]
+# return record
- record = self._addRecord(shortName)
- return record
+# record = self._addRecord(shortName)
+# return record
- def recordWithUID(self, uid):
+# def recordWithUID(self, uid):
- if uid in self.byUID:
- record = self.byUID[uid]
- return record
+# if uid in self.byUID:
+# record = self.byUID[uid]
+# return record
- if uid.startswith(self.UIDPrefix):
- shortName = uid[len(self.UIDPrefix):]
- record = self._addRecord(shortName)
- return record
- else:
- return None
+# if uid.startswith(self.UIDPrefix):
+# shortName = uid[len(self.UIDPrefix):]
+# record = self._addRecord(shortName)
+# return record
+# else:
+# return None
- def _addRecord(self, shortName):
+# def _addRecord(self, shortName):
- record = WikiDirectoryRecord(
- self,
- WikiDirectoryService.recordType_wikis,
- shortName,
- None
- )
- self.byUID[record.uid] = record
- self.byShortName[shortName] = record
- return record
+# record = WikiDirectoryRecord(
+# self,
+# WikiDirectoryService.recordType_wikis,
+# shortName,
+# None
+# )
+# self.byUID[record.uid] = record
+# self.byShortName[shortName] = record
+# return record
-class WikiDirectoryRecord(DirectoryRecord):
- """
- L{DirectoryRecord} implementation for Wikis.
- """
+# class WikiDirectoryRecord(DirectoryRecord):
+# """
+# L{DirectoryRecord} implementation for Wikis.
+# """
- def __init__(self, service, recordType, shortName, entry):
- super(WikiDirectoryRecord, self).__init__(
- service=service,
- recordType=recordType,
- guid=None,
- shortNames=(shortName,),
- fullName=shortName,
- enabledForCalendaring=True,
- uid="%s%s" % (WikiDirectoryService.UIDPrefix, shortName),
- )
- # Wiki enabling doesn't come from augments db, so enable here...
- self.enabled = True
+# def __init__(self, service, recordType, shortName, entry):
+# super(WikiDirectoryRecord, self).__init__(
+# service=service,
+# recordType=recordType,
+# guid=None,
+# shortNames=(shortName,),
+# fullName=shortName,
+# enabledForCalendaring=True,
+# uid="%s%s" % (WikiDirectoryService.UIDPrefix, shortName),
+# )
+# # Wiki enabling doesn't come from augments db, so enable here...
+# self.enabled = True
@@ -250,118 +250,120 @@
- at inlineCallbacks
def getWikiACL(resource, request):
- """
- Ask the wiki server we're paired with what level of access the authnUser has.
+ return succeed(None)
+# @inlineCallbacks
+# def getWikiACL(resource, request):
+# """
+# Ask the wiki server we're paired with what level of access the authnUser has.
- Returns an ACL.
+# Returns an ACL.
- Wiki authentication is a bit tricky because the end-user accessing a group
- calendar may not actually be enabled for calendaring. Therefore in that
- situation, the authzUser will have been replaced with the wiki principal
- in locateChild( ), so that any changes the user makes will have the wiki
- as the originator. The authnUser will always be the end-user.
- """
- from twistedcaldav.directory.principal import DirectoryPrincipalResource
+# Wiki authentication is a bit tricky because the end-user accessing a group
+# calendar may not actually be enabled for calendaring. Therefore in that
+# situation, the authzUser will have been replaced with the wiki principal
+# in locateChild( ), so that any changes the user makes will have the wiki
+# as the originator. The authnUser will always be the end-user.
+# """
+# from twistedcaldav.directory.principal import DirectoryPrincipalResource
- if (not hasattr(resource, "record") or
- resource.record.recordType != WikiDirectoryService.recordType_wikis):
- returnValue(None)
+# if (not hasattr(resource, "record") or
+# resource.record.recordType != WikiDirectoryService.recordType_wikis):
+# returnValue(None)
- if hasattr(request, 'wikiACL'):
- returnValue(request.wikiACL)
+# if hasattr(request, 'wikiACL'):
+# returnValue(request.wikiACL)
- userID = "unauthenticated"
- wikiID = resource.record.shortNames[0]
+# userID = "unauthenticated"
+# wikiID = resource.record.shortNames[0]
- try:
- url = str(request.authnUser.children[0])
- principal = (yield request.locateResource(url))
- if isinstance(principal, DirectoryPrincipalResource):
- userID = principal.record.guid
- except:
- # TODO: better error handling
- pass
+# try:
+# url = str(request.authnUser.children[0])
+# principal = (yield request.locateResource(url))
+# if isinstance(principal, DirectoryPrincipalResource):
+# userID = principal.record.guid
+# except:
+# # TODO: better error handling
+# pass
- try:
- access = (yield getWikiAccess(userID, wikiID))
+# try:
+# access = (yield getWikiAccess(userID, wikiID))
- # The ACL we returns has ACEs for the end-user and the wiki principal
- # in case authzUser is the wiki principal.
- if access == "read":
- request.wikiACL = davxml.ACL(
- davxml.ACE(
- request.authnUser,
- davxml.Grant(
- davxml.Privilege(davxml.Read()),
- davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet()),
+#         # The ACL we return has ACEs for the end-user and the wiki principal
+# # in case authzUser is the wiki principal.
+# if access == "read":
+# request.wikiACL = davxml.ACL(
+# davxml.ACE(
+# request.authnUser,
+# davxml.Grant(
+# davxml.Privilege(davxml.Read()),
+# davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet()),
- # We allow write-properties so that direct sharees can change
- # e.g. calendar color properties
- davxml.Privilege(davxml.WriteProperties()),
- ),
- TwistedACLInheritable(),
- ),
- davxml.ACE(
- davxml.Principal(
- davxml.HRef.fromString("/principals/wikis/%s/" % (wikiID,))
- ),
- davxml.Grant(
- davxml.Privilege(davxml.Read()),
- davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet()),
- ),
- TwistedACLInheritable(),
- )
- )
- returnValue(request.wikiACL)
+# # We allow write-properties so that direct sharees can change
+# # e.g. calendar color properties
+# davxml.Privilege(davxml.WriteProperties()),
+# ),
+# TwistedACLInheritable(),
+# ),
+# davxml.ACE(
+# davxml.Principal(
+# davxml.HRef.fromString("/principals/wikis/%s/" % (wikiID,))
+# ),
+# davxml.Grant(
+# davxml.Privilege(davxml.Read()),
+# davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet()),
+# ),
+# TwistedACLInheritable(),
+# )
+# )
+# returnValue(request.wikiACL)
- elif access in ("write", "admin"):
- request.wikiACL = davxml.ACL(
- davxml.ACE(
- request.authnUser,
- davxml.Grant(
- davxml.Privilege(davxml.Read()),
- davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet()),
- davxml.Privilege(davxml.Write()),
- ),
- TwistedACLInheritable(),
- ),
- davxml.ACE(
- davxml.Principal(
- davxml.HRef.fromString("/principals/wikis/%s/" % (wikiID,))
- ),
- davxml.Grant(
- davxml.Privilege(davxml.Read()),
- davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet()),
- davxml.Privilege(davxml.Write()),
- ),
- TwistedACLInheritable(),
- )
- )
- returnValue(request.wikiACL)
+# elif access in ("write", "admin"):
+# request.wikiACL = davxml.ACL(
+# davxml.ACE(
+# request.authnUser,
+# davxml.Grant(
+# davxml.Privilege(davxml.Read()),
+# davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet()),
+# davxml.Privilege(davxml.Write()),
+# ),
+# TwistedACLInheritable(),
+# ),
+# davxml.ACE(
+# davxml.Principal(
+# davxml.HRef.fromString("/principals/wikis/%s/" % (wikiID,))
+# ),
+# davxml.Grant(
+# davxml.Privilege(davxml.Read()),
+# davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet()),
+# davxml.Privilege(davxml.Write()),
+# ),
+# TwistedACLInheritable(),
+# )
+# )
+# returnValue(request.wikiACL)
- else: # "no-access":
+# else: # "no-access":
- if userID == "unauthenticated":
- # Return a 401 so they have an opportunity to log in
- response = (yield UnauthorizedResponse.makeResponse(
- request.credentialFactories,
- request.remoteAddr,
- ))
- raise HTTPError(response)
+# if userID == "unauthenticated":
+# # Return a 401 so they have an opportunity to log in
+# response = (yield UnauthorizedResponse.makeResponse(
+# request.credentialFactories,
+# request.remoteAddr,
+# ))
+# raise HTTPError(response)
- raise HTTPError(
- StatusResponse(
- responsecode.FORBIDDEN,
- "You are not allowed to access this wiki"
- )
- )
+# raise HTTPError(
+# StatusResponse(
+# responsecode.FORBIDDEN,
+# "You are not allowed to access this wiki"
+# )
+# )
- except HTTPError:
- # pass through the HTTPError we might have raised above
- raise
+# except HTTPError:
+# # pass through the HTTPError we might have raised above
+# raise
- except Exception, e:
- log.error("Wiki ACL lookup failed: %s" % (e,))
- raise HTTPError(StatusResponse(responsecode.SERVICE_UNAVAILABLE, "Wiki ACL lookup failed"))
+# except Exception, e:
+# log.error("Wiki ACL lookup failed: %s" % (e,))
+# raise HTTPError(StatusResponse(responsecode.SERVICE_UNAVAILABLE, "Wiki ACL lookup failed"))
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directorybackedaddressbook.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directorybackedaddressbook.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/directorybackedaddressbook.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -112,16 +112,18 @@
# DAV:Read for all authenticated principals (does not include anonymous)
accessPrincipal = davxml.Authenticated()
- return davxml.ACL(
- davxml.ACE(
- davxml.Principal(accessPrincipal),
- davxml.Grant(
- davxml.Privilege(davxml.Read()),
- davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet())
- ),
- davxml.Protected(),
- TwistedACLInheritable(),
- ),
+ return succeed(
+ davxml.ACL(
+ davxml.ACE(
+ davxml.Principal(accessPrincipal),
+ davxml.Grant(
+ davxml.Privilege(davxml.Read()),
+ davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet())
+ ),
+ davxml.Protected(),
+ TwistedACLInheritable(),
+ ),
+ )
)
@@ -160,7 +162,7 @@
def accessControlList(self, request, inheritance=True, expanding=False, inherited_aces=None):
# Permissions here are fixed, and are not subject to inheritance rules, etc.
- return succeed(self.defaultAccessControlList())
+ return self.defaultAccessControlList()
@inlineCallbacks
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/extensions.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/extensions.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/extensions.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -66,8 +66,8 @@
from twistedcaldav.method.report import http_REPORT
from twistedcaldav.config import config
+from twext.who.expression import Operand, MatchType, MatchFlags
-
thisModule = getModule(__name__)
log = Logger()
@@ -95,7 +95,7 @@
msg = "Bad XML: unknown value for test attribute: %s" % (testMode,)
log.warn(msg)
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))
- operand = "and" if testMode == "allof" else "or"
+ operand = Operand.AND if testMode == "allof" else Operand.OR
# Are we narrowing results down to a single CUTYPE?
cuType = principal_property_search.attributes.get("type", None)
@@ -144,10 +144,18 @@
log.warn(msg)
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))
+ # Convert to twext.who.expression form
+ matchType = {
+ "starts-with": MatchType.startsWith,
+ "contains": MatchType.contains,
+ "equals": MatchType.equals
+ }.get(matchType)
+ matchFlags = MatchFlags.caseInsensitive if caseless else MatchFlags.none
+
# Ignore any query strings under three letters
- matchText = str(match)
+ matchText = match.toString() # gives us unicode
if len(matchText) >= 3:
- propertySearches.append((props.children, matchText, caseless, matchType))
+ propertySearches.append((props.children, matchText, matchFlags, matchType))
elif child.qname() == (calendarserver_namespace, "limit"):
try:
@@ -182,7 +190,7 @@
# See if we can take advantage of the directory
fields = []
nonDirectorySearches = []
- for props, match, caseless, matchType in propertySearches:
+ for props, match, matchFlags, matchType in propertySearches:
nonDirectoryProps = []
for prop in props:
try:
@@ -191,12 +199,12 @@
except ValueError, e:
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, str(e)))
if fieldName:
- fields.append((fieldName, match, caseless, matchType))
+ fields.append((fieldName, match, matchFlags, matchType))
else:
nonDirectoryProps.append(prop)
if nonDirectoryProps:
nonDirectorySearches.append((nonDirectoryProps, match,
- caseless, matchType))
+ matchFlags, matchType))
matchingResources = []
matchcount = 0
@@ -208,7 +216,7 @@
operand=operand, cuType=cuType))
for record in records:
- resource = principalCollection.principalForRecord(record)
+ resource = yield principalCollection.principalForRecord(record)
if resource:
matchingResources.append(resource)
@@ -299,7 +307,7 @@
records = (yield dir.recordsMatchingTokens(tokens, context=context))
for record in records:
- resource = principalCollection.principalForRecord(record)
+ resource = yield principalCollection.principalForRecord(record)
if resource:
matchingResources.append(resource)
@@ -420,9 +428,9 @@
f.trap(HTTPError)
code = f.value.response.code
if code == responsecode.NOT_FOUND:
- log.error("Property %s was returned by listProperties() "
- "but does not exist for resource %s."
- % (name, self.resource))
+ log.error("Property {p} was returned by listProperties() "
+ "but does not exist for resource {r}.",
+ p=name, r=self.resource)
return (name, None)
if code == responsecode.UNAUTHORIZED:
return (name, accessDeniedValue)
@@ -721,7 +729,8 @@
elif name == "record-type":
if hasattr(self, "record"):
- returnValue(customxml.RecordType(self.record.recordType))
+ # MOVE2WHO -- need mapping
+ returnValue(customxml.RecordType(self.record.recordType.name + "s"))
else:
raise HTTPError(StatusResponse(
responsecode.NOT_FOUND,
@@ -848,7 +857,7 @@
):
# Permissions here are fixed, and are not subject to
# inheritance rules, etc.
- return succeed(self.defaultAccessControlList())
+ return self.defaultAccessControlList()
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/freebusyurl.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/freebusyurl.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/freebusyurl.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -102,7 +102,7 @@
davxml.Protected(),
),
)
- return davxml.ACL(*aces)
+ return succeed(davxml.ACL(*aces))
def resourceType(self):
@@ -243,7 +243,7 @@
# TODO: We should probably verify that the actual time-range is within sensible bounds (e.g. not too far in the past or future and not too long)
# Now lookup the principal details for the targeted user
- principal = self.parent.principalForRecord()
+ principal = (yield self.parent.principalForRecord())
# Pick the first mailto cu address or the first other type
cuaddr = None
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/ical.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/ical.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/ical.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -34,6 +34,7 @@
import itertools
import uuid
+from twisted.internet.defer import inlineCallbacks, returnValue
from twext.python.log import Logger
from txweb2.stream import IStream
from txweb2.dav.util import allDataFromStream
@@ -3239,6 +3240,7 @@
self.removeProperty(attachment)
+ @inlineCallbacks
def normalizeCalendarUserAddresses(self, lookupFunction, principalFunction,
toUUID=True):
"""
@@ -3259,7 +3261,7 @@
# Check that we can lookup this calendar user address - if not
# we cannot do anything with it
cuaddr = normalizeCUAddr(prop.value())
- name, guid, cuaddrs = lookupFunction(cuaddr, principalFunction, config)
+ name, guid, cuaddrs = yield lookupFunction(cuaddr, principalFunction, config)
if guid is None:
continue
@@ -3275,7 +3277,9 @@
if toUUID:
# Always re-write value to urn:uuid
- prop.setValue("urn:uuid:%s" % (guid,))
+ if isinstance(guid, uuid.UUID):
+ guid = unicode(guid).upper()
+ prop.setValue("urn:uuid:{guid}".format(guid=guid))
# If it is already a non-UUID address leave it be
elif cuaddr.startswith("urn:uuid:"):
@@ -3353,7 +3357,7 @@
# For VPOLL also do immediate children
if component.name() == "VPOLL":
- component.normalizeCalendarUserAddresses(lookupFunction, principalFunction, toUUID)
+ yield component.normalizeCalendarUserAddresses(lookupFunction, principalFunction, toUUID)
def allPerUserUIDs(self):
@@ -3563,15 +3567,16 @@
# Utilities
# #
+ at inlineCallbacks
def normalizeCUAddress(cuaddr, lookupFunction, principalFunction, toUUID=True):
# Check that we can lookup this calendar user address - if not
# we cannot do anything with it
- _ignore_name, guid, cuaddrs = lookupFunction(normalizeCUAddr(cuaddr), principalFunction, config)
+ _ignore_name, guid, cuaddrs = (yield lookupFunction(normalizeCUAddr(cuaddr), principalFunction, config))
if toUUID:
# Always re-write value to urn:uuid
if guid:
- return "urn:uuid:%s" % (guid,)
+ returnValue("urn:uuid:%s" % (guid,))
# If it is already a non-UUID address leave it be
elif cuaddr.startswith("urn:uuid:"):
@@ -3610,9 +3615,9 @@
# Make the change
if newaddr:
- return newaddr
+ returnValue(newaddr)
- return cuaddr
+ returnValue(cuaddr)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/resource.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/resource.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/resource.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -863,7 +863,8 @@
home = self._newStoreObject.parentCollection().ownerHome()
else:
home = self._newStoreObject.ownerHome()
- returnValue(element.HRef(self.principalForUID(home.uid()).principalURL()))
+ principal = (yield self.principalForUID(home.uid()))
+ returnValue(element.HRef(principal.principalURL()))
else:
parent = (yield self.locateParent(request, request.urlForResource(self)))
if parent and isinstance(parent, CalDAVResource):
@@ -883,7 +884,7 @@
home = self._newStoreObject.parentCollection().ownerHome()
else:
home = self._newStoreObject.ownerHome()
- returnValue(self.principalForUID(home.uid()))
+ returnValue((yield self.principalForUID(home.uid())))
else:
parent = (yield self.locateParent(request, request.urlForResource(self)))
if parent and isinstance(parent, CalDAVResource):
@@ -933,8 +934,8 @@
return None
if 'record' in dir(self):
- if self.record.fullName:
- return self.record.fullName
+ if self.record.fullNames:
+ return self.record.fullNames[0]
elif self.record.shortNames:
return self.record.shortNames[0]
else:
@@ -1063,6 +1064,7 @@
returnValue(PerUserDataFilter(accessUID).filter(caldata))
+ # MOVE2WHO returns Deferred
def iCalendarAddressDoNormalization(self, ical):
"""
Normalize calendar user addresses in the supplied iCalendar object into their
@@ -1071,24 +1073,26 @@
@param ical: calendar object to normalize.
@type ical: L{Component}
"""
- ical.normalizeCalendarUserAddresses(normalizationLookup,
+ return ical.normalizeCalendarUserAddresses(normalizationLookup,
self.principalForCalendarUserAddress)
+ @inlineCallbacks
def principalForCalendarUserAddress(self, address):
for principalCollection in self.principalCollections():
- principal = principalCollection.principalForCalendarUserAddress(address)
+ principal = (yield principalCollection.principalForCalendarUserAddress(address))
if principal is not None:
- return principal
- return None
+ returnValue(principal)
+ returnValue(None)
+ @inlineCallbacks
def principalForUID(self, principalUID):
for principalCollection in self.principalCollections():
- principal = principalCollection.principalForUID(principalUID)
+ principal = (yield principalCollection.principalForUID(principalUID))
if principal is not None:
- return principal
- return None
+ returnValue(principal)
+ returnValue(None)
@inlineCallbacks
@@ -1874,7 +1878,7 @@
elif name == "auto-schedule-mode" and self.calendarsEnabled():
autoScheduleMode = self.getAutoScheduleMode()
- returnValue(customxml.AutoScheduleMode(autoScheduleMode if autoScheduleMode else "default"))
+ returnValue(customxml.AutoScheduleMode(autoScheduleMode.description if autoScheduleMode else "default"))
elif namespace == carddav_namespace and self.addressBooksEnabled():
if name == "addressbook-home-set":
@@ -2302,20 +2306,23 @@
# ACL
##
+ @inlineCallbacks
def owner(self, request):
- return succeed(element.HRef(self.principalForRecord().principalURL()))
+ principal = yield self.principalForRecord()
+ returnValue(element.HRef(principal.principalURL()))
def ownerPrincipal(self, request):
- return succeed(self.principalForRecord())
+ return self.principalForRecord()
def resourceOwnerPrincipal(self, request):
- return succeed(self.principalForRecord())
+ return self.principalForRecord()
+ @inlineCallbacks
def defaultAccessControlList(self):
- myPrincipal = self.principalForRecord()
+ myPrincipal = yield self.principalForRecord()
# Server may be read only
if config.EnableReadOnlyServer:
@@ -2342,12 +2349,12 @@
# Give all access to config.AdminPrincipals
aces += config.AdminACEs
- return element.ACL(*aces)
+ returnValue(element.ACL(*aces))
def accessControlList(self, request, inheritance=True, expanding=False, inherited_aces=None):
# Permissions here are fixed, and are not subject to inheritance rules, etc.
- return succeed(self.defaultAccessControlList())
+ return self.defaultAccessControlList()
def principalCollections(self):
@@ -2555,9 +2562,10 @@
return config.Sharing.Enabled and config.Sharing.Calendars.Enabled and self.exists()
+ @inlineCallbacks
def _otherPrincipalHomeURL(self, otherUID):
- ownerPrincipal = self.principalForUID(otherUID)
- return ownerPrincipal.calendarHomeURLs()[0]
+ ownerPrincipal = (yield self.principalForUID(otherUID))
+ returnValue(ownerPrincipal.calendarHomeURLs()[0])
@inlineCallbacks
@@ -2584,8 +2592,9 @@
return self._newStoreHome.hasCalendarResourceUIDSomewhereElse(uid, ok_object._newStoreObject, mode)
+ @inlineCallbacks
def defaultAccessControlList(self):
- myPrincipal = self.principalForRecord()
+ myPrincipal = yield self.principalForRecord()
# Server may be read only
if config.EnableReadOnlyServer:
@@ -2652,7 +2661,7 @@
),
)
- return element.ACL(*aces)
+ returnValue(element.ACL(*aces))
@inlineCallbacks
@@ -2808,9 +2817,10 @@
return config.Sharing.Enabled and config.Sharing.AddressBooks.Enabled and self.exists()
+ @inlineCallbacks
def _otherPrincipalHomeURL(self, otherUID):
- ownerPrincipal = self.principalForUID(otherUID)
- return ownerPrincipal.addressBookHomeURLs()[0]
+ ownerPrincipal = (yield self.principalForUID(otherUID))
+ returnValue(ownerPrincipal.addressBookHomeURLs()[0])
@inlineCallbacks
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/scheduling_store/caldav/resource.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/scheduling_store/caldav/resource.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/scheduling_store/caldav/resource.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -373,12 +373,14 @@
if config.Scheduling.CalDAV.OldDraftCompatibility:
privs += (davxml.Privilege(caldavxml.Schedule()),)
- return davxml.ACL(
- # CalDAV:schedule-deliver for any authenticated user
- davxml.ACE(
- davxml.Principal(davxml.Authenticated()),
- davxml.Grant(*privs),
- ),
+ return succeed(
+ davxml.ACL(
+ # CalDAV:schedule-deliver for any authenticated user
+ davxml.ACE(
+ davxml.Principal(davxml.Authenticated()),
+ davxml.Grant(*privs),
+ ),
+ )
)
@@ -532,9 +534,10 @@
return succeed(sendSchedulePrivilegeSet)
+ @inlineCallbacks
def defaultAccessControlList(self):
if config.EnableProxyPrincipals:
- myPrincipal = self.parent.principalForRecord()
+ myPrincipal = yield self.parent.principalForRecord()
privs = (
davxml.Privilege(caldavxml.ScheduleSend()),
@@ -542,16 +545,18 @@
if config.Scheduling.CalDAV.OldDraftCompatibility:
privs += (davxml.Privilege(caldavxml.Schedule()),)
- return davxml.ACL(
- # CalDAV:schedule for associated write proxies
- davxml.ACE(
- davxml.Principal(davxml.HRef(joinURL(myPrincipal.principalURL(), "calendar-proxy-write"))),
- davxml.Grant(*privs),
- davxml.Protected(),
- ),
+ returnValue(
+ davxml.ACL(
+ # CalDAV:schedule for associated write proxies
+ davxml.ACE(
+ davxml.Principal(davxml.HRef(joinURL(myPrincipal.principalURL(), "calendar-proxy-write"))),
+ davxml.Grant(*privs),
+ davxml.Protected(),
+ ),
+ )
)
else:
- return super(ScheduleOutboxResource, self).defaultAccessControlList()
+ returnValue(super(ScheduleOutboxResource, self).defaultAccessControlList())
def report_urn_ietf_params_xml_ns_caldav_calendar_query(self, request, calendar_query):
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/sharing.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/sharing.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/sharing.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -792,7 +792,7 @@
Set shared state and check access control.
"""
if child._newStoreObject is not None and not child._newStoreObject.owned():
- ownerHomeURL = self._otherPrincipalHomeURL(child._newStoreObject.ownerHome().uid())
+ ownerHomeURL = (yield self._otherPrincipalHomeURL(child._newStoreObject.ownerHome().uid()))
ownerView = yield child._newStoreObject.ownerView()
child.setShare(joinURL(ownerHomeURL, ownerView.name()))
access = yield child._checkAccessControl()
@@ -802,6 +802,7 @@
def _otherPrincipalHomeURL(self, otherUID):
+ # Is this only meant to be overridden?
pass
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/stdconfig.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/stdconfig.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -374,6 +374,7 @@
# users, groups, locations and resources) to the server.
#
"DirectoryService": {
+ "Enabled": True,
"type": "twistedcaldav.directory.xmlfile.XMLDirectoryService",
"params": DEFAULT_SERVICE_PARAMS["twistedcaldav.directory.xmlfile.XMLDirectoryService"],
},
@@ -385,7 +386,7 @@
# and resources.
#
"ResourceService": {
- "Enabled" : True,
+ "Enabled": True,
"type": "twistedcaldav.directory.xmlfile.XMLDirectoryService",
"params": DEFAULT_RESOURCE_PARAMS["twistedcaldav.directory.xmlfile.XMLDirectoryService"],
},
@@ -1016,8 +1017,6 @@
"Enabled": True,
"MemcachedPool" : "Default",
"UpdateSeconds" : 300,
- "ExpireSeconds" : 86400,
- "LockSeconds" : 600,
"EnableUpdater" : True,
"UseExternalProxies" : False,
},
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/storebridge.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/storebridge.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/storebridge.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1763,11 +1763,12 @@
return succeed(davPrivilegeSet)
+ @inlineCallbacks
def defaultAccessControlList(self):
"""
Only read privileges allowed for managed attachments.
"""
- myPrincipal = self.parent.principalForRecord()
+ myPrincipal = yield self.parent.principalForRecord()
read_privs = (
davxml.Privilege(davxml.Read()),
@@ -1808,12 +1809,12 @@
),
)
- return davxml.ACL(*aces)
+ returnValue(davxml.ACL(*aces))
def accessControlList(self, request, inheritance=True, expanding=False, inherited_aces=None):
# Permissions here are fixed, and are not subject to inheritance rules, etc.
- return succeed(self.defaultAccessControlList())
+ return self.defaultAccessControlList()
@@ -1988,7 +1989,7 @@
# Access level comes from what the wiki has granted to the
# sharee
sharee = self.principalForUID(shareeUID)
- userID = sharee.record.guid
+ userID = sharee.record.uid
wikiID = owner.record.shortNames[0]
access = (yield getWikiAccess(userID, wikiID))
if access == "read":
@@ -2865,7 +2866,7 @@
principalURL = str(authz_principal)
if principalURL:
authz = (yield request.locateResource(principalURL))
- self._parentResource._newStoreObject._txn._authz_uid = authz.record.guid
+ self._parentResource._newStoreObject._txn._authz_uid = authz.record.uid
try:
response = (yield self.storeComponent(component, smart_merge=schedule_tag_match))
@@ -3586,7 +3587,7 @@
principalURL = str(authz_principal)
if principalURL:
authz = (yield request.locateResource(principalURL))
- self._parentResource._newStoreObject._txn._authz_uid = authz.record.guid
+ self._parentResource._newStoreObject._txn._authz_uid = authz.record.uid
try:
response = (yield self.storeComponent(component))
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_addressbookmultiget.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_addressbookmultiget.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_addressbookmultiget.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -31,7 +31,10 @@
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.xml import element as davxml
+from twext.who.idirectory import RecordType
+
+
class AddressBookMultiget (StoreTestCase):
"""
addressbook-multiget REPORT
@@ -39,6 +42,13 @@
data_dir = os.path.join(os.path.dirname(__file__), "data")
vcards_dir = os.path.join(data_dir, "vCards")
+
+ @inlineCallbacks
+ def setUp(self):
+ yield StoreTestCase.setUp(self)
+ self.authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
+
+
def test_multiget_some_vcards(self):
"""
All vcards.
@@ -207,7 +217,7 @@
</D:set>
</D:mkcol>
"""
- response = yield self.send(SimpleStoreRequest(self, "MKCOL", addressbook_uri, content=mkcol, authid="wsanchez"))
+ response = yield self.send(SimpleStoreRequest(self, "MKCOL", addressbook_uri, content=mkcol, authRecord=self.authRecord))
response = IResponse(response)
@@ -221,7 +231,7 @@
"PUT",
joinURL(addressbook_uri, filename + ".vcf"),
headers=Headers({"content-type": MimeType.fromString("text/vcard")}),
- authid="wsanchez"
+ authRecord=self.authRecord
)
request.stream = MemoryStream(icaldata)
yield self.send(request)
@@ -235,12 +245,12 @@
"PUT",
joinURL(addressbook_uri, child.basename()),
headers=Headers({"content-type": MimeType.fromString("text/vcard")}),
- authid="wsanchez"
+ authRecord=self.authRecord
)
request.stream = MemoryStream(child.getContent())
yield self.send(request)
- request = SimpleStoreRequest(self, "REPORT", addressbook_uri, authid="wsanchez")
+ request = SimpleStoreRequest(self, "REPORT", addressbook_uri, authRecord=self.authRecord)
request.stream = MemoryStream(query.toxml())
response = yield self.send(request)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_addressbookquery.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_addressbookquery.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_addressbookquery.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -27,7 +27,10 @@
from twistedcaldav.test.util import StoreTestCase, SimpleStoreRequest
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python.filepath import FilePath
+from twext.who.idirectory import RecordType
+
+
class AddressBookQuery(StoreTestCase):
"""
addressbook-query REPORT
@@ -67,6 +70,7 @@
oldValue = config.MaxQueryWithDataResults
config.MaxQueryWithDataResults = 1
+
def _restoreValueOK(f):
config.MaxQueryWithDataResults = oldValue
return None
@@ -89,6 +93,7 @@
oldValue = config.MaxQueryWithDataResults
config.MaxQueryWithDataResults = 1
+
def _restoreValueOK(f):
config.MaxQueryWithDataResults = oldValue
return None
@@ -191,15 +196,16 @@
if response.code != responsecode.CREATED:
self.fail("MKCOL failed: %s" % (response.code,))
'''
+ authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
# Add vCards to addressbook
for child in FilePath(self.vcards_dir).children():
if os.path.splitext(child.basename())[1] != ".vcf":
continue
- request = SimpleStoreRequest(self, "PUT", joinURL(addressbook_uri, child.basename()), authid="wsanchez")
+ request = SimpleStoreRequest(self, "PUT", joinURL(addressbook_uri, child.basename()), authRecord=authRecord)
request.stream = MemoryStream(child.getContent())
yield self.send(request)
- request = SimpleStoreRequest(self, "REPORT", addressbook_uri, authid="wsanchez")
+ request = SimpleStoreRequest(self, "REPORT", addressbook_uri, authRecord=authRecord)
request.stream = MemoryStream(query.toxml())
response = yield self.send(request)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_calendarquery.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_calendarquery.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_calendarquery.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -34,8 +34,8 @@
from pycalendar.datetime import DateTime
from twistedcaldav.ical import Component
from txdav.caldav.icalendarstore import ComponentUpdateState
-from twistedcaldav.directory.directory import DirectoryService
from txdav.caldav.datastore.query.filter import TimeRange
+from twext.who.idirectory import RecordType
@inlineCallbacks
@@ -79,7 +79,7 @@
"""
Put the contents of the Holidays directory into the store.
"""
- record = self.directory.recordWithShortName(DirectoryService.recordType_users, "wsanchez")
+ record = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
yield self.transactionUnderTest().calendarHomeWithUID(record.uid, create=True)
calendar = yield self.calendarUnderTest(name="calendar", home=record.uid)
for f in os.listdir(self.holidays_dir):
@@ -248,6 +248,7 @@
"""
self.patch(config, "MaxQueryWithDataResults", 1)
+
def _restoreValueOK(f):
self.fail("REPORT must fail with 403")
@@ -268,6 +269,7 @@
"""
self.patch(config, "MaxQueryWithDataResults", 1)
+
def _restoreValueError(f):
self.fail("REPORT must not fail with 403")
@@ -343,7 +345,8 @@
@inlineCallbacks
def calendar_query(self, query, got_xml):
- request = SimpleStoreRequest(self, "REPORT", "/calendars/users/wsanchez/calendar/", authid="wsanchez")
+ authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
+ request = SimpleStoreRequest(self, "REPORT", "/calendars/users/wsanchez/calendar/", authRecord=authRecord)
request.stream = MemoryStream(query.toxml())
response = yield self.send(request)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_collectioncontents.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_collectioncontents.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_collectioncontents.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -14,22 +14,22 @@
# limitations under the License.
##
-from twisted.internet.defer import inlineCallbacks
from twext.python.filepath import CachingFilePath as FilePath
-from txweb2 import responsecode
-from txweb2.iweb import IResponse
-from txweb2.stream import MemoryStream, FileStream
-from txweb2.http_headers import MimeType
-
+from twext.who.idirectory import RecordType
+from twisted.internet.defer import inlineCallbacks
from twistedcaldav.ical import Component
from twistedcaldav.memcachelock import MemcacheLock
from twistedcaldav.memcacher import Memcacher
-
-
from twistedcaldav.test.util import StoreTestCase, SimpleStoreRequest
-from txweb2.dav.util import joinURL
from txdav.caldav.datastore.sql import CalendarObject
+from txweb2 import responsecode
+from txweb2.dav.util import joinURL
+from txweb2.http_headers import MimeType
+from txweb2.iweb import IResponse
+from txweb2.stream import MemoryStream, FileStream
+
+
class CollectionContents(StoreTestCase):
"""
PUT request
@@ -52,7 +52,7 @@
def _fakeDoImplicitScheduling(self, component, inserting, internal_state):
return False, None, False, None
- self.patch(CalendarObject , "doImplicitScheduling",
+ self.patch(CalendarObject, "doImplicitScheduling",
_fakeDoImplicitScheduling)
# Tests in this suite assume that the root resource is a calendar home.
@@ -61,31 +61,27 @@
return super(CollectionContents, self).setUp()
+ @inlineCallbacks
def test_collection_in_calendar(self):
"""
Make (regular) collection in calendar
"""
calendar_uri = "/calendars/users/wsanchez/collection_in_calendar/"
- def mkcalendar_cb(response):
- response = IResponse(response)
-
- if response.code != responsecode.CREATED:
- self.fail("MKCALENDAR failed: %s" % (response.code,))
-
- def mkcol_cb(response):
- response = IResponse(response)
-
- if response.code != responsecode.FORBIDDEN:
- self.fail("Incorrect response to nested MKCOL: %s" % (response.code,))
-
+ authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
+ request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authRecord=authRecord)
+ response = yield self.send(request)
+ response = IResponse(response)
+ if response.code != responsecode.CREATED:
+ self.fail("MKCALENDAR failed: %s" % (response.code,))
nested_uri = joinURL(calendar_uri, "nested")
- request = SimpleStoreRequest(self, "MKCOL", nested_uri, authid="wsanchez")
- return self.send(request, mkcol_cb)
+ request = SimpleStoreRequest(self, "MKCOL", nested_uri, authRecord=authRecord)
+ response = yield self.send(request)
+ response = IResponse(response)
- request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authid="wsanchez")
- return self.send(request, mkcalendar_cb)
+ if response.code != responsecode.FORBIDDEN:
+ self.fail("Incorrect response to nested MKCOL: %s" % (response.code,))
def test_bogus_file(self):
@@ -163,6 +159,7 @@
)
+ @inlineCallbacks
def _test_file_in_calendar(self, what, *work):
"""
Creates a calendar collection, then PUTs a resource into that collection
@@ -171,68 +168,58 @@
"""
calendar_uri = "/calendars/users/wsanchez/testing_calendar/"
+ authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
+ request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authRecord=authRecord)
+ response = yield self.send(request)
+ response = IResponse(response)
+ if response.code != responsecode.CREATED:
+ self.fail("MKCALENDAR failed: %s" % (response.code,))
- @inlineCallbacks
- def mkcalendar_cb(response):
+ c = 0
+ for stream, response_code in work:
+ dst_uri = joinURL(calendar_uri, "dst%d.ics" % (c,))
+ request = SimpleStoreRequest(self, "PUT", dst_uri, authRecord=authRecord)
+ request.headers.setHeader("if-none-match", "*")
+ request.headers.setHeader("content-type", MimeType("text", "calendar"))
+ request.stream = stream
+ response = yield self.send(request)
response = IResponse(response)
- if response.code != responsecode.CREATED:
- self.fail("MKCALENDAR failed: %s" % (response.code,))
+ if response.code != response_code:
+ self.fail("Incorrect response to %s: %s (!= %s)" % (what, response.code, response_code))
- c = 0
+ c += 1
- for stream, response_code in work:
- dst_uri = joinURL(calendar_uri, "dst%d.ics" % (c,))
- request = SimpleStoreRequest(self, "PUT", dst_uri, authid="wsanchez")
- request.headers.setHeader("if-none-match", "*")
- request.headers.setHeader("content-type", MimeType("text", "calendar"))
- request.stream = stream
- response = yield self.send(request)
- response = IResponse(response)
- if response.code != response_code:
- self.fail("Incorrect response to %s: %s (!= %s)" % (what, response.code, response_code))
-
- c += 1
-
- request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authid="wsanchez")
- return self.send(request, mkcalendar_cb)
-
-
+ @inlineCallbacks
def test_fail_dot_file_put_in_calendar(self):
"""
Make (regular) collection in calendar
"""
calendar_uri = "/calendars/users/wsanchez/dot_file_in_calendar/"
+ authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
+ request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authRecord=authRecord)
+ response = yield self.send(request)
+ response = IResponse(response)
+ if response.code != responsecode.CREATED:
+ self.fail("MKCALENDAR failed: %s" % (response.code,))
- def mkcalendar_cb(response):
- response = IResponse(response)
+ stream = self.dataPath.child(
+ "Holidays").child(
+ "C318AA54-1ED0-11D9-A5E0-000A958A3252.ics"
+ ).open()
+ try:
+ calendar = str(Component.fromStream(stream))
+ finally:
+ stream.close()
- if response.code != responsecode.CREATED:
- self.fail("MKCALENDAR failed: %s" % (response.code,))
+ event_uri = "/".join([calendar_uri, ".event.ics"])
- def put_cb(response):
- response = IResponse(response)
-
- if response.code != responsecode.FORBIDDEN:
- self.fail("Incorrect response to dot file PUT: %s" % (response.code,))
-
- stream = self.dataPath.child(
- "Holidays").child(
- "C318AA54-1ED0-11D9-A5E0-000A958A3252.ics"
- ).open()
- try:
- calendar = str(Component.fromStream(stream))
- finally:
- stream.close()
-
- event_uri = "/".join([calendar_uri, ".event.ics"])
-
- request = SimpleStoreRequest(self, "PUT", event_uri, authid="wsanchez")
- request.headers.setHeader("content-type", MimeType("text", "calendar"))
- request.stream = MemoryStream(calendar)
- return self.send(request, put_cb)
-
- request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authid="wsanchez")
- return self.send(request, mkcalendar_cb)
+ request = SimpleStoreRequest(self, "PUT", event_uri, authRecord=authRecord)
+ request.headers.setHeader("content-type", MimeType("text", "calendar"))
+ request.stream = MemoryStream(calendar)
+ response = yield self.send(request)
+ response = IResponse(response)
+ if response.code != responsecode.FORBIDDEN:
+ self.fail("Incorrect response to dot file PUT: %s" % (response.code,))
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_icalendar.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_icalendar.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_icalendar.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -19,6 +19,7 @@
import itertools
from twisted.trial.unittest import SkipTest
+from twisted.internet.defer import inlineCallbacks, succeed
from twistedcaldav.ical import Component, Property, InvalidICalendarDataError, \
normalizeCUAddress, normalize_iCalStr
@@ -32,6 +33,8 @@
from twistedcaldav.dateops import normalizeForExpand
from pycalendar.value import Value
+
+
class iCalendar (twistedcaldav.test.util.TestCase):
"""
iCalendar support tests
@@ -7497,6 +7500,7 @@
self.assertEquals(expected, ical.hasInstancesAfter(cutoff))
+ @inlineCallbacks
def test_normalizeCalendarUserAddressesFromUUID(self):
"""
Ensure mailto is preferred, followed by path form, then http form.
@@ -7520,25 +7524,27 @@
def lookupFunction(cuaddr, ignored1, ignored2):
- return {
- "urn:uuid:foo" : (
- "Foo",
- "foo",
- ("urn:uuid:foo", "http://example.com/foo", "/foo")
- ),
- "urn:uuid:bar" : (
- "Bar",
- "bar",
- ("urn:uuid:bar", "mailto:bar@example.com", "http://example.com/bar", "/bar")
- ),
- "urn:uuid:baz" : (
- "Baz",
- "baz",
- ("urn:uuid:baz", "http://example.com/baz")
- ),
- }[cuaddr]
+ return succeed(
+ {
+ "urn:uuid:foo" : (
+ "Foo",
+ "foo",
+ ("urn:uuid:foo", "http://example.com/foo", "/foo")
+ ),
+ "urn:uuid:bar" : (
+ "Bar",
+ "bar",
+ ("urn:uuid:bar", "mailto:bar@example.com", "http://example.com/bar", "/bar")
+ ),
+ "urn:uuid:baz" : (
+ "Baz",
+ "baz",
+ ("urn:uuid:baz", "http://example.com/baz")
+ ),
+ }[cuaddr]
+ )
- component.normalizeCalendarUserAddresses(lookupFunction, None, toUUID=False)
+ yield component.normalizeCalendarUserAddresses(lookupFunction, None, toUUID=False)
self.assertEquals("mailto:bar@example.com",
component.getAttendeeProperty(("mailto:bar@example.com",)).value())
@@ -7548,6 +7554,7 @@
component.getAttendeeProperty(("http://example.com/baz",)).value())
+ @inlineCallbacks
def test_normalizeCalendarUserAddressesAndLocationChange(self):
"""
Ensure http(s) and /path CUA values are tucked away into the property
@@ -7573,25 +7580,27 @@
def lookupFunction(cuaddr, ignored1, ignored2):
- return {
- "/principals/users/foo" : (
- "Foo",
- "foo",
- ("urn:uuid:foo",)
- ),
- "http://example.com/principals/users/bar" : (
- "Bar",
- "bar",
- ("urn:uuid:bar",)
- ),
- "http://example.com/principals/locations/buzz" : (
- "{Restricted} Buzz",
- "buzz",
- ("urn:uuid:buzz",)
- ),
- }[cuaddr]
+ return succeed(
+ {
+ "/principals/users/foo" : (
+ "Foo",
+ "foo",
+ ("urn:uuid:foo",)
+ ),
+ "http://example.com/principals/users/bar" : (
+ "Bar",
+ "bar",
+ ("urn:uuid:bar",)
+ ),
+ "http://example.com/principals/locations/buzz" : (
+ "{Restricted} Buzz",
+ "buzz",
+ ("urn:uuid:buzz",)
+ ),
+ }[cuaddr]
+ )
- component.normalizeCalendarUserAddresses(lookupFunction, None, toUUID=True)
+ yield component.normalizeCalendarUserAddresses(lookupFunction, None, toUUID=True)
# Location value changed
prop = component.mainComponent().getProperty("LOCATION")
@@ -7601,6 +7610,7 @@
self.assertEquals(prop.parameterValue("CN"), "{Restricted} Buzz")
+ @inlineCallbacks
def test_normalizeCalendarUserAddressesAndLocationNoChange(self):
"""
Ensure http(s) and /path CUA values are tucked away into the property
@@ -7626,25 +7636,27 @@
def lookupFunction(cuaddr, ignored1, ignored2):
- return {
- "/principals/users/foo" : (
- "Foo",
- "foo",
- ("urn:uuid:foo",)
- ),
- "http://example.com/principals/users/bar" : (
- "Bar",
- "bar",
- ("urn:uuid:bar",)
- ),
- "http://example.com/principals/locations/buzz" : (
- "{Restricted} Buzz",
- "buzz",
- ("urn:uuid:buzz",)
- ),
- }[cuaddr]
+ return succeed(
+ {
+ "/principals/users/foo" : (
+ "Foo",
+ "foo",
+ ("urn:uuid:foo",)
+ ),
+ "http://example.com/principals/users/bar" : (
+ "Bar",
+ "bar",
+ ("urn:uuid:bar",)
+ ),
+ "http://example.com/principals/locations/buzz" : (
+ "{Restricted} Buzz",
+ "buzz",
+ ("urn:uuid:buzz",)
+ ),
+ }[cuaddr]
+ )
- component.normalizeCalendarUserAddresses(lookupFunction, None, toUUID=True)
+ yield component.normalizeCalendarUserAddresses(lookupFunction, None, toUUID=True)
# Location value changed
prop = component.mainComponent().getProperty("LOCATION")
@@ -7654,6 +7666,7 @@
self.assertEquals(prop.parameterValue("CN"), "{Restricted} Buzz")
+ @inlineCallbacks
def test_normalizeCalendarUserAddressesAndLocationNoChangeOtherCUType(self):
"""
Ensure http(s) and /path CUA values are tucked away into the property
@@ -7679,25 +7692,27 @@
def lookupFunction(cuaddr, ignored1, ignored2):
- return {
- "/principals/users/foo" : (
- "Foo",
- "foo",
- ("urn:uuid:foo",)
- ),
- "http://example.com/principals/users/bar" : (
- "Bar",
- "bar",
- ("urn:uuid:bar",)
- ),
- "http://example.com/principals/locations/buzz" : (
- "{Restricted} Buzz",
- "buzz",
- ("urn:uuid:buzz",)
- ),
- }[cuaddr]
+ return succeed(
+ {
+ "/principals/users/foo" : (
+ "Foo",
+ "foo",
+ ("urn:uuid:foo",)
+ ),
+ "http://example.com/principals/users/bar" : (
+ "Bar",
+ "bar",
+ ("urn:uuid:bar",)
+ ),
+ "http://example.com/principals/locations/buzz" : (
+ "{Restricted} Buzz",
+ "buzz",
+ ("urn:uuid:buzz",)
+ ),
+ }[cuaddr]
+ )
- component.normalizeCalendarUserAddresses(lookupFunction, None, toUUID=True)
+ yield component.normalizeCalendarUserAddresses(lookupFunction, None, toUUID=True)
# Location value changed
prop = component.mainComponent().getProperty("LOCATION")
@@ -8404,6 +8419,7 @@
self.assertEqual(changed, result_changed)
+ @inlineCallbacks
def test_normalizeCUAddressFromUUID(self):
"""
Ensure mailto is preferred, followed by path form, then http form.
@@ -8418,34 +8434,37 @@
)
def lookupFunction(cuaddr, ignored1, ignored2):
- return {
- "urn:uuid:foo" : (
- "Foo",
- "foo",
- ("urn:uuid:foo", "http://example.com/foo", "/foo")
- ),
- "urn:uuid:bar" : (
- "Bar",
- "bar",
- ("urn:uuid:bar", "mailto:bar@example.com", "http://example.com/bar", "/bar")
- ),
- "urn:uuid:baz" : (
- "Baz",
- "baz",
- ("urn:uuid:baz", "http://example.com/baz")
- ),
- "urn:uuid:buz" : (
- "Buz",
- "buz",
- ("urn:uuid:buz",)
- ),
- }[cuaddr]
+ return succeed(
+ {
+ "urn:uuid:foo" : (
+ "Foo",
+ "foo",
+ ("urn:uuid:foo", "http://example.com/foo", "/foo")
+ ),
+ "urn:uuid:bar" : (
+ "Bar",
+ "bar",
+ ("urn:uuid:bar", "mailto:bar@example.com", "http://example.com/bar", "/bar")
+ ),
+ "urn:uuid:baz" : (
+ "Baz",
+ "baz",
+ ("urn:uuid:baz", "http://example.com/baz")
+ ),
+ "urn:uuid:buz" : (
+ "Buz",
+ "buz",
+ ("urn:uuid:buz",)
+ ),
+ }[cuaddr]
+ )
for cuaddr, result in data:
- new_cuaddr = normalizeCUAddress(cuaddr, lookupFunction, None, toUUID=False)
+ new_cuaddr = yield normalizeCUAddress(cuaddr, lookupFunction, None, toUUID=False)
self.assertEquals(new_cuaddr, result)
+ @inlineCallbacks
def test_normalizeCUAddressToUUID(self):
"""
Ensure http(s) and /path CUA values are tucked away into the property
@@ -8459,21 +8478,23 @@
def lookupFunction(cuaddr, ignored1, ignored2):
- return {
- "/principals/users/foo" : (
- "Foo",
- "foo",
- ("urn:uuid:foo",)
- ),
- "http://example.com/principals/users/buz" : (
- "Buz",
- "buz",
- ("urn:uuid:buz",)
- ),
- }[cuaddr]
+ return succeed(
+ {
+ "/principals/users/foo" : (
+ "Foo",
+ "foo",
+ ("urn:uuid:foo",)
+ ),
+ "http://example.com/principals/users/buz" : (
+ "Buz",
+ "buz",
+ ("urn:uuid:buz",)
+ ),
+ }[cuaddr]
+ )
for cuaddr, result in data:
- new_cuaddr = normalizeCUAddress(cuaddr, lookupFunction, None, toUUID=True)
+ new_cuaddr = yield normalizeCUAddress(cuaddr, lookupFunction, None, toUUID=True)
self.assertEquals(new_cuaddr, result)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_mkcalendar.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_mkcalendar.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_mkcalendar.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -27,6 +27,10 @@
from twistedcaldav import caldavxml
from twistedcaldav.test.util import StoreTestCase, SimpleStoreRequest
+from twext.who.idirectory import RecordType
+
+
+
class MKCALENDAR (StoreTestCase):
"""
MKCALENDAR request
@@ -35,6 +39,12 @@
# Try nesting calendars (should fail)
# HEAD request on calendar: resourcetype = (collection, calendar)
+ @inlineCallbacks
+ def setUp(self):
+ yield StoreTestCase.setUp(self)
+ self.authRecord = yield self.directory.recordWithShortName(RecordType.user, u"user01")
+
+
def test_make_calendar(self):
"""
Make calendar
@@ -45,7 +55,7 @@
if os.path.exists(path):
rmdir(path)
- request = SimpleStoreRequest(self, "MKCALENDAR", uri, authid="user01")
+ request = SimpleStoreRequest(self, "MKCALENDAR", uri, authRecord=self.authRecord)
@inlineCallbacks
def do_test(response):
@@ -146,7 +156,7 @@
)
)
- request = SimpleStoreRequest(self, "MKCALENDAR", uri, authid="user01")
+ request = SimpleStoreRequest(self, "MKCALENDAR", uri, authRecord=self.authRecord)
request.stream = MemoryStream(mk.toxml())
return self.send(request, do_test)
@@ -165,7 +175,7 @@
# FIXME: Check for DAV:resource-must-be-null element
- request = SimpleStoreRequest(self, "MKCALENDAR", uri, authid="user01")
+ request = SimpleStoreRequest(self, "MKCALENDAR", uri, authRecord=self.authRecord)
return self.send(request, do_test)
@@ -190,8 +200,8 @@
nested_uri = os.path.join(first_uri, "nested")
- request = SimpleStoreRequest(self, "MKCALENDAR", nested_uri, authid="user01")
+ request = SimpleStoreRequest(self, "MKCALENDAR", nested_uri, authRecord=self.authRecord)
yield self.send(request, do_test)
- request = SimpleStoreRequest(self, "MKCALENDAR", first_uri, authid="user01")
+ request = SimpleStoreRequest(self, "MKCALENDAR", first_uri, authRecord=self.authRecord)
return self.send(request, next)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_multiget.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_multiget.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_multiget.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -14,6 +14,7 @@
##
from twext.python.filepath import CachingFilePath as FilePath
+from twext.who.idirectory import RecordType
from txweb2 import responsecode
from txweb2.dav.util import davXMLFromStream, joinURL
from txweb2.http_headers import Headers, MimeType
@@ -38,6 +39,12 @@
data_dir = os.path.join(os.path.dirname(__file__), "data")
holidays_dir = os.path.join(data_dir, "Holidays")
+ @inlineCallbacks
+ def setUp(self):
+ yield StoreTestCase.setUp(self)
+ self.authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
+
+
def test_multiget_some_events(self):
"""
All events.
@@ -262,7 +269,7 @@
def calendar_query(self, calendar_uri, query, got_xml, data, no_init):
if not no_init:
- response = yield self.send(SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authid="wsanchez"))
+ response = yield self.send(SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authRecord=self.authRecord))
response = IResponse(response)
if response.code != responsecode.CREATED:
self.fail("MKCALENDAR failed: %s" % (response.code,))
@@ -274,7 +281,7 @@
"PUT",
joinURL(calendar_uri, filename + ".ics"),
headers=Headers({"content-type": MimeType.fromString("text/calendar")}),
- authid="wsanchez"
+ authRecord=self.authRecord
)
request.stream = MemoryStream(icaldata)
yield self.send(request)
@@ -288,12 +295,12 @@
"PUT",
joinURL(calendar_uri, child.basename()),
headers=Headers({"content-type": MimeType.fromString("text/calendar")}),
- authid="wsanchez"
+ authRecord=self.authRecord
)
request.stream = MemoryStream(child.getContent())
yield self.send(request)
- request = SimpleStoreRequest(self, "REPORT", calendar_uri, authid="wsanchez")
+ request = SimpleStoreRequest(self, "REPORT", calendar_uri, authRecord=self.authRecord)
request.stream = MemoryStream(query.toxml())
response = yield self.send(request)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_props.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_props.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_props.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -19,21 +19,35 @@
from txweb2.iweb import IResponse
from txweb2.stream import MemoryStream
+from twisted.internet.defer import inlineCallbacks
+
from twistedcaldav import caldavxml
from twistedcaldav.test.util import StoreTestCase, SimpleStoreRequest
from txdav.xml import element as davxml
+from twext.who.idirectory import RecordType
+
+
+
class Properties(StoreTestCase):
"""
CalDAV properties
"""
+
+ @inlineCallbacks
+ def setUp(self):
+ yield StoreTestCase.setUp(self)
+ self.authRecord = yield self.directory.recordWithShortName(RecordType.user, u"user01")
+
+
def test_live_props(self):
"""
Live CalDAV properties
"""
calendar_uri = "/calendars/users/user01/test/"
+
def mkcalendar_cb(response):
response = IResponse(response)
@@ -123,24 +137,24 @@
return davXMLFromStream(response.stream).addCallback(got_xml)
query = davxml.PropertyFind(
- davxml.PropertyContainer(
- caldavxml.SupportedCalendarData(),
- caldavxml.SupportedCalendarComponentSet(),
- davxml.SupportedReportSet(),
- ),
- )
+ davxml.PropertyContainer(
+ caldavxml.SupportedCalendarData(),
+ caldavxml.SupportedCalendarComponentSet(),
+ davxml.SupportedReportSet(),
+ ),
+ )
request = SimpleStoreRequest(
self,
"PROPFIND",
calendar_uri,
headers=http_headers.Headers({"Depth": "0"}),
- authid="user01",
+ authRecord=self.authRecord,
)
request.stream = MemoryStream(query.toxml())
return self.send(request, propfind_cb)
- request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authid="user01")
+ request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authRecord=self.authRecord)
return self.send(request, mkcalendar_cb)
@@ -207,10 +221,10 @@
"PROPFIND",
calendar_uri,
headers=http_headers.Headers({"Depth": "0"}),
- authid="user01",
+ authRecord=self.authRecord,
)
request.stream = MemoryStream(query.toxml())
return self.send(request, propfind_cb)
- request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authid="user01")
+ request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authRecord=self.authRecord)
return self.send(request, mkcalendar_cb)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_resource.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_resource.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_resource.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -14,22 +14,25 @@
# limitations under the License.
##
+from twext.who.idirectory import RecordType
+from twisted.internet.defer import inlineCallbacks
+from twistedcaldav import carddavxml
+from twistedcaldav.config import config
+from twistedcaldav.notifications import NotificationCollectionResource
+from twistedcaldav.resource import (
+ CalDAVResource, CommonHomeResource,
+ CalendarHomeResource, AddressBookHomeResource
+)
+from twistedcaldav.test.util import (
+ InMemoryPropertyStore, StoreTestCase, SimpleStoreRequest
+)
+from twistedcaldav.test.util import TestCase
from txdav.xml.element import HRef, Principal, Unauthenticated
from txweb2.http import HTTPError
from txweb2.test.test_server import SimpleRequest
-from twisted.internet.defer import inlineCallbacks
-from twistedcaldav import carddavxml
-from twistedcaldav.config import config
-from twistedcaldav.resource import CalDAVResource, CommonHomeResource, \
- CalendarHomeResource, AddressBookHomeResource
-from twistedcaldav.test.util import InMemoryPropertyStore, StoreTestCase, \
- SimpleStoreRequest
-from twistedcaldav.test.util import TestCase
-from twistedcaldav.notifications import NotificationCollectionResource
-
class StubProperty(object):
def qname(self):
return "StubQnamespace", "StubQname"
@@ -185,13 +188,20 @@
class DefaultAddressBook (StoreTestCase):
+
@inlineCallbacks
+ def setUp(self):
+ yield StoreTestCase.setUp(self)
+ self.authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
+
+
+ @inlineCallbacks
def test_pick_default_addressbook(self):
"""
Get adbk
"""
- request = SimpleStoreRequest(self, "GET", "/addressbooks/users/wsanchez/", authid="wsanchez")
+ request = SimpleStoreRequest(self, "GET", "/addressbooks/users/wsanchez/", authRecord=self.authRecord)
home = yield request.locateResource("/addressbooks/users/wsanchez")
# default property initially not present
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_sharing.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_sharing.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_sharing.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -185,7 +185,8 @@
@inlineCallbacks
def _doPOST(self, body, resultcode=responsecode.OK):
- request = SimpleStoreRequest(self, "POST", "/calendars/__uids__/user01/calendar/", content=body, authid="user01")
+ authRecord = yield self.directory.recordWithUID(u"user01")
+ request = SimpleStoreRequest(self, "POST", "/calendars/__uids__/user01/calendar/", content=body, authRecord=authRecord)
request.headers.setHeader("content-type", MimeType("text", "xml"))
response = yield self.send(request)
response = IResponse(response)
@@ -210,7 +211,8 @@
@inlineCallbacks
def _doPOSTSharerAccept(self, body, resultcode=responsecode.OK):
- request = SimpleStoreRequest(self, "POST", "/calendars/__uids__/user02/", content=body, authid="user02")
+ authRecord = yield self.directory.recordWithUID(u"user02")
+ request = SimpleStoreRequest(self, "POST", "/calendars/__uids__/user02/", content=body, authRecord=authRecord)
request.headers.setHeader("content-type", MimeType("text", "xml"))
response = yield self.send(request)
response = IResponse(response)
@@ -732,6 +734,7 @@
self.assertEquals(propInvite, None)
+ # MOVE2WHO Fix wiki
@inlineCallbacks
def wikiSetup(self):
"""
@@ -798,7 +801,8 @@
self.patch(sharing, "getWikiAccess", stubWikiAccessMethod)
@inlineCallbacks
def listChildrenViaPropfind():
- request = SimpleStoreRequest(self, "PROPFIND", "/calendars/__uids__/user01/", authid="user01")
+ authRecord = yield self.directory.recordWithUID(u"user01")
+ request = SimpleStoreRequest(self, "PROPFIND", "/calendars/__uids__/user01/", authRecord=authRecord)
request.headers.setHeader("depth", "1")
response = yield self.send(request)
response = IResponse(response)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_upgrade.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_upgrade.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_upgrade.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1536,6 +1536,7 @@
self.assertFalse(changed)
+ @inlineCallbacks
def test_normalizeCUAddrs(self):
"""
Ensure that calendar user addresses (CUAs) are cached so we can
@@ -1577,8 +1578,8 @@
directory = StubDirectory()
cuaCache = {}
- normalizeCUAddrs(normalizeEvent, directory, cuaCache)
- normalizeCUAddrs(normalizeEvent, directory, cuaCache)
+ yield normalizeCUAddrs(normalizeEvent, directory, cuaCache)
+ yield normalizeCUAddrs(normalizeEvent, directory, cuaCache)
# Ensure we only called principalForCalendarUserAddress 3 times. It
# would have been 8 times without the cuaCache.
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_wrapping.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_wrapping.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/test_wrapping.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -28,11 +28,12 @@
from txweb2.responsecode import UNAUTHORIZED
from txweb2.stream import MemoryStream
+from twext.who.idirectory import RecordType
+
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.defer import maybeDeferred
from twistedcaldav.config import config
-from twistedcaldav.directory.test.test_xmlfile import XMLFileBase
from twistedcaldav.ical import Component as VComponent
from twistedcaldav.storebridge import DropboxCollection, \
CalendarCollectionResource
@@ -51,11 +52,14 @@
import hashlib
+
def _todo(f, why):
f.todo = why
return f
rewriteOrRemove = lambda f: _todo(f, "Rewrite or remove")
+
+
class FakeChanRequest(object):
code = 'request-not-finished'
@@ -113,7 +117,7 @@
@param objectText: Some iCalendar text to populate it with.
@type objectText: str
"""
- record = self.directory.recordWithShortName("users", "wsanchez")
+ record = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
uid = record.uid
txn = self.transactionUnderTest()
home = yield txn.calendarHomeWithUID(uid, True)
@@ -132,7 +136,7 @@
@param objectText: Some iVcard text to populate it with.
@type objectText: str
"""
- record = self.directory.recordWithShortName("users", "wsanchez")
+ record = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
uid = record.uid
txn = self.transactionUnderTest()
home = yield txn.addressbookHomeWithUID(uid, True)
@@ -171,9 +175,10 @@
"http://localhost:8008/" + path
)
if user is not None:
- guid = XMLFileBase.users[user]["guid"]
+ record = yield self.directory.recordWithShortName(RecordType.user, user)
+ uid = record.uid
req.authnUser = req.authzUser = (
- davxml.Principal(davxml.HRef('/principals/__uids__/' + guid + '/'))
+ davxml.Principal(davxml.HRef('/principals/__uids__/' + uid + '/'))
)
returnValue(aResource)
@@ -271,7 +276,7 @@
)
yield self.commit()
self.assertIsInstance(dropBoxResource, DropboxCollection)
- dropboxHomeType = davxml.ResourceType.dropboxhome #@UndefinedVariable
+ dropboxHomeType = davxml.ResourceType.dropboxhome # @UndefinedVariable
self.assertEquals(dropBoxResource.resourceType(),
dropboxHomeType)
@@ -285,7 +290,7 @@
C{CalendarHome.calendarWithName}.
"""
calDavFile = yield self.getResource("calendars/users/wsanchez/calendar")
- regularCalendarType = davxml.ResourceType.calendar #@UndefinedVariable
+ regularCalendarType = davxml.ResourceType.calendar # @UndefinedVariable
self.assertEquals(calDavFile.resourceType(),
regularCalendarType)
yield self.commit()
@@ -344,8 +349,11 @@
self.assertIdentical(
homeChild._associatedTransaction,
homeTransaction,
- "transaction mismatch on %s; %r is not %r " %
- (name, homeChild._associatedTransaction, homeTransaction))
+ "transaction mismatch on {n}; {at} is not {ht} ".format(
+ n=name, at=homeChild._associatedTransaction,
+ ht=homeTransaction
+ )
+ )
@inlineCallbacks
@@ -575,12 +583,13 @@
yield NamedLock.acquire(txn, "ImplicitUIDLock:%s" % (hashlib.md5("uid1").hexdigest(),))
# PUT fails
+ authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
request = SimpleStoreRequest(
self,
"PUT",
"/calendars/users/wsanchez/calendar/1.ics",
headers=Headers({"content-type": MimeType.fromString("text/calendar")}),
- authid="wsanchez"
+ authRecord=authRecord
)
request.stream = MemoryStream("""BEGIN:VCALENDAR
CALSCALE:GREGORIAN
@@ -606,12 +615,13 @@
"""
# PUT works
+ authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
request = SimpleStoreRequest(
self,
"PUT",
"/calendars/users/wsanchez/calendar/1.ics",
headers=Headers({"content-type": MimeType.fromString("text/calendar")}),
- authid="wsanchez"
+ authRecord=authRecord
)
request.stream = MemoryStream("""BEGIN:VCALENDAR
CALSCALE:GREGORIAN
@@ -635,11 +645,12 @@
txn = self.transactionUnderTest()
yield NamedLock.acquire(txn, "ImplicitUIDLock:%s" % (hashlib.md5("uid1").hexdigest(),))
+ authRecord = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
request = SimpleStoreRequest(
self,
"DELETE",
"/calendars/users/wsanchez/calendar/1.ics",
- authid="wsanchez"
+ authRecord=authRecord
)
response = yield self.send(request)
self.assertEqual(response.code, responsecode.SERVICE_UNAVAILABLE)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/util.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/test/util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -17,45 +17,37 @@
from __future__ import with_statement
import os
-import xattr
-from twistedcaldav.stdconfig import config
-
-from twisted.python.failure import Failure
+from calendarserver.provision.root import RootResource
+from calendarserver.tap.util import getRootResource
+from twext.python.filepath import CachingFilePath as FilePath
+from twext.python.log import Logger
from twisted.internet.base import DelayedCall
from twisted.internet.defer import succeed, fail, inlineCallbacks, returnValue
from twisted.internet.protocol import ProcessProtocol
-
-from twext.python.filepath import CachingFilePath as FilePath
-import txweb2.dav.test.util
-from txdav.xml import element as davxml, element
-from txweb2.http import HTTPError, StatusResponse
-
+from twisted.python.failure import Failure
from twistedcaldav import memcacher
-from twistedcaldav.memcacheclient import ClientFactory
from twistedcaldav.bind import doBind
-from twistedcaldav.directory import augment
from twistedcaldav.directory.addressbook import DirectoryAddressBookHomeProvisioningResource
from twistedcaldav.directory.calendar import (
DirectoryCalendarHomeProvisioningResource
)
from twistedcaldav.directory.principal import (
DirectoryPrincipalProvisioningResource)
-from twistedcaldav.directory.aggregate import AggregateDirectoryService
-from twistedcaldav.directory.xmlfile import XMLDirectoryService
-
-from txdav.common.datastore.test.util import deriveQuota, CommonCommonTests
-from txdav.common.datastore.file import CommonDataStore
-
-from calendarserver.provision.root import RootResource
-
-from twext.python.log import Logger
+from twistedcaldav.directory.util import transactionFromRequest
+from twistedcaldav.memcacheclient import ClientFactory
+from twistedcaldav.stdconfig import config
from txdav.caldav.datastore.test.util import buildCalendarStore
-from calendarserver.tap.util import getRootResource, directoryFromConfig
+from txdav.common.datastore.file import CommonDataStore
+from txdav.common.datastore.test.util import deriveQuota, CommonCommonTests
+from txdav.who.util import directoryFromConfig
+from txdav.xml import element as davxml, element
from txweb2.dav.test.util import SimpleRequest
-from twistedcaldav.directory.util import transactionFromRequest
-from twistedcaldav.directory.directory import DirectoryService
+import txweb2.dav.test.util
+from txweb2.http import HTTPError, StatusResponse
+import xattr
+
log = Logger()
@@ -67,10 +59,12 @@
]
DelayedCall.debug = True
+
def _todo(f, why):
f.todo = why
return f
+
featureUnimplemented = lambda f: _todo(f, "Feature unimplemented")
testUnimplemented = lambda f: _todo(f, "Test unimplemented")
todo = lambda why: lambda f: _todo(f, why)
@@ -83,92 +77,21 @@
-class DirectoryFixture(object):
- """
- Test fixture for creating various parts of the resource hierarchy related
- to directories.
- """
- def __init__(self):
- def _setUpPrincipals(ds):
- # FIXME: see FIXME in
- # DirectoryPrincipalProvisioningResource.__init__; this performs a
- # necessary modification to any directory service object for it to
- # be fully functional.
- self.principalsResource = DirectoryPrincipalProvisioningResource(
- "/principals/", ds
- )
- self._directoryChangeHooks = [_setUpPrincipals]
- directoryService = None
- principalsResource = None
-
- def addDirectoryService(self, newService):
- """
- Add an L{IDirectoryService} to this test case.
-
- If this test case does not have a directory service yet, create it and
- assign C{directoryService} and C{principalsResource} attributes to this
- test case.
-
- If the test case already has a directory service, create an
- L{AggregateDirectoryService} and re-assign the C{self.directoryService}
- attribute to point at it instead, while setting the C{realmName} of the
- new service to match the old one.
-
- If the test already has an L{AggregateDirectoryService}, create a
- I{new} L{AggregateDirectoryService} with the same list of services,
- after adjusting the new service's realm to match the existing ones.
- """
-
- if self.directoryService is None:
- directoryService = newService
- else:
- newService.realmName = self.directoryService.realmName
- if isinstance(self.directoryService, AggregateDirectoryService):
- directories = set(self.directoryService._recordTypes.items())
- directories.add(newService)
- else:
- directories = [newService, self.directoryService]
- directoryService = AggregateDirectoryService(directories, None)
-
- self.directoryService = directoryService
- # FIXME: see FIXME in DirectoryPrincipalProvisioningResource.__init__;
- # this performs a necessary modification to the directory service object
- # for it to be fully functional.
- for hook in self._directoryChangeHooks:
- hook(directoryService)
-
-
- def whenDirectoryServiceChanges(self, callback):
- """
- When the C{directoryService} attribute is changed by
- L{TestCase.addDirectoryService}, call the given callback in order to
- update any state which relies upon that service.
-
- If there's already a directory, invoke the callback immediately.
- """
- self._directoryChangeHooks.append(callback)
- if self.directoryService is not None:
- callback(self.directoryService)
-
-
-
class SimpleStoreRequest(SimpleRequest):
"""
A SimpleRequest that automatically grabs the proper transaction for a test.
"""
- def __init__(self, test, method, uri, headers=None, content=None, authid=None):
+ def __init__(self, test, method, uri, headers=None, content=None, authRecord=None):
super(SimpleStoreRequest, self).__init__(test.site, method, uri, headers, content)
self._test = test
self._newStoreTransaction = test.transactionUnderTest(txn=transactionFromRequest(self, test.storeUnderTest()))
self.credentialFactories = {}
# Fake credentials if auth needed
- if authid is not None:
- record = self._test.directory.recordWithShortName(DirectoryService.recordType_users, authid)
- if record:
- self.authzUser = self.authnUser = element.Principal(element.HRef("/principals/__uids__/%s/" % (record.uid,)))
+ if authRecord is not None:
+ self.authzUser = self.authnUser = element.Principal(element.HRef("/principals/__uids__/%s/" % (authRecord.uid,)))
@inlineCallbacks
@@ -198,9 +121,13 @@
self.configure()
- self._sqlCalendarStore = yield buildCalendarStore(self, self.notifierFactory, directoryFromConfig(config))
+ self._sqlCalendarStore = yield buildCalendarStore(
+ self, self.notifierFactory, None
+ )
+ self.directory = directoryFromConfig(config, self._sqlCalendarStore)
+ self._sqlCalendarStore.setDirectoryService(self.directory)
+
self.rootResource = getRootResource(config, self._sqlCalendarStore)
- self.directory = self._sqlCalendarStore.directoryService()
self.principalsResource = DirectoryPrincipalProvisioningResource("/principals/", self.directory)
self.site.resource.putChild("principals", self.principalsResource)
@@ -259,16 +186,7 @@
accounts.setContent(xmlFile.getContent())
- @property
- def directoryService(self):
- """
- Read-only alias for L{DirectoryFixture.directoryService} for
- compatibility with older tests. TODO: remove this.
- """
- return self.directory
-
-
class TestCase(txweb2.dav.test.util.TestCase):
resource_class = RootResource
@@ -282,19 +200,6 @@
quota=deriveQuota(self))
- def createStockDirectoryService(self):
- """
- Create a stock C{directoryService} attribute and assign it.
- """
- self.xmlFile = FilePath(config.DataRoot).child("accounts.xml")
- self.xmlFile.setContent(xmlFile.getContent())
- self.directoryFixture.addDirectoryService(XMLDirectoryService({
- "xmlFile": "accounts.xml",
- "augmentService":
- augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
- }))
-
-
def setupCalendars(self):
"""
When a directory service exists, set up the resources at C{/calendars}
@@ -308,6 +213,8 @@
L{TestCase.directoryFixture.addDirectoryService}.
"""
newStore = self.createDataStore()
+
+
@self.directoryFixture.whenDirectoryServiceChanges
def putAllChildren(ds):
self.calendarCollection = (
@@ -343,20 +250,10 @@
config.UsePackageTimezones = True
- @property
- def directoryService(self):
- """
- Read-only alias for L{DirectoryFixture.directoryService} for
- compatibility with older tests. TODO: remove this.
- """
- return self.directoryFixture.directoryService
-
def setUp(self):
super(TestCase, self).setUp()
- self.directoryFixture = DirectoryFixture()
-
# FIXME: this is only here to workaround circular imports
doBind()
@@ -506,7 +403,7 @@
print("Xattr mismatch:", childPath, attr)
print((xattr.getxattr(childPath, attr), " != ", value))
return False
- else: # method
+ else: # method
if not value(xattr.getxattr(childPath, attr)):
return False
@@ -555,7 +452,8 @@
that stores the data for that L{CalendarHomeResource}.
"""
super(HomeTestCase, self).setUp()
- self.createStockDirectoryService()
+
+
@self.directoryFixture.whenDirectoryServiceChanges
def addHomeProvisioner(ds):
self.homeProvisioner = DirectoryCalendarHomeProvisioningResource(
@@ -639,7 +537,8 @@
file.
"""
super(AddressBookHomeTestCase, self).setUp()
- self.createStockDirectoryService()
+
+
@self.directoryFixture.whenDirectoryServiceChanges
def addHomeProvisioner(ds):
self.homeProvisioner = DirectoryAddressBookHomeProvisioningResource(
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/timezoneservice.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/timezoneservice.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/timezoneservice.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -86,15 +86,17 @@
def defaultAccessControlList(self):
- return davxml.ACL(
- # DAV:Read for all principals (includes anonymous)
- davxml.ACE(
- davxml.Principal(davxml.All()),
- davxml.Grant(
- davxml.Privilege(davxml.Read()),
+ return succeed(
+ davxml.ACL(
+ # DAV:Read for all principals (includes anonymous)
+ davxml.ACE(
+ davxml.Principal(davxml.All()),
+ davxml.Grant(
+ davxml.Privilege(davxml.Read()),
+ ),
+ davxml.Protected(),
),
- davxml.Protected(),
- ),
+ )
)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/timezonestdservice.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/timezonestdservice.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/timezonestdservice.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -146,15 +146,17 @@
def defaultAccessControlList(self):
- return davxml.ACL(
- # DAV:Read for all principals (includes anonymous)
- davxml.ACE(
- davxml.Principal(davxml.All()),
- davxml.Grant(
- davxml.Privilege(davxml.Read()),
+ return succeed(
+ davxml.ACL(
+ # DAV:Read for all principals (includes anonymous)
+ davxml.ACE(
+ davxml.Principal(davxml.All()),
+ davxml.Grant(
+ davxml.Privilege(davxml.Read()),
+ ),
+ davxml.Protected(),
),
- davxml.Protected(),
- ),
+ )
)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/upgrade.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/upgrade.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/upgrade.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -39,11 +39,8 @@
from twistedcaldav import caldavxml
from twistedcaldav.directory import calendaruserproxy
from twistedcaldav.directory.calendaruserproxyloader import XMLCalendarUserProxyLoader
-from twistedcaldav.directory.directory import DirectoryService
-from twistedcaldav.directory.directory import GroupMembershipCacheUpdater
from twistedcaldav.directory.principal import DirectoryCalendarPrincipalResource
from twistedcaldav.directory.resourceinfo import ResourceInfoDatabase
-from twistedcaldav.directory.xmlfile import XMLDirectoryService
from twistedcaldav.ical import Component
from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser
from txdav.caldav.datastore.scheduling.imip.mailgateway import MailGatewayTokensDatabase
@@ -60,8 +57,7 @@
from twisted.protocols.amp import AMP, Command, String, Boolean
-from calendarserver.tap.util import getRootResource, FakeRequest, directoryFromConfig
-from calendarserver.tools.util import getDirectory
+from calendarserver.tap.util import getRootResource, FakeRequest
from txdav.caldav.datastore.scheduling.imip.mailgateway import migrateTokensToStore
@@ -121,6 +117,7 @@
+@inlineCallbacks
def upgradeCalendarCollection(calPath, directory, cuaCache):
errorOccurred = False
collectionUpdated = False
@@ -164,7 +161,7 @@
continue
try:
- data, fixed = normalizeCUAddrs(data, directory, cuaCache)
+ data, fixed = (yield normalizeCUAddrs(data, directory, cuaCache))
if fixed:
log.debug("Normalized CUAddrs in %s" % (resPath,))
needsRewrite = True
@@ -207,10 +204,11 @@
except:
raise
- return errorOccurred
+ returnValue(errorOccurred)
+@inlineCallbacks
def upgradeCalendarHome(homePath, directory, cuaCache):
errorOccurred = False
@@ -229,7 +227,7 @@
rmdir(calPath)
continue
log.debug("Upgrading calendar: %s" % (calPath,))
- if not upgradeCalendarCollection(calPath, directory, cuaCache):
+ if not (yield upgradeCalendarCollection(calPath, directory, cuaCache)):
errorOccurred = True
# Change the calendar-free-busy-set xattrs of the inbox to the
@@ -254,7 +252,7 @@
log.error("Failed to upgrade calendar home %s: %s" % (homePath, e))
raise
- return errorOccurred
+ returnValue(errorOccurred)
@@ -288,9 +286,10 @@
@UpgradeOneHome.responder
+ @inlineCallbacks
def upgradeOne(self, path):
- result = upgradeCalendarHome(path, self.directory, self.cuaCache)
- return dict(succeeded=result)
+ result = yield upgradeCalendarHome(path, self.directory, self.cuaCache)
+ returnValue(dict(succeeded=result))
@@ -543,9 +542,9 @@
# Skip non-directories
continue
- if not upgradeCalendarHome(
+ if not (yield upgradeCalendarHome(
homePath, directory, cuaCache
- ):
+ )):
setError()
count += 1
@@ -564,6 +563,7 @@
+@inlineCallbacks
def normalizeCUAddrs(data, directory, cuaCache):
"""
Normalize calendar user addresses to urn:uuid: form.
@@ -583,23 +583,24 @@
"""
cal = Component.fromString(data)
+ @inlineCallbacks
def lookupFunction(cuaddr, principalFunction, config):
# Return cached results, if any.
if cuaddr in cuaCache:
- return cuaCache[cuaddr]
+ returnValue(cuaCache[cuaddr])
- result = normalizationLookup(cuaddr, principalFunction, config)
+ result = yield normalizationLookup(cuaddr, principalFunction, config)
# Cache the result
cuaCache[cuaddr] = result
- return result
+ returnValue(result)
- cal.normalizeCalendarUserAddresses(lookupFunction,
+ yield cal.normalizeCalendarUserAddresses(lookupFunction,
directory.principalForCalendarUserAddress)
newData = str(cal)
- return newData, not newData == data
+ returnValue((newData, not newData == data))
@@ -904,31 +905,31 @@
-# Deferred
-def migrateFromOD(config, directory):
- #
- # Migrates locations and resources from OD
- #
- try:
- from twistedcaldav.directory.appleopendirectory import OpenDirectoryService
- from calendarserver.tools.resources import migrateResources
- except ImportError:
- return succeed(None)
+# # Deferred
+# def migrateFromOD(config, directory):
+# #
+# # Migrates locations and resources from OD
+# #
+# try:
+# from twistedcaldav.directory.appleopendirectory import OpenDirectoryService
+# from calendarserver.tools.resources import migrateResources
+# except ImportError:
+# return succeed(None)
- log.warn("Migrating locations and resources")
+# log.warn("Migrating locations and resources")
- userService = directory.serviceForRecordType("users")
- resourceService = directory.serviceForRecordType("resources")
- if (
- not isinstance(userService, OpenDirectoryService) or
- not isinstance(resourceService, XMLDirectoryService)
- ):
- # Configuration requires no migration
- return succeed(None)
+# userService = directory.serviceForRecordType("users")
+# resourceService = directory.serviceForRecordType("resources")
+# if (
+# not isinstance(userService, OpenDirectoryService) or
+# not isinstance(resourceService, XMLDirectoryService)
+# ):
+# # Configuration requires no migration
+# return succeed(None)
- # Create internal copies of resources and locations based on what is
- # found in OD
- return migrateResources(userService, resourceService)
+# # Create internal copies of resources and locations based on what is
+# # found in OD
+# return migrateResources(userService, resourceService)
@@ -997,7 +998,9 @@
"""
Execute the step.
"""
- return self.doUpgrade()
+ return succeed(None)
+ # MOVE2WHO
+ # return self.doUpgrade()
@@ -1025,7 +1028,7 @@
def stepWithResult(self, result):
if self.doPostImport:
- directory = directoryFromConfig(self.config)
+ directory = self.store.directoryService()
# Load proxy assignments from XML if specified
if self.config.ProxyLoadFromFile:
@@ -1035,27 +1038,28 @@
loader = XMLCalendarUserProxyLoader(self.config.ProxyLoadFromFile)
yield loader.updateProxyDB()
- # Populate the group membership cache
- if (self.config.GroupCaching.Enabled and
- self.config.GroupCaching.EnableUpdater):
- proxydb = calendaruserproxy.ProxyDBService
- if proxydb is None:
- proxydbClass = namedClass(self.config.ProxyDBService.type)
- proxydb = proxydbClass(**self.config.ProxyDBService.params)
+ # # Populate the group membership cache
+ # if (self.config.GroupCaching.Enabled and
+ # self.config.GroupCaching.EnableUpdater):
+ # proxydb = calendaruserproxy.ProxyDBService
+ # if proxydb is None:
+ # proxydbClass = namedClass(self.config.ProxyDBService.type)
+ # proxydb = proxydbClass(**self.config.ProxyDBService.params)
- updater = GroupMembershipCacheUpdater(proxydb,
- directory,
- self.config.GroupCaching.UpdateSeconds,
- self.config.GroupCaching.ExpireSeconds,
- self.config.GroupCaching.LockSeconds,
- namespace=self.config.GroupCaching.MemcachedPool,
- useExternalProxies=self.config.GroupCaching.UseExternalProxies)
- yield updater.updateCache(fast=True)
+ # # MOVE2WHO FIXME: port to new group cacher
+ # updater = GroupMembershipCacheUpdater(proxydb,
+ # directory,
+ # self.config.GroupCaching.UpdateSeconds,
+ # self.config.GroupCaching.ExpireSeconds,
+ # self.config.GroupCaching.LockSeconds,
+ # namespace=self.config.GroupCaching.MemcachedPool,
+ # useExternalProxies=self.config.GroupCaching.UseExternalProxies)
+ # yield updater.updateCache(fast=True)
- uid, gid = getCalendarServerIDs(self.config)
- dbPath = os.path.join(self.config.DataRoot, "proxies.sqlite")
- if os.path.exists(dbPath):
- os.chown(dbPath, uid, gid)
+ uid, gid = getCalendarServerIDs(self.config)
+ dbPath = os.path.join(self.config.DataRoot, "proxies.sqlite")
+ if os.path.exists(dbPath):
+ os.chown(dbPath, uid, gid)
# Process old inbox items
self.store.setMigrating(True)
Modified: CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/util.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/twistedcaldav/util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -23,6 +23,7 @@
from hashlib import md5, sha1
from twisted.internet import ssl, reactor
+from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web import client
from twisted.python import failure
from twext.python.log import Logger
@@ -495,6 +496,7 @@
+@inlineCallbacks
def normalizationLookup(cuaddr, principalFunction, config):
"""
Lookup function to be passed to ical.normalizeCalendarUserAddresses.
@@ -503,13 +505,13 @@
principal for the cuaddr.
"""
try:
- principal = principalFunction(cuaddr)
+ principal = yield principalFunction(cuaddr)
except Exception, e:
log.debug("Lookup of %s failed: %s" % (cuaddr, e))
principal = None
if principal is None:
- return (None, None, None)
+ returnValue((None, None, None))
else:
rec = principal.record
@@ -520,9 +522,9 @@
# to single-quotes.
fullName = rec.fullName.replace('"', "'")
- cuas = principal.record.calendarUserAddresses
+ cuas = principal.record.calendarUserAddresses()
- return (fullName, rec.guid, cuas)
+ returnValue((fullName, rec.guid, cuas))
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/caldav/scheduler.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/caldav/scheduler.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/caldav/scheduler.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -85,13 +85,14 @@
))
+ @inlineCallbacks
def checkOriginator(self):
"""
Check the validity of the Originator header. Extract the corresponding principal.
"""
# Verify that Originator is a valid calendar user
- originatorPrincipal = self.txn.directoryService().recordWithCalendarUserAddress(self.originator)
+ originatorPrincipal = yield self.txn.directoryService().recordWithCalendarUserAddress(self.originator)
if originatorPrincipal is None:
# Local requests MUST have a principal.
log.error("Could not find principal for originator: %s" % (self.originator,))
@@ -122,7 +123,7 @@
results = []
for recipient in self.recipients:
# Get the principal resource for this recipient
- principal = self.txn.directoryService().recordWithCalendarUserAddress(recipient)
+ principal = yield self.txn.directoryService().recordWithCalendarUserAddress(recipient)
# If no principal we may have a remote recipient but we should check whether
# the address is one that ought to be on our server and treat that as a missing
@@ -161,7 +162,7 @@
# Verify that the ORGANIZER's cu address maps to a valid user
organizer = self.calendar.getOrganizer()
if organizer:
- organizerPrincipal = self.txn.directoryService().recordWithCalendarUserAddress(organizer)
+ organizerPrincipal = yield self.txn.directoryService().recordWithCalendarUserAddress(organizer)
if organizerPrincipal:
if organizerPrincipal.calendarsEnabled():
@@ -225,6 +226,7 @@
))
+ @inlineCallbacks
def checkAttendeeAsOriginator(self):
"""
Check the validity of the ATTENDEE value as this is the originator of the iTIP message.
@@ -232,7 +234,7 @@
"""
# Attendee's Outbox MUST be the request URI
- attendeePrincipal = self.txn.directoryService().recordWithCalendarUserAddress(self.attendee)
+ attendeePrincipal = yield self.txn.directoryService().recordWithCalendarUserAddress(self.attendee)
if attendeePrincipal:
if self.doingPOST is not None and attendeePrincipal.uid != self.originator_uid:
log.error("ATTENDEE in calendar data does not match owner of Outbox: %s" % (self.calendar,))
@@ -257,11 +259,11 @@
# Prevent spoofing of ORGANIZER with specific METHODs when local
if self.isiTIPRequest:
- self.checkOrganizerAsOriginator()
+ return self.checkOrganizerAsOriginator()
# Prevent spoofing when doing reply-like METHODs
else:
- self.checkAttendeeAsOriginator()
+ return self.checkAttendeeAsOriginator()
def finalChecks(self):
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/freebusy.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/freebusy.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/freebusy.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -47,6 +47,8 @@
fbcacher = Memcacher("FBCache", pickle=True)
+
+
class FBCacheEntry(object):
CACHE_DAYS_FLOATING_ADJUST = 1
@@ -212,12 +214,12 @@
# TODO: actually we by pass altogether by assuming anyone can check anyone else's freebusy
# May need organizer principal
- organizer_record = calresource.directoryService().recordWithCalendarUserAddress(organizer) if organizer else None
+ organizer_record = (yield calresource.directoryService().recordWithCalendarUserAddress(organizer)) if organizer else None
organizer_uid = organizer_record.uid if organizer_record else ""
# Free busy is per-user
attendee_uid = calresource.viewerHome().uid()
- attendee_record = calresource.directoryService().recordWithUID(attendee_uid)
+ attendee_record = yield calresource.directoryService().recordWithUID(attendee_uid.decode("utf-8"))
# Get the timezone property from the collection.
tz = calresource.getTimezone()
@@ -237,7 +239,7 @@
authz_record = organizer_record
if hasattr(calresource._txn, "_authz_uid") and calresource._txn._authz_uid != organizer_uid:
authz_uid = calresource._txn._authz_uid
- authz_record = calresource.directoryService().recordWithUID(authz_uid)
+ authz_record = yield calresource.directoryService().recordWithUID(authz_uid.decode("utf-8"))
# Check if attendee is also the organizer or the delegate doing the request
if attendee_uid in (organizer_uid, authz_uid):
@@ -335,7 +337,7 @@
if excludeuid:
# See if we have a UID match
if (excludeuid == uid):
- test_record = calresource.directoryService().recordWithCalendarUserAddress(test_organizer) if test_organizer else None
+ test_record = (yield calresource.directoryService().recordWithCalendarUserAddress(test_organizer)) if test_organizer else None
test_uid = test_record.uid if test_record else ""
# Check that ORGANIZER's match (security requirement)
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/imip/inbound.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/imip/inbound.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/imip/inbound.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -329,9 +329,12 @@
toAddr = organizer[7:]
elif organizer.startswith("urn:uuid:"):
guid = organizer[9:]
- record = self.directory.recordWithGUID(guid)
- if record and record.emailAddresses:
- toAddr = list(record.emailAddresses)[0]
+ record = yield self.directory.recordWithGUID(guid)
+ try:
+ if record and record.emailAddresses:
+ toAddr = list(record.emailAddresses)[0]
+ except AttributeError:
+ pass
if toAddr is None:
log.error("Don't have an email address for the organizer; "
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/implicit.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/implicit.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/implicit.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -297,7 +297,7 @@
organizer_scheduling = (yield self.isOrganizerScheduling())
if organizer_scheduling:
self.state = "organizer"
- elif self.isAttendeeScheduling():
+ elif (yield self.isAttendeeScheduling()):
self.state = "attendee"
elif self.organizer:
# There is an ORGANIZER that is not this user but no ATTENDEE property for
@@ -365,7 +365,7 @@
# Get some useful information from the calendar
yield self.extractCalendarData()
- self.organizerPrincipal = self.calendar_home.directoryService().recordWithCalendarUserAddress(self.organizer)
+ self.organizerPrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(self.organizer)
self.organizerAddress = (yield addressmapping.mapper.getCalendarUser(self.organizer, self.organizerPrincipal))
# Originator is the organizer in this case
@@ -447,7 +447,7 @@
self.calendar = calendar_old
yield self.extractCalendarData()
- self.organizerPrincipal = self.calendar_home.directoryService().recordWithCalendarUserAddress(self.organizer)
+ self.organizerPrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(self.organizer)
self.organizerAddress = (yield addressmapping.mapper.getCalendarUser(self.organizer, self.organizerPrincipal))
# Originator is the organizer in this case
@@ -479,7 +479,7 @@
# Get some useful information from the calendar
yield self.extractCalendarData()
- self.attendeePrincipal = self.calendar_home.directoryService().recordWithUID(self.calendar_home.uid())
+ self.attendeePrincipal = yield self.calendar_home.directoryService().recordWithUID(self.calendar_home.uid().decode("utf-8"))
self.originator = self.attendee = self.attendeePrincipal.canonicalCalendarUserAddress()
result = (yield self.scheduleWithOrganizer())
@@ -491,7 +491,7 @@
def extractCalendarData(self):
# Get the originator who is the owner of the calendar resource being modified
- self.originatorPrincipal = self.calendar_home.directoryService().recordWithUID(self.calendar_home.uid())
+ self.originatorPrincipal = yield self.calendar_home.directoryService().recordWithUID(self.calendar_home.uid().decode("utf-8"))
# Pick the canonical CUA:
self.originator = self.originatorPrincipal.canonicalCalendarUserAddress()
@@ -555,7 +555,7 @@
returnValue(False)
# Organizer must map to a valid principal
- self.organizerPrincipal = self.calendar_home.directoryService().recordWithCalendarUserAddress(self.organizer)
+ self.organizerPrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(self.organizer)
self.organizerAddress = (yield addressmapping.mapper.getCalendarUser(self.organizer, self.organizerPrincipal))
if not self.organizerPrincipal:
returnValue(False)
@@ -567,21 +567,22 @@
returnValue(True)
+ @inlineCallbacks
def isAttendeeScheduling(self):
# First must have organizer property
if not self.organizer:
- return False
+ returnValue(False)
# Check to see whether any attendee is the owner
for attendee in self.attendees:
- attendeePrincipal = self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
+ attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
if attendeePrincipal and attendeePrincipal.uid == self.calendar_home.uid():
self.attendee = attendee
self.attendeePrincipal = attendeePrincipal
- return True
+ returnValue(True)
- return False
+ returnValue(False)
def makeScheduler(self):
@@ -1033,7 +1034,7 @@
if attendee.parameterValue("SCHEDULE-AGENT", "SERVER").upper() == "CLIENT":
cuaddr = attendee.value()
if cuaddr not in coerced:
- attendeePrincipal = self.calendar_home.directoryService().recordWithCalendarUserAddress(cuaddr)
+ attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(cuaddr)
attendeeAddress = (yield addressmapping.mapper.getCalendarUser(cuaddr, attendeePrincipal))
local_attendee = type(attendeeAddress) in (LocalCalendarUser, OtherServerCalendarUser,)
coerced[cuaddr] = local_attendee
@@ -1096,7 +1097,7 @@
# Handle split by not scheduling local attendees
if self.split_details is not None:
- attendeePrincipal = self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
+ attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
attendeeAddress = (yield addressmapping.mapper.getCalendarUser(attendee, attendeePrincipal))
if type(attendeeAddress) is LocalCalendarUser:
continue
@@ -1153,7 +1154,7 @@
# Handle split by not scheduling local attendees
if self.split_details is not None:
- attendeePrincipal = self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
+ attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
attendeeAddress = (yield addressmapping.mapper.getCalendarUser(attendee, attendeePrincipal))
if type(attendeeAddress) is LocalCalendarUser:
continue
@@ -1213,7 +1214,7 @@
# Handle split by not scheduling local attendees
if self.split_details is not None:
- attendeePrincipal = self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
+ attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
attendeeAddress = (yield addressmapping.mapper.getCalendarUser(attendee, attendeePrincipal))
if type(attendeeAddress) is LocalCalendarUser:
continue
@@ -1278,7 +1279,7 @@
# Handle split by not scheduling local attendees
if self.split_details is not None:
- attendeePrincipal = self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
+ attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
attendeeAddress = (yield addressmapping.mapper.getCalendarUser(attendee, attendeePrincipal))
if type(attendeeAddress) is LocalCalendarUser:
continue
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/delivery.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/delivery.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/delivery.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -228,7 +228,7 @@
# Loop over at most 3 redirects
ssl, host, port, path = self.server.details()
for _ignore in xrange(3):
- self._prepareRequest(host, port)
+ yield self._prepareRequest(host, port)
response = (yield self._processRequest(ssl, host, port, path))
if response.code not in (responsecode.MOVED_PERMANENTLY, responsecode.TEMPORARY_REDIRECT,):
break
@@ -334,16 +334,18 @@
returnValue(iostr.getvalue())
+ @inlineCallbacks
def _prepareRequest(self, host, port):
"""
Setup the request for sending. We might need to do this several times
whilst following redirects.
"""
- component, method = self._prepareData()
- self._prepareHeaders(host, port, component, method)
+ component, method = (yield self._prepareData())
+ yield self._prepareHeaders(host, port, component, method)
+ @inlineCallbacks
def _prepareHeaders(self, host, port, component, method):
"""
Always generate a new set of headers because the Host may varying during redirects,
@@ -357,7 +359,7 @@
# The Originator must be the ORGANIZER (for a request) or ATTENDEE (for a reply)
originator = self.scheduler.organizer.cuaddr if self.scheduler.isiTIPRequest else self.scheduler.attendee
if self.server.unNormalizeAddresses:
- originator = normalizeCUAddress(originator, normalizationLookup, self.scheduler.txn.directoryService().recordWithCalendarUserAddress, toUUID=False)
+ originator = yield normalizeCUAddress(originator, normalizationLookup, self.scheduler.txn.directoryService().recordWithCalendarUserAddress, toUUID=False)
self.headers.addRawHeader("Originator", utf8String(originator))
self.sign_headers.append("Originator")
@@ -399,6 +401,7 @@
self.sign_headers.append("Authorization")
+ @inlineCallbacks
def _prepareData(self):
"""
Prepare data via normalization etc. Only need to do this once even when
@@ -411,7 +414,7 @@
normalizedCalendar = self.scheduler.calendar.duplicate()
self.original_organizer = normalizedCalendar.getOrganizer()
if self.server.unNormalizeAddresses:
- normalizedCalendar.normalizeCalendarUserAddresses(
+ yield normalizedCalendar.normalizeCalendarUserAddresses(
normalizationLookup,
self.scheduler.txn.directoryService().recordWithCalendarUserAddress,
toUUID=False)
@@ -423,12 +426,12 @@
component = normalizedCalendar.mainType()
method = normalizedCalendar.propertyValue("METHOD")
self.data = str(normalizedCalendar)
- return component, method
+ returnValue(component, method)
else:
cal = Component.fromString(self.data)
component = cal.mainType()
method = cal.propertyValue("METHOD")
- return component, method
+ returnValue(component, method)
@inlineCallbacks
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/resource.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/resource.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/resource.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -353,11 +353,13 @@
davxml.Privilege(caldavxml.ScheduleDeliver()),
)
- return davxml.ACL(
- # DAV:Read, CalDAV:schedule-deliver for all principals (includes anonymous)
- davxml.ACE(
- davxml.Principal(davxml.All()),
- davxml.Grant(*privs),
- davxml.Protected(),
- ),
+ return succeed(
+ davxml.ACL(
+ # DAV:Read, CalDAV:schedule-deliver for all principals (includes anonymous)
+ davxml.ACE(
+ davxml.Principal(davxml.All()),
+ davxml.Grant(*privs),
+ davxml.Protected(),
+ ),
+ )
)
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/scheduler.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/scheduler.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/ischedule/scheduler.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -186,7 +186,7 @@
# Normalize recipient addresses
results = []
for recipient in recipients:
- normalized = normalizeCUAddress(recipient, normalizationLookup, self.txn.directoryService().recordWithCalendarUserAddress)
+ normalized = yield normalizeCUAddress(recipient, normalizationLookup, self.txn.directoryService().recordWithCalendarUserAddress)
self.recipientsNormalizationMap[normalized] = recipient
results.append(normalized)
recipients = results
@@ -205,7 +205,7 @@
if not self.checkForFreeBusy():
# Need to normalize the calendar data and recipient values to keep those in sync,
# as we might later try to match them
- self.calendar.normalizeCalendarUserAddresses(normalizationLookup, self.txn.directoryService().recordWithCalendarUserAddress)
+ return self.calendar.normalizeCalendarUserAddresses(normalizationLookup, self.txn.directoryService().recordWithCalendarUserAddress)
def checkAuthorization(self):
@@ -226,7 +226,7 @@
"""
# For remote requests we do not allow the originator to be a local user or one within our domain.
- originatorPrincipal = self.txn.directoryService().recordWithCalendarUserAddress(self.originator)
+ originatorPrincipal = (yield self.txn.directoryService().recordWithCalendarUserAddress(self.originator))
localUser = (yield addressmapping.mapper.isCalendarUserInMyDomain(self.originator))
if originatorPrincipal or localUser:
if originatorPrincipal.thisServer():
@@ -367,7 +367,7 @@
# Verify that the ORGANIZER's cu address does not map to a valid user
organizer = self.calendar.getOrganizer()
if organizer:
- organizerPrincipal = self.txn.directoryService().recordWithCalendarUserAddress(organizer)
+ organizerPrincipal = yield self.txn.directoryService().recordWithCalendarUserAddress(organizer)
if organizerPrincipal:
if organizerPrincipal.thisServer():
log.error("Invalid ORGANIZER in calendar data: %s" % (self.calendar,))
@@ -408,7 +408,7 @@
"""
# Attendee cannot be local.
- attendeePrincipal = self.txn.directoryService().recordWithCalendarUserAddress(self.attendee)
+ attendeePrincipal = yield self.txn.directoryService().recordWithCalendarUserAddress(self.attendee)
if attendeePrincipal:
if attendeePrincipal.thisServer():
log.error("Invalid ATTENDEE in calendar data: %s" % (self.calendar,))
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/processing.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/processing.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/processing.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -36,6 +36,7 @@
from txdav.caldav.datastore.scheduling.work import ScheduleRefreshWork, \
ScheduleAutoReplyWork
from txdav.caldav.icalendarstore import ComponentUpdateState, ComponentRemoveState
+from txdav.who.idirectory import AutoScheduleMode
import collections
import hashlib
@@ -58,6 +59,8 @@
log = Logger()
+
+
class ImplicitProcessorException(Exception):
def __init__(self, msg):
@@ -604,19 +607,28 @@
@param calendar: the iTIP message to process
@type calendar: L{Component}
@param automode: the auto-schedule mode for the recipient
- @type automode: C{str}
+ @type automode: L{txdav.who.idirectory.AutoScheduleMode}
@return: C{tuple} of C{bool}, C{bool}, C{str} indicating whether changes were made, whether the inbox item
should be added, and the new PARTSTAT.
"""
-
# First ignore the none mode
- if automode == "none":
+ if automode == AutoScheduleMode.none:
returnValue((False, True, "",))
- elif not automode or automode == "default":
- automode = config.Scheduling.Options.AutoSchedule.DefaultMode
+ elif not automode:
+ automode = {
+ "none": AutoScheduleMode.none,
+ "accept-always": AutoScheduleMode.accept,
+ "decline-always": AutoScheduleMode.decline,
+ "accept-if-free": AutoScheduleMode.acceptIfFree,
+ "decline-if-busy": AutoScheduleMode.declineIfBusy,
+ "automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy,
+ }.get(
+ config.Scheduling.Options.AutoSchedule.DefaultMode,
+ "automatic"
+ )
- log.debug("ImplicitProcessing - recipient '%s' processing UID: '%s' - checking for auto-reply with mode: %s" % (self.recipient.cuaddr, self.uid, automode,))
+ log.debug("ImplicitProcessing - recipient '%s' processing UID: '%s' - checking for auto-reply with mode: %s" % (self.recipient.cuaddr, self.uid, automode.name,))
cuas = self.recipient.principal.calendarUserAddresses
@@ -704,13 +716,19 @@
partstat_counts = collections.defaultdict(int)
for instance in instances.instances.itervalues():
if instance.partstat == "NEEDS-ACTION" and instance.active:
- if automode == "accept-always":
+ if automode == AutoScheduleMode.accept:
freePartstat = busyPartstat = "ACCEPTED"
- elif automode == "decline-always":
+ elif automode == AutoScheduleMode.decline:
freePartstat = busyPartstat = "DECLINED"
else:
- freePartstat = "ACCEPTED" if automode in ("accept-if-free", "automatic",) else "NEEDS-ACTION"
- busyPartstat = "DECLINED" if automode in ("decline-if-busy", "automatic",) else "NEEDS-ACTION"
+ freePartstat = "ACCEPTED" if automode in (
+ AutoScheduleMode.acceptIfFree,
+ AutoScheduleMode.acceptIfFreeDeclineIfBusy,
+ ) else "NEEDS-ACTION"
+ busyPartstat = "DECLINED" if automode in (
+ AutoScheduleMode.declineIfBusy,
+ AutoScheduleMode.acceptIfFreeDeclineIfBusy,
+ ) else "NEEDS-ACTION"
instance.partstat = freePartstat if instance.free else busyPartstat
partstat_counts[instance.partstat] += 1
@@ -901,7 +919,7 @@
# We only need to fix data that already exists
if recipient_resource is not None:
- if originator_calendar.mainType() != None:
+ if originator_calendar.mainType() is not None:
yield self.writeCalendarResource(None, recipient_resource, originator_calendar)
else:
yield self.deleteCalendarResource(recipient_resource)
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/scheduler.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/scheduler.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/scheduler.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -162,7 +162,7 @@
"""
self.calendar = calendar
- self.preProcessCalendarData()
+ yield self.preProcessCalendarData()
if self.logItems is not None:
self.logItems["recipients"] = len(recipients)
@@ -550,7 +550,7 @@
results = []
for recipient in self.recipients:
# Get the principal resource for this recipient
- principal = self.txn.directoryService().recordWithCalendarUserAddress(recipient)
+ principal = yield self.txn.directoryService().recordWithCalendarUserAddress(recipient)
# If no principal we may have a remote recipient but we should check whether
# the address is one that ought to be on our server and treat that as a missing
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/work.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/work.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/scheduling/work.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -45,6 +45,8 @@
log = Logger()
+
+
class ScheduleWorkMixin(object):
"""
Base class for common schedule work item behavior.
@@ -189,7 +191,7 @@
try:
home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
resource = (yield home.objectResourceWithID(self.resourceID))
- organizerPrincipal = home.directoryService().recordWithUID(home.uid())
+ organizerPrincipal = yield home.directoryService().recordWithUID(home.uid().decode("utf-8"))
organizer = organizerPrincipal.canonicalCalendarUserAddress()
calendar_old = Component.fromString(self.icalendarTextOld) if self.icalendarTextOld else None
calendar_new = Component.fromString(self.icalendarTextNew) if self.icalendarTextNew else None
@@ -311,7 +313,7 @@
try:
home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
resource = (yield home.objectResourceWithID(self.resourceID))
- attendeePrincipal = home.directoryService().recordWithUID(home.uid())
+ attendeePrincipal = yield home.directoryService().recordWithUID(home.uid().decode("utf-8"))
attendee = attendeePrincipal.canonicalCalendarUserAddress()
calendar = (yield resource.componentForUser())
organizer = calendar.validOrganizerForScheduling()
@@ -336,6 +338,7 @@
self._dequeued()
except Exception, e:
+ # FIXME: calendar may not be set here!
log.debug("ScheduleReplyWork - exception ID: {id}, UID: '{uid}', {err}", id=self.workID, uid=calendar.resourceUID(), err=str(e))
raise
except:
@@ -381,7 +384,7 @@
try:
home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
- attendeePrincipal = home.directoryService().recordWithUID(home.uid())
+ attendeePrincipal = yield home.directoryService().recordWithUID(home.uid().decode("utf-8"))
attendee = attendeePrincipal.canonicalCalendarUserAddress()
calendar = Component.fromString(self.icalendarText)
organizer = calendar.validOrganizerForScheduling()
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/sql.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/sql.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1915,14 +1915,14 @@
# Normalize the calendar user addresses once we know we have valid
# calendar data
- component.normalizeCalendarUserAddresses(normalizationLookup, self.directoryService().recordWithCalendarUserAddress)
+ yield component.normalizeCalendarUserAddresses(normalizationLookup, self.directoryService().recordWithCalendarUserAddress)
# Possible timezone stripping
if config.EnableTimezonesByReference:
component.stripKnownTimezones()
# Check location/resource organizer requirement
- self.validLocationResourceOrganizer(component, inserting, internal_state)
+ yield self.validLocationResourceOrganizer(component, inserting, internal_state)
# Check access
if config.EnablePrivateEvents:
@@ -1986,13 +1986,14 @@
raise TooManyAttendeesError("Attendee list size %d is larger than allowed limit %d" % (attendeeListLength, config.MaxAttendeesPerInstance))
+ @inlineCallbacks
def validLocationResourceOrganizer(self, component, inserting, internal_state):
"""
If the calendar owner is a location or resource, check whether an ORGANIZER property is required.
"""
if internal_state == ComponentUpdateState.NORMAL:
- originatorPrincipal = self.calendar().ownerHome().directoryRecord()
+ originatorPrincipal = yield self.calendar().ownerHome().directoryRecord()
cutype = originatorPrincipal.getCUType() if originatorPrincipal is not None else "INDIVIDUAL"
organizer = component.getOrganizer()
@@ -2009,10 +2010,10 @@
# Find current principal and update modified by details
if hasattr(self._txn, "_authz_uid"):
- authz = self.directoryService().recordWithUID(self._txn._authz_uid)
+ authz = yield self.directoryService().recordWithUID(self._txn._authz_uid.decode("utf-8"))
prop = Property("X-CALENDARSERVER-MODIFIED-BY", authz.canonicalCalendarUserAddress())
prop.setParameter("CN", authz.displayName())
- for candidate in authz.calendarUserAddresses:
+ for candidate in authz.calendarUserAddresses():
if candidate.startswith("mailto:"):
prop.setParameter("EMAIL", candidate[7:])
break
@@ -2108,7 +2109,7 @@
log.debug("Organizer and attendee properties were entirely removed by the client. Restoring existing properties.")
# Get the originator who is the owner of the calendar resource being modified
- originatorPrincipal = self.calendar().ownerHome().directoryRecord()
+ originatorPrincipal = yield self.calendar().ownerHome().directoryRecord()
originatorAddresses = originatorPrincipal.calendarUserAddresses
for component in calendar.subcomponents():
@@ -2145,7 +2146,7 @@
log.debug("Sync COMPLETED property change.")
# Get the originator who is the owner of the calendar resource being modified
- originatorPrincipal = self.calendar().ownerHome().directoryRecord()
+ originatorPrincipal = yield self.calendar().ownerHome().directoryRecord()
originatorAddresses = originatorPrincipal.calendarUserAddresses
for component in calendar.subcomponents():
@@ -2264,6 +2265,7 @@
self._componentChanged = True
+ @inlineCallbacks
def addStructuredLocation(self, component):
"""
Scan the component for ROOM attendees; if any are associated with an
@@ -2277,12 +2279,12 @@
value = attendee.value()
if value.startswith("urn:uuid:"):
guid = value[9:]
- loc = self.directoryService().recordWithGUID(guid)
+ loc = yield self.directoryService().recordWithGUID(guid)
if loc is not None:
guid = loc.extras.get("associatedAddress",
None)
if guid is not None:
- addr = self.directoryService().recordWithGUID(guid)
+ addr = yield self.directoryService().recordWithGUID(guid)
if addr is not None:
street = addr.extras.get("streetAddress", "")
geo = addr.extras.get("geo", "")
@@ -2539,7 +2541,7 @@
self.processAlarms(component, inserting)
# Process structured location
- self.addStructuredLocation(component)
+ yield self.addStructuredLocation(component)
# Do scheduling
implicit_result = (yield self.doImplicitScheduling(component, inserting, internal_state))
@@ -3760,9 +3762,9 @@
raise InvalidSplit()
# Cannot be attendee
- ownerPrincipal = self.calendar().ownerHome().directoryRecord()
+ ownerPrincipal = yield self.calendar().ownerHome().directoryRecord()
organizer = component.getOrganizer()
- organizerPrincipal = self.directoryService().recordWithCalendarUserAddress(organizer) if organizer else None
+ organizerPrincipal = (yield self.directoryService().recordWithCalendarUserAddress(organizer)) if organizer else None
if organizer is not None and organizerPrincipal.uid != ownerPrincipal.uid:
raise InvalidSplit()
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/attachments/accounts.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/attachments/accounts.xml 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/attachments/accounts.xml 2014-03-13 20:40:10 UTC (rev 12899)
@@ -18,19 +18,19 @@
<!DOCTYPE accounts SYSTEM "../../../conf/auth/accounts.dtd">
-<accounts realm="/Search">
- <user>
+<directory realm="/Search">
+ <record type="user">
<uid>home1</uid>
- <guid>home1</guid>
+ <short-name>home1</short-name>
<password>home1</password>
- <name>Example User 1</name>
- <email-address>home1 at example.com</email-address>
- </user>
- <user>
+ <full-name>Example User 1</full-name>
+ <email>home1 at example.com</email>
+ </record>
+ <record type="user">
<uid>home2</uid>
- <guid>home2</guid>
+ <short-name>home2</short-name>
<password>home2</password>
- <name>Example User 2</name>
- <email-address>home2 at example.com</email-address>
- </user>
-</accounts>
+ <full-name>Example User 2</full-name>
+ <email>home2 at example.com</email>
+ </record>
+</directory>
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/test_attachments.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/test_attachments.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/test_attachments.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -14,7 +14,7 @@
# limitations under the License.
##
-from calendarserver.tap.util import directoryFromConfig
+from txdav.who.util import directoryFromConfig
from pycalendar.datetime import DateTime
from pycalendar.value import Value
@@ -49,7 +49,7 @@
"""
storePath = FilePath(__file__).parent().child("calendar_store")
-homeRoot = storePath.child("ho").child("me").child("home1")
+homeRoot = storePath.child("ho").child("me").child(u"home1")
cal1Root = homeRoot.child("calendar_1")
calendar1_objectNames = [
@@ -597,7 +597,7 @@
self.assertTrue(os.path.exists(apath))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertNotEqual(quota, 0)
@@ -609,7 +609,7 @@
self.assertFalse(os.path.exists(apath))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertEqual(quota, 0)
@@ -648,7 +648,7 @@
self.assertTrue(os.path.exists(apath1))
self.assertTrue(os.path.exists(apath2))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertNotEqual(quota, 0)
@@ -661,7 +661,7 @@
self.assertFalse(os.path.exists(apath1))
self.assertFalse(os.path.exists(apath2))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertEqual(quota, 0)
@@ -743,7 +743,7 @@
self.assertTrue(os.path.exists(apath))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertNotEqual(quota, 0)
@@ -755,7 +755,7 @@
self.assertTrue(os.path.exists(apath))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertNotEqual(quota, 0)
@@ -767,7 +767,7 @@
self.assertFalse(os.path.exists(apath))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertEqual(quota, 0)
@@ -1131,7 +1131,7 @@
self.assertTrue(os.path.exists(apath))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertNotEqual(quota, 0)
@@ -1143,7 +1143,7 @@
self.assertFalse(os.path.exists(apath))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertEqual(quota, 0)
@@ -1178,7 +1178,7 @@
self.assertTrue(os.path.exists(apath1))
self.assertTrue(os.path.exists(apath2))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertNotEqual(quota, 0)
@@ -1191,7 +1191,7 @@
self.assertFalse(os.path.exists(apath1))
self.assertFalse(os.path.exists(apath2))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertEqual(quota, 0)
@@ -1218,7 +1218,7 @@
self.assertTrue(os.path.exists(apath))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertNotEqual(quota, 0)
@@ -1230,7 +1230,7 @@
self.assertTrue(os.path.exists(apath))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertNotEqual(quota, 0)
@@ -1242,7 +1242,7 @@
self.assertFalse(os.path.exists(apath))
- home = (yield self.transactionUnderTest().calendarHomeWithUID("home1"))
+ home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
quota = (yield home.quotaUsedBytes())
yield self.commit()
self.assertEqual(quota, 0)
@@ -1363,7 +1363,7 @@
}
requirements = {
- "home1" : {
+ u"home1" : {
"calendar1" : {
"1.1.ics" : (PLAIN_ICS % {"year": now, "uid": "1.1", }, metadata,),
"1.2.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "1.2", "userid": "user01", "dropboxid": "1.2"}, metadata,),
@@ -1372,7 +1372,7 @@
"1.5.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "1.5", "userid": "user01", "dropboxid": "1.4"}, metadata,),
}
},
- "home2" : {
+ u"home2" : {
"calendar2" : {
"2-2.1.ics" : (PLAIN_ICS % {"year": now, "uid": "2-2.1", }, metadata,),
"2-2.2.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "2-2.2", "userid": "user02", "dropboxid": "2.2"}, metadata,),
@@ -1488,16 +1488,16 @@
"""
Add the full set of attachments to be used for testing.
"""
- yield self._addAttachment("home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_1.txt")
- yield self._addAttachment("home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_2.txt")
- yield self._addAttachment("home1", "calendar1", "1.3.ics", "1.3", "attach_1_3.txt")
- yield self._addAttachment("home1", "calendar1", "1.4.ics", "1.4", "attach_1_4.txt")
- yield self._addAttachmentProperty("home1", "calendar1", "1.5.ics", "1.4", "home1", "attach_1_4.txt")
+ yield self._addAttachment(u"home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_1.txt")
+ yield self._addAttachment(u"home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_2.txt")
+ yield self._addAttachment(u"home1", "calendar1", "1.3.ics", "1.3", "attach_1_3.txt")
+ yield self._addAttachment(u"home1", "calendar1", "1.4.ics", "1.4", "attach_1_4.txt")
+ yield self._addAttachmentProperty(u"home1", "calendar1", "1.5.ics", "1.4", "home1", "attach_1_4.txt")
- yield self._addAttachment("home2", "calendar2", "2-2.2.ics", "2.2", "attach_2_2.txt")
- yield self._addAttachmentProperty("home2", "calendar2", "2-2.3.ics", "1.3", "home1", "attach_1_3.txt")
- yield self._addAttachmentProperty("home2", "calendar3", "2-3.2.ics", "1.4", "home1", "attach_1_4.txt")
- yield self._addAttachmentProperty("home2", "calendar3", "2-3.3.ics", "1.4", "home1", "attach_1_4.txt")
+ yield self._addAttachment(u"home2", "calendar2", "2-2.2.ics", "2.2", "attach_2_2.txt")
+ yield self._addAttachmentProperty(u"home2", "calendar2", "2-2.3.ics", "1.3", "home1", "attach_1_3.txt")
+ yield self._addAttachmentProperty(u"home2", "calendar3", "2-3.2.ics", "1.4", "home1", "attach_1_4.txt")
+ yield self._addAttachmentProperty(u"home2", "calendar3", "2-3.3.ics", "1.4", "home1", "attach_1_4.txt")
@inlineCallbacks
@@ -1586,7 +1586,7 @@
"""
Test L{txdav.caldav.datastore.sql.DropboxAttachment.convertToManaged} converts properly to a ManagedAttachment.
"""
- yield self._addAttachment("home1", "calendar1", "1.2.ics", "1.2", "attach_1_2.txt")
+ yield self._addAttachment(u"home1", "calendar1", "1.2.ics", "1.2", "attach_1_2.txt")
txn = self._sqlCalendarStore.newTransaction()
@@ -1616,11 +1616,11 @@
"""
Test L{txdav.caldav.datastore.sql.ManagedAttachment.newReference} creates a new managed attachment reference.
"""
- yield self._addAttachment("home1", "calendar1", "1.4.ics", "1.4", "attach_1_4.txt")
+ yield self._addAttachment(u"home1", "calendar1", "1.4.ics", "1.4", "attach_1_4.txt")
txn = self._sqlCalendarStore.newTransaction()
- home = (yield txn.calendarHomeWithUID("home1"))
+ home = (yield txn.calendarHomeWithUID(u"home1"))
calendar = (yield home.calendarWithName("calendar1"))
event4 = (yield calendar.calendarObjectWithName("1.4.ics"))
event5 = (yield calendar.calendarObjectWithName("1.5.ics"))
@@ -1664,12 +1664,12 @@
"""
Test L{txdav.caldav.datastore.sql.CalendarObject.convertAttachments} re-writes calendar data.
"""
- yield self._addAttachment("home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_1.txt")
- yield self._addAttachment("home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_2.txt")
+ yield self._addAttachment(u"home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_1.txt")
+ yield self._addAttachment(u"home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_2.txt")
txn = self._sqlCalendarStore.newTransaction()
- home = (yield txn.calendarHomeWithUID("home1"))
+ home = (yield txn.calendarHomeWithUID(u"home1"))
calendar = (yield home.calendarWithName("calendar1"))
event = (yield calendar.calendarObjectWithName("1.2.ics"))
@@ -1688,7 +1688,7 @@
txn = self._sqlCalendarStore.newTransaction()
- home = (yield txn.calendarHomeWithUID("home1"))
+ home = (yield txn.calendarHomeWithUID(u"home1"))
calendar = (yield home.calendarWithName("calendar1"))
event = (yield calendar.calendarObjectWithName("1.2.ics"))
@@ -1712,7 +1712,7 @@
# Convert the second dropbox attachment
txn = self._sqlCalendarStore.newTransaction()
- home = (yield txn.calendarHomeWithUID("home1"))
+ home = (yield txn.calendarHomeWithUID(u"home1"))
calendar = (yield home.calendarWithName("calendar1"))
event = (yield calendar.calendarObjectWithName("1.2.ics"))
dattachment = (yield DropBoxAttachment.load(txn, "1.2.dropbox", "attach_1_2_2.txt"))
@@ -1722,7 +1722,7 @@
yield txn.commit()
txn = self._sqlCalendarStore.newTransaction()
- home = (yield txn.calendarHomeWithUID("home1"))
+ home = (yield txn.calendarHomeWithUID(u"home1"))
calendar = (yield home.calendarWithName("calendar1"))
event = (yield calendar.calendarObjectWithName("1.2.ics"))
component = (yield event.componentForUser()).mainComponent()
@@ -1760,14 +1760,14 @@
yield calstore._upgradeDropbox(txn, "1.2.dropbox")
yield txn.commit()
- yield self._verifyConversion("home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",))
- yield self._verifyNoConversion("home1", "calendar1", "1.3.ics", ("attach_1_3.txt",))
- yield self._verifyNoConversion("home1", "calendar1", "1.4.ics", ("attach_1_4.txt",))
- yield self._verifyNoConversion("home1", "calendar1", "1.5.ics", ("attach_1_4.txt",))
- yield self._verifyNoConversion("home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",))
- yield self._verifyNoConversion("home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",))
- yield self._verifyNoConversion("home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",))
- yield self._verifyNoConversion("home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",))
+ yield self._verifyConversion(u"home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",))
+ yield self._verifyNoConversion(u"home1", "calendar1", "1.3.ics", ("attach_1_3.txt",))
+ yield self._verifyNoConversion(u"home1", "calendar1", "1.4.ics", ("attach_1_4.txt",))
+ yield self._verifyNoConversion(u"home1", "calendar1", "1.5.ics", ("attach_1_4.txt",))
+ yield self._verifyNoConversion(u"home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",))
+ yield self._verifyNoConversion(u"home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",))
+ yield self._verifyNoConversion(u"home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",))
+ yield self._verifyNoConversion(u"home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",))
@inlineCallbacks
@@ -1784,14 +1784,14 @@
yield calstore._upgradeDropbox(txn, "1.3.dropbox")
yield txn.commit()
- yield self._verifyNoConversion("home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",))
- yield self._verifyConversion("home1", "calendar1", "1.3.ics", ("attach_1_3.txt",))
- yield self._verifyNoConversion("home1", "calendar1", "1.4.ics", ("attach_1_4.txt",))
- yield self._verifyNoConversion("home1", "calendar1", "1.5.ics", ("attach_1_4.txt",))
- yield self._verifyNoConversion("home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",))
- yield self._verifyConversion("home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",))
- yield self._verifyNoConversion("home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",))
- yield self._verifyNoConversion("home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",))
+ yield self._verifyNoConversion(u"home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",))
+ yield self._verifyConversion(u"home1", "calendar1", "1.3.ics", ("attach_1_3.txt",))
+ yield self._verifyNoConversion(u"home1", "calendar1", "1.4.ics", ("attach_1_4.txt",))
+ yield self._verifyNoConversion(u"home1", "calendar1", "1.5.ics", ("attach_1_4.txt",))
+ yield self._verifyNoConversion(u"home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",))
+ yield self._verifyConversion(u"home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",))
+ yield self._verifyNoConversion(u"home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",))
+ yield self._verifyNoConversion(u"home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",))
@inlineCallbacks
@@ -1808,14 +1808,14 @@
yield calstore._upgradeDropbox(txn, "1.4.dropbox")
yield txn.commit()
- yield self._verifyNoConversion("home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",))
- yield self._verifyNoConversion("home1", "calendar1", "1.3.ics", ("attach_1_3.txt",))
- yield self._verifyConversion("home1", "calendar1", "1.4.ics", ("attach_1_4.txt",))
- yield self._verifyConversion("home1", "calendar1", "1.5.ics", ("attach_1_4.txt",))
- yield self._verifyNoConversion("home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",))
- yield self._verifyNoConversion("home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",))
- yield self._verifyConversion("home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",))
- yield self._verifyConversion("home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",))
+ yield self._verifyNoConversion(u"home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",))
+ yield self._verifyNoConversion(u"home1", "calendar1", "1.3.ics", ("attach_1_3.txt",))
+ yield self._verifyConversion(u"home1", "calendar1", "1.4.ics", ("attach_1_4.txt",))
+ yield self._verifyConversion(u"home1", "calendar1", "1.5.ics", ("attach_1_4.txt",))
+ yield self._verifyNoConversion(u"home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",))
+ yield self._verifyNoConversion(u"home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",))
+ yield self._verifyConversion(u"home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",))
+ yield self._verifyConversion(u"home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",))
@inlineCallbacks
@@ -1830,14 +1830,14 @@
calstore = CalendarStoreFeatures(self._sqlCalendarStore)
yield calstore.upgradeToManagedAttachments(2)
- yield self._verifyConversion("home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",))
- yield self._verifyConversion("home1", "calendar1", "1.3.ics", ("attach_1_3.txt",))
- yield self._verifyConversion("home1", "calendar1", "1.4.ics", ("attach_1_4.txt",))
- yield self._verifyConversion("home1", "calendar1", "1.5.ics", ("attach_1_4.txt",))
- yield self._verifyConversion("home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",))
- yield self._verifyConversion("home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",))
- yield self._verifyConversion("home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",))
- yield self._verifyConversion("home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",))
+ yield self._verifyConversion(u"home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",))
+ yield self._verifyConversion(u"home1", "calendar1", "1.3.ics", ("attach_1_3.txt",))
+ yield self._verifyConversion(u"home1", "calendar1", "1.4.ics", ("attach_1_4.txt",))
+ yield self._verifyConversion(u"home1", "calendar1", "1.5.ics", ("attach_1_4.txt",))
+ yield self._verifyConversion(u"home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",))
+ yield self._verifyConversion(u"home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",))
+ yield self._verifyConversion(u"home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",))
+ yield self._verifyConversion(u"home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",))
# Paths do not exist
for path in self.paths.values():
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/util.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/test/util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -16,7 +16,7 @@
##
from twisted.trial.unittest import TestCase
from twext.python.clsprop import classproperty
-from twisted.internet.defer import inlineCallbacks
+from twisted.internet.defer import inlineCallbacks, succeed
"""
Store test utility functions
@@ -40,7 +40,7 @@
def recordWithCalendarUserAddress(self, cuaddr):
- return self.recordsByCUA.get(cuaddr)
+ return succeed(self.recordsByCUA.get(cuaddr))
def addRecord(self, record):
@@ -117,7 +117,7 @@
def isProxyFor(self, other):
- return False
+ return succeed(False)
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/util.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/datastore/util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -100,6 +100,7 @@
+@inlineCallbacks
def normalizationLookup(cuaddr, recordFunction, config):
"""
Lookup function to be passed to ical.normalizeCalendarUserAddresses.
@@ -108,23 +109,25 @@
record for the cuaddr.
"""
try:
- record = recordFunction(cuaddr)
+ record = yield recordFunction(cuaddr)
except Exception, e:
log.debug("Lookup of %s failed: %s" % (cuaddr, e))
record = None
if record is None:
- return (None, None, None)
+ returnValue((None, None, None))
else:
# RFC5545 syntax does not allow backslash escaping in
# parameter values. A double-quote is thus not allowed
# in a parameter value except as the start/end delimiters.
# Single quotes are allowed, so we convert any double-quotes
# to single-quotes.
- return (
- record.fullName.replace('"', "'"),
- record.uid,
- record.calendarUserAddresses,
+ returnValue(
+ (
+ record.displayName.replace('"', "'"),
+ record.uid,
+ record.calendarUserAddresses,
+ )
)
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/icalendardirectoryservice.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/icalendardirectoryservice.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/caldav/icalendardirectoryservice.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -37,8 +37,8 @@
"""
Return the record for the specified calendar user address.
- @return: the record.
- @rtype: L{ICalendarStoreDirectoryRecord}
+ @return: Deferred resulting in the record.
+ @rtype: L{Deferred} resulting in L{ICalendarStoreDirectoryRecord}
"""
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/file.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/file.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/file.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -700,7 +700,7 @@
def directoryRecord(self):
- return self.directoryService().recordWithUID(self.uid())
+ return self.directoryService().recordWithUID(self.uid().decode("utf-8"))
def retrieveOldShares(self):
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/conduit.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/conduit.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/conduit.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -76,32 +76,33 @@
self.store = store
- def validRequst(self, source_guid, destination_guid):
+ @inlineCallbacks
+ def validRequest(self, source_uid, destination_uid):
"""
- Verify that the specified GUIDs are valid for the request and return the
+ Verify that the specified uids are valid for the request and return the
matching directory records.
- @param source_guid: GUID for the user on whose behalf the request is being made
- @type source_guid: C{str}
- @param destination_guid: GUID for the user to whom the request is being sent
- @type destination_guid: C{str}
+ @param source_uid: UID for the user on whose behalf the request is being made
+ @type source_uid: C{str}
+ @param destination_uid: UID for the user to whom the request is being sent
+ @type destination_uid: C{str}
- @return: C{tuple} of L{IStoreDirectoryRecord}
+ @return: L{Deferred} resulting in C{tuple} of L{IStoreDirectoryRecord}
"""
- source = self.store.directoryService().recordWithUID(source_guid)
+ source = yield self.store.directoryService().recordWithUID(source_uid)
if source is None:
- raise DirectoryRecordNotFoundError("Cross-pod source: {}".format(source_guid))
+ raise DirectoryRecordNotFoundError("Cross-pod source: {}".format(source_uid))
if not source.thisServer():
- raise FailedCrossPodRequestError("Cross-pod source not on this server: {}".format(source_guid))
+ raise FailedCrossPodRequestError("Cross-pod source not on this server: {}".format(source_uid))
- destination = self.store.directoryService().recordWithUID(destination_guid)
+ destination = yield self.store.directoryService().recordWithUID(destination_uid)
if destination is None:
- raise DirectoryRecordNotFoundError("Cross-pod destination: {}".format(destination_guid))
+ raise DirectoryRecordNotFoundError("Cross-pod destination: {}".format(destination_uid))
if destination.thisServer():
- raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(destination_guid))
+ raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(destination_uid))
- return (source, destination,)
+ returnValue((source, destination,))
@inlineCallbacks
@@ -166,13 +167,13 @@
@param homeType: Type of home being shared.
@type homeType: C{int}
- @param ownerUID: GUID of the sharer.
+ @param ownerUID: UID of the sharer.
@type ownerUID: C{str}
@param ownerID: resource ID of the sharer calendar
@type ownerID: C{int}
@param ownerName: owner's name of the sharer calendar
@type ownerName: C{str}
- @param shareeUID: GUID of the sharee
+ @param shareeUID: UID of the sharee
@type shareeUID: C{str}
@param shareUID: Resource/invite ID for sharee
@type shareUID: C{str}
@@ -186,7 +187,7 @@
@type supported_components: C{str}
"""
- _ignore_sender, recipient = self.validRequst(ownerUID, shareeUID)
+ _ignore_sender, recipient = yield self.validRequest(ownerUID, shareeUID)
action = {
"action": "shareinvite",
@@ -250,17 +251,17 @@
@param homeType: Type of home being shared.
@type homeType: C{int}
- @param ownerUID: GUID of the sharer.
+ @param ownerUID: UID of the sharer.
@type ownerUID: C{str}
@param ownerID: resource ID of the sharer calendar
@type ownerID: C{int}
- @param shareeUID: GUID of the sharee
+ @param shareeUID: UID of the sharee
@type shareeUID: C{str}
@param shareUID: Resource/invite ID for sharee
@type shareUID: C{str}
"""
- _ignore_sender, recipient = self.validRequst(ownerUID, shareeUID)
+ _ignore_sender, recipient = yield self.validRequest(ownerUID, shareeUID)
action = {
"action": "shareuninvite",
@@ -313,9 +314,9 @@
@param homeType: Type of home being shared.
@type homeType: C{int}
- @param ownerUID: GUID of the sharer.
+ @param ownerUID: UID of the sharer.
@type ownerUID: C{str}
- @param shareeUID: GUID of the recipient
+ @param shareeUID: UID of the recipient
@type shareeUID: C{str}
@param shareUID: Resource/invite ID for recipient
@type shareUID: C{str}
@@ -325,7 +326,7 @@
@type summary: C{str}
"""
- _ignore_sender, recipient = self.validRequst(shareeUID, ownerUID)
+ _ignore_sender, recipient = yield self.validRequest(shareeUID, ownerUID)
action = {
"action": "sharereply",
@@ -398,7 +399,7 @@
actionName = "add-attachment"
shareeView = objectResource._parentCollection
- action, recipient = self._send(actionName, shareeView, objectResource)
+ action, recipient = yield self._send(actionName, shareeView, objectResource)
action["rids"] = rids
action["filename"] = filename
result = yield self.sendRequest(shareeView._txn, recipient, action, stream, content_type)
@@ -458,7 +459,7 @@
actionName = "update-attachment"
shareeView = objectResource._parentCollection
- action, recipient = self._send(actionName, shareeView, objectResource)
+ action, recipient = yield self._send(actionName, shareeView, objectResource)
action["managedID"] = managed_id
action["filename"] = filename
result = yield self.sendRequest(shareeView._txn, recipient, action, stream, content_type)
@@ -514,7 +515,7 @@
actionName = "remove-attachment"
shareeView = objectResource._parentCollection
- action, recipient = self._send(actionName, shareeView, objectResource)
+ action, recipient = yield self._send(actionName, shareeView, objectResource)
action["rids"] = rids
action["managedID"] = managed_id
result = yield self.sendRequest(shareeView._txn, recipient, action)
@@ -557,6 +558,7 @@
# Sharer data access related apis
#
+ @inlineCallbacks
def _send(self, action, parent, child=None):
"""
Base behavior for an operation on a L{CommonHomeChild}.
@@ -570,7 +572,7 @@
ownerID = parent.external_id()
shareeUID = parent.viewerHome().uid()
- _ignore_sender, recipient = self.validRequst(shareeUID, ownerUID)
+ _ignore_sender, recipient = yield self.validRequest(shareeUID, ownerUID)
result = {
"action": action,
@@ -581,7 +583,7 @@
}
if child is not None:
result["resource_id"] = child.id()
- return result, recipient
+ returnValue((result, recipient))
@inlineCallbacks
@@ -644,7 +646,7 @@
@type kwargs: C{dict}
"""
- action, recipient = self._send(actionName, shareeView, objectResource)
+ action, recipient = yield self._send(actionName, shareeView, objectResource)
if args is not None:
action["arguments"] = args
if kwargs is not None:
@@ -710,7 +712,7 @@
servertoserver,
event_details,
):
- action, recipient = self._send("freebusy", calresource)
+ action, recipient = yield self._send("freebusy", calresource)
action["timerange"] = [timerange.start.getText(), timerange.end.getText()]
action["matchtotal"] = matchtotal
action["excludeuid"] = excludeuid
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/resource.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/resource.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/resource.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -179,11 +179,13 @@
davxml.Privilege(davxml.Read()),
)
- return davxml.ACL(
- # DAV:Read for all principals (includes anonymous)
- davxml.ACE(
- davxml.Principal(davxml.All()),
- davxml.Grant(*privs),
- davxml.Protected(),
- ),
+ return succeed(
+ davxml.ACL(
+ # DAV:Read for all principals (includes anonymous)
+ davxml.ACE(
+ davxml.Principal(davxml.All()),
+ davxml.Grant(*privs),
+ davxml.Protected(),
+ ),
+ )
)
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/test/test_conduit.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/test/test_conduit.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/podding/test/test_conduit.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -123,29 +123,41 @@
}
- def test_validRequst(self):
+ @inlineCallbacks
+ def test_validRequest(self):
"""
Cross-pod request fails when there is no shared secret header present.
"""
conduit = PoddingConduit(self.storeUnderTest())
- r1, r2 = conduit.validRequst("user01", "puser02")
+ r1, r2 = yield conduit.validRequest("user01", "puser02")
self.assertTrue(r1 is not None)
self.assertTrue(r2 is not None)
- self.assertRaises(DirectoryRecordNotFoundError, conduit.validRequst, "bogus01", "user02")
- self.assertRaises(DirectoryRecordNotFoundError, conduit.validRequst, "user01", "bogus02")
- self.assertRaises(FailedCrossPodRequestError, conduit.validRequst, "user01", "user02")
+ self.assertFailure(
+ conduit.validRequest("bogus01", "user02"),
+ DirectoryRecordNotFoundError
+ )
+ self.assertFailure(
+ conduit.validRequest("user01", "bogus02"),
+ DirectoryRecordNotFoundError
+ )
+ self.assertFailure(
+ conduit.validRequest("user01", "user02"),
+ FailedCrossPodRequestError
+ )
+
+
class TestConduitToConduit(MultiStoreConduitTest):
class FakeConduit(PoddingConduit):
@inlineCallbacks
def send_fake(self, txn, ownerUID, shareeUID):
- _ignore_owner, sharee = self.validRequst(ownerUID, shareeUID)
+ _ignore_owner, sharee = yield self.validRequest(ownerUID, shareeUID)
action = {
"action": "fake",
"echo": "bravo"
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/sql.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/sql.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -211,6 +211,10 @@
return self._directoryService
+ def setDirectoryService(self, directoryService):
+ self._directoryService = directoryService
+
+
def callWithNewTransactions(self, callback):
"""
Registers a method to be called whenever a new transaction is
@@ -922,102 +926,164 @@
@classproperty
def _addGroupQuery(cls):
gr = schema.GROUPS
- return Insert({gr.NAME: Parameter("name"),
- gr.GROUP_GUID: Parameter("groupGUID"),
- gr.MEMBERSHIP_HASH: Parameter("membershipHash")},
- Return=gr.GROUP_ID)
+ return Insert(
+ {
+ gr.NAME: Parameter("name"),
+ gr.GROUP_GUID: Parameter("groupUID"),
+ gr.MEMBERSHIP_HASH: Parameter("membershipHash")
+ },
+ Return=gr.GROUP_ID
+ )
@classproperty
def _updateGroupQuery(cls):
gr = schema.GROUPS
- return Update({gr.MEMBERSHIP_HASH: Parameter("membershipHash"),
- gr.NAME: Parameter("name"), gr.MODIFIED: Parameter("timestamp")},
- Where=(gr.GROUP_GUID == Parameter("groupGUID")))
+ return Update(
+ {
+ gr.MEMBERSHIP_HASH: Parameter("membershipHash"),
+ gr.NAME: Parameter("name"),
+ gr.MODIFIED:
+ Parameter("timestamp")
+ },
+ Where=(gr.GROUP_GUID == Parameter("groupUID"))
+ )
@classproperty
- def _groupByGUID(cls):
+ def _groupByUID(cls):
gr = schema.GROUPS
- return Select([gr.GROUP_ID, gr.NAME, gr.MEMBERSHIP_HASH], From=gr,
- Where=(
- gr.GROUP_GUID == Parameter("groupGUID")
- )
- )
+ return Select(
+ [gr.GROUP_ID, gr.NAME, gr.MEMBERSHIP_HASH],
+ From=gr,
+ Where=(gr.GROUP_GUID == Parameter("groupUID"))
+ )
@classproperty
def _groupByID(cls):
gr = schema.GROUPS
- return Select([gr.GROUP_GUID, gr.NAME, gr.MEMBERSHIP_HASH], From=gr,
- Where=(
- gr.GROUP_ID == Parameter("groupID")
- )
- )
+ return Select(
+ [gr.GROUP_GUID, gr.NAME, gr.MEMBERSHIP_HASH],
+ From=gr,
+ Where=(gr.GROUP_ID == Parameter("groupID"))
+ )
@classproperty
def _deleteGroup(cls):
gr = schema.GROUPS
- return Delete(From=gr,
- Where=(gr.GROUP_ID == Parameter("groupID")))
+ return Delete(
+ From=gr,
+ Where=(gr.GROUP_ID == Parameter("groupID"))
+ )
- def addGroup(self, groupGUID, name, membershipHash):
+ def addGroup(self, groupUID, name, membershipHash):
"""
- @type groupGUID: C{UUID}
+ @type groupUID: C{unicode}
+ @type name: C{unicode}
+ @type membershipHash: C{str}
"""
- return self._addGroupQuery.on(self, name=name,
- groupGUID=str(groupGUID), membershipHash=membershipHash)
+ return self._addGroupQuery.on(
+ self,
+ name=name.encode("utf-8"),
+ groupUID=groupUID.encode("utf-8"),
+ membershipHash=membershipHash
+ )
- def updateGroup(self, groupGUID, name, membershipHash):
+ def updateGroup(self, groupUID, name, membershipHash):
"""
- @type groupGUID: C{UUID}
+ @type groupUID: C{unicode}
+ @type name: C{unicode}
+ @type membershipHash: C{str}
"""
timestamp = datetime.datetime.utcnow()
- return self._updateGroupQuery.on(self, name=name,
- groupGUID=str(groupGUID), timestamp=timestamp,
- membershipHash=membershipHash)
+ return self._updateGroupQuery.on(
+ self,
+ name=name.encode("utf-8"),
+ groupUID=groupUID.encode("utf-8"),
+ timestamp=timestamp,
+ membershipHash=membershipHash
+ )
@inlineCallbacks
- def groupByGUID(self, groupGUID):
+ def groupByUID(self, groupUID):
"""
- @type groupGUID: C{UUID}
+ Return or create a record for the group UID.
+
+ @type groupUID: C{unicode}
+
+ @return: Deferred firing with tuple of group ID C{str}, group name
+ C{unicode}, and membership hash C{str}
"""
- results = (yield self._groupByGUID.on(self, groupGUID=str(groupGUID)))
+ results = (
+ yield self._groupByUID.on(
+ self, groupUID=groupUID.encode("utf-8")
+ )
+ )
if results:
- returnValue(results[0])
+ returnValue((
+ results[0][0], # group id
+ results[0][1].decode("utf-8"), # name
+ results[0][2], # membership hash
+ ))
else:
- savepoint = SavepointAction("groupByGUID")
+ savepoint = SavepointAction("groupByUID")
yield savepoint.acquire(self)
try:
- yield self.addGroup(groupGUID, "", "")
+ yield self.addGroup(groupUID, u"", "")
except Exception:
yield savepoint.rollback(self)
- results = (yield self._groupByGUID.on(self,
- groupGUID=str(groupGUID)))
+ results = (
+ yield self._groupByUID.on(
+ self, groupUID=groupUID.encode("utf-8")
+ )
+ )
if results:
- returnValue(results[0])
+ returnValue((
+ results[0][0], # group id
+ results[0][1].decode("utf-8"), # name
+ results[0][2], # membership hash
+ ))
else:
raise
else:
yield savepoint.release(self)
- results = (yield self._groupByGUID.on(self,
- groupGUID=str(groupGUID)))
+ results = (
+ yield self._groupByUID.on(
+ self, groupUID=groupUID.encode("utf-8")
+ )
+ )
if results:
- returnValue(results[0])
+ returnValue((
+ results[0][0], # group id
+ results[0][1].decode("utf-8"), # name
+ results[0][2], # membership hash
+ ))
else:
raise
@inlineCallbacks
def groupByID(self, groupID):
+ """
+ Given a group ID, return the group UID, or raise NotFoundError
+
+ @type groupID: C{str}
+ @return: Deferred firing with a tuple of group UID C{unicode},
+ group name C{unicode}, and membership hash C{str}
+ """
try:
results = (yield self._groupByID.on(self, groupID=groupID))[0]
if results:
- results = [UUID("urn:uuid:" + results[0])] + results[1:]
+ results = (
+ results[0].decode("utf-8"),
+ results[1].decode("utf-8"),
+ results[2]
+ )
returnValue(results)
except IndexError:
raise NotFoundError
@@ -1037,7 +1103,7 @@
return Insert(
{
gm.GROUP_ID: Parameter("groupID"),
- gm.MEMBER_GUID: Parameter("memberGUID")
+ gm.MEMBER_GUID: Parameter("memberUID")
}
)
@@ -1050,7 +1116,7 @@
Where=(
gm.GROUP_ID == Parameter("groupID")
).And(
- gm.MEMBER_GUID == Parameter("memberGUID")
+ gm.MEMBER_GUID == Parameter("memberUID")
)
)
@@ -1069,25 +1135,35 @@
@classproperty
def _selectGroupsForQuery(cls):
+ gr = schema.GROUPS
gm = schema.GROUP_MEMBERSHIP
+
return Select(
- [gm.GROUP_ID],
- From=gm,
+ [gr.GROUP_GUID],
+ From=gr,
Where=(
- gm.MEMBER_GUID == Parameter("guid")
+ gr.GROUP_ID.In(
+ Select(
+ [gm.GROUP_ID],
+ From=gm,
+ Where=(
+ gm.MEMBER_GUID == Parameter("uid")
+ )
+ )
+ )
)
)
- def addMemberToGroup(self, memberGUID, groupID):
+ def addMemberToGroup(self, memberUID, groupID):
return self._addMemberToGroupQuery.on(
- self, groupID=groupID, memberGUID=str(memberGUID)
+ self, groupID=groupID, memberUID=memberUID.encode("utf-8")
)
- def removeMemberFromGroup(self, memberGUID, groupID):
+ def removeMemberFromGroup(self, memberUID, groupID):
return self._removeMemberFromGroupQuery.on(
- self, groupID=groupID, memberGUID=str(memberGUID)
+ self, groupID=groupID, memberUID=memberUID.encode("utf-8")
)
@@ -1107,25 +1183,29 @@
members = set()
results = (yield self._selectGroupMembersQuery.on(self, groupID=groupID))
for row in results:
- members.add(UUID("urn:uuid:" + row[0]))
+ members.add(row[0].decode("utf-8"))
returnValue(members)
@inlineCallbacks
- def groupsFor(self, guid):
+ def groupsFor(self, uid):
"""
- Returns the cached set of GUIDs for the groups this given guid is
+ Returns the cached set of UIDs for the groups this given uid is
a member of.
- @param guid: the guid
- @type guid: C{UUID}
+ @param uid: the uid
+ @type uid: C{unicode}
@return: the set of group IDs
@rtype: a Deferred which fires with a set() of C{int} group IDs
"""
groups = set()
- results = (yield self._selectGroupsForQuery.on(self, guid=str(guid)))
+ results = (
+ yield self._selectGroupsForQuery.on(
+ self, uid=uid.encode("utf-8")
+ )
+ )
for row in results:
- groups.add(row[0])
+ groups.add(row[0].decode("utf-8"))
returnValue(groups)
# End of Group Members
@@ -1168,6 +1248,19 @@
@classproperty
+ def _removeDelegatesQuery(cls):
+ de = schema.DELEGATES
+ return Delete(
+ From=de,
+ Where=(
+ de.DELEGATOR == Parameter("delegator")
+ ).And(
+ de.READ_WRITE == Parameter("readWrite")
+ )
+ )
+
+
+ @classproperty
def _removeDelegateGroupQuery(cls):
ds = schema.DELEGATE_GROUPS
return Delete(
@@ -1183,6 +1276,19 @@
@classproperty
+ def _removeDelegateGroupsQuery(cls):
+ ds = schema.DELEGATE_GROUPS
+ return Delete(
+ From=ds,
+ Where=(
+ ds.DELEGATOR == Parameter("delegator")
+ ).And(
+ ds.READ_WRITE == Parameter("readWrite")
+ )
+ )
+
+
+ @classproperty
def _selectDelegatesQuery(cls):
de = schema.DELEGATES
return Select(
@@ -1199,13 +1305,23 @@
@classproperty
def _selectDelegateGroupsQuery(cls):
ds = schema.DELEGATE_GROUPS
+ gr = schema.GROUPS
+
return Select(
- [ds.GROUP_ID],
- From=ds,
+ [gr.GROUP_GUID],
+ From=gr,
Where=(
- ds.DELEGATOR == Parameter("delegator")
- ).And(
- ds.READ_WRITE == Parameter("readWrite")
+ gr.GROUP_ID.In(
+ Select(
+ [ds.GROUP_ID],
+ From=ds,
+ Where=(
+ ds.DELEGATOR == Parameter("delegator")
+ ).And(
+ ds.READ_WRITE == Parameter("readWrite")
+ )
+ )
+ )
)
)
@@ -1317,18 +1433,18 @@
Adds a row to the DELEGATES table. The delegate should not be a
group. To delegate to a group, call addDelegateGroup() instead.
- @param delegator: the GUID of the delegator
- @type delegator: C{UUID}
- @param delegate: the GUID of the delegate
- @type delegate: C{UUID}
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param delegate: the UID of the delegate
+ @type delegate: C{unicode}
@param readWrite: grant read and write access if True, otherwise
read-only access
@type readWrite: C{boolean}
"""
return self._addDelegateQuery.on(
self,
- delegator=str(delegator),
- delegate=str(delegate),
+ delegator=delegator.encode("utf-8"),
+ delegate=delegate.encode("utf-8"),
readWrite=1 if readWrite else 0
)
@@ -1339,8 +1455,8 @@
Adds a row to the DELEGATE_GROUPS table. The delegate should be a
group. To delegate to a person, call addDelegate() instead.
- @param delegator: the GUID of the delegator
- @type delegator: C{UUID}
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
@param delegateGroupID: the GROUP_ID of the delegate group
@type delegateGroupID: C{int}
@param readWrite: grant read and write access if True, otherwise
@@ -1349,7 +1465,7 @@
"""
return self._addDelegateGroupQuery.on(
self,
- delegator=str(delegator),
+ delegator=delegator.encode("utf-8"),
groupID=delegateGroupID,
readWrite=1 if readWrite else 0,
isExternal=1 if isExternal else 0
@@ -1361,29 +1477,47 @@
Removes a row from the DELEGATES table. The delegate should not be a
group. To remove a delegate group, call removeDelegateGroup() instead.
- @param delegator: the GUID of the delegator
- @type delegator: C{UUID}
- @param delegate: the GUID of the delegate
- @type delegate: C{UUID}
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param delegate: the UID of the delegate
+ @type delegate: C{unicode}
@param readWrite: remove read and write access if True, otherwise
read-only access
@type readWrite: C{boolean}
"""
return self._removeDelegateQuery.on(
self,
- delegator=str(delegator),
- delegate=str(delegate),
+ delegator=delegator.encode("utf-8"),
+ delegate=delegate.encode("utf-8"),
readWrite=1 if readWrite else 0
)
+ def removeDelegates(self, delegator, readWrite):
+ """
+ Removes all rows for this delegator/readWrite combination from the
+ DELEGATES table.
+
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param readWrite: remove read and write access if True, otherwise
+ read-only access
+ @type readWrite: C{boolean}
+ """
+ return self._removeDelegatesQuery.on(
+ self,
+ delegator=delegator.encode("utf-8"),
+ readWrite=1 if readWrite else 0
+ )
+
+
def removeDelegateGroup(self, delegator, delegateGroupID, readWrite):
"""
Removes a row from the DELEGATE_GROUPS table. The delegate should be a
group. To remove a delegate person, call removeDelegate() instead.
- @param delegator: the GUID of the delegator
- @type delegator: C{UUID}
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
@param delegateGroupID: the GROUP_ID of the delegate group
@type delegateGroupID: C{int}
@param readWrite: remove read and write access if True, otherwise
@@ -1392,26 +1526,44 @@
"""
return self._removeDelegateGroupQuery.on(
self,
- delegator=str(delegator),
+ delegator=delegator.encode("utf-8"),
groupID=delegateGroupID,
readWrite=1 if readWrite else 0
)
+ def removeDelegateGroups(self, delegator, readWrite):
+ """
+ Removes all rows for this delegator/readWrite combination from the
+ DELEGATE_GROUPS table.
+
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param readWrite: remove read and write access if True, otherwise
+ read-only access
+ @type readWrite: C{boolean}
+ """
+ return self._removeDelegateGroupsQuery.on(
+ self,
+ delegator=delegator.encode("utf-8"),
+ readWrite=1 if readWrite else 0
+ )
+
+
@inlineCallbacks
- def delegates(self, delegator, readWrite):
+ def delegates(self, delegator, readWrite, expanded=False):
"""
- Returns the GUIDs of all delegates for the given delegator. If
- delegate access was granted to any groups, those groups' members
- (flattened) will be included. No GUIDs of the groups themselves
- will be returned.
+ Returns the UIDs of all delegates for the given delegator. If
+ expanded is False, only the direct delegates (users and groups)
+ are returned. If expanded is True, the expanded membership is
+ returned, not including the groups themselves.
- @param delegator: the GUID of the delegator
- @type delegator: C{UUID}
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
@param readWrite: the access-type to check for; read and write
access if True, otherwise read-only access
@type readWrite: C{boolean}
- @returns: the GUIDs of the delegates (for the specified access
+ @returns: the UIDs of the delegates (for the specified access
type)
@rtype: a Deferred resulting in a set
"""
@@ -1421,35 +1573,48 @@
results = (
yield self._selectDelegatesQuery.on(
self,
- delegator=str(delegator),
+ delegator=delegator.encode("utf-8"),
readWrite=1 if readWrite else 0
)
)
for row in results:
- delegates.add(UUID("urn:uuid:" + row[0]))
+ delegates.add(row[0].decode("utf-8"))
- # Finally get those who are in groups which have been delegated to
- results = (
- yield self._selectIndirectDelegatesQuery.on(
- self,
- delegator=str(delegator),
- readWrite=1 if readWrite else 0
+ if expanded:
+ # Get those who are in groups which have been delegated to
+ results = (
+ yield self._selectIndirectDelegatesQuery.on(
+ self,
+ delegator=delegator.encode("utf-8"),
+ readWrite=1 if readWrite else 0
+ )
)
- )
- for row in results:
- delegates.add(UUID("urn:uuid:" + row[0]))
+ for row in results:
+ delegates.add(row[0].decode("utf-8"))
+ else:
+ # Get the directly-delegated-to groups
+ results = (
+ yield self._selectDelegateGroupsQuery.on(
+ self,
+ delegator=delegator.encode("utf-8"),
+ readWrite=1 if readWrite else 0
+ )
+ )
+ for row in results:
+ delegates.add(row[0].decode("utf-8"))
+
returnValue(delegates)
@inlineCallbacks
def delegators(self, delegate, readWrite):
"""
- Returns the GUIDs of all delegators which have granted access to
+ Returns the UIDs of all delegators which have granted access to
the given delegate, either directly or indirectly via groups.
- @param delegate: the GUID of the delegate
- @type delegate: C{UUID}
+ @param delegate: the UID of the delegate
+ @type delegate: C{unicode}
@param readWrite: the access-type to check for; read and write
access if True, otherwise read-only access
@type readWrite: C{boolean}
@@ -1463,24 +1628,24 @@
results = (
yield self._selectDirectDelegatorsQuery.on(
self,
- delegate=str(delegate),
+ delegate=delegate.encode("utf-8"),
readWrite=1 if readWrite else 0
)
)
for row in results:
- delegators.add(UUID("urn:uuid:" + row[0]))
+ delegators.add(row[0].decode("utf-8"))
# Finally get those who have delegated to groups the delegate
# is a member of
results = (
yield self._selectIndirectDelegatorsQuery.on(
self,
- delegate=str(delegate),
+ delegate=delegate.encode("utf-8"),
readWrite=1 if readWrite else 0
)
)
for row in results:
- delegators.add(UUID("urn:uuid:" + row[0]))
+ delegators.add(row[0].decode("utf-8"))
returnValue(delegators)
@@ -1488,11 +1653,11 @@
@inlineCallbacks
def allGroupDelegates(self):
"""
- Return the GUIDs of all groups which have been delegated to. Useful
+ Return the UIDs of all groups which have been delegated to. Useful
for obtaining the set of groups which need to be synchronized from
the directory.
- @returns: the GUIDs of all delegated-to groups
+ @returns: the UIDs of all delegated-to groups
@rtype: a Deferred resulting in a set
"""
gr = schema.GROUPS
@@ -1505,7 +1670,7 @@
).on(self))
delegates = set()
for row in results:
- delegates.add(UUID("urn:uuid:" + row[0]))
+ delegates.add(row[0].decode("utf-8"))
returnValue(delegates)
@@ -1513,22 +1678,22 @@
@inlineCallbacks
def externalDelegates(self):
"""
- Returns a dictionary mapping delegate GUIDs to (read-group, write-group)
+ Returns a dictionary mapping delegate UIDs to (read-group, write-group)
tuples, including only those assignments that originated from the
directory.
- @returns: dictionary mapping delegator guid to (readDelegateGUID,
- writeDelegateGUID) tuples
+ @returns: dictionary mapping delegator uid to (readDelegateUID,
+ writeDelegateUID) tuples
@rtype: a Deferred resulting in a dictionary
"""
delegates = {}
# Get the externally managed delegates (which are all groups)
results = (yield self._selectExternalDelegateGroupsQuery.on(self))
- for delegator, readDelegateGUID, writeDelegateGUID in results:
- delegates[UUID(delegator)] = (
- UUID(readDelegateGUID) if readDelegateGUID else None,
- UUID(writeDelegateGUID) if writeDelegateGUID else None
+ for delegator, readDelegateUID, writeDelegateUID in results:
+ delegates[delegator.encode("utf-8")] = (
+ readDelegateUID.encode("utf-8") if readDelegateUID else None,
+ writeDelegateUID.encode("utf-8") if writeDelegateUID else None
)
returnValue(delegates)
@@ -1537,7 +1702,7 @@
@inlineCallbacks
def assignExternalDelegates(
self, delegator, readDelegateGroupID, writeDelegateGroupID,
- readDelegateGUID, writeDelegateGUID
+ readDelegateUID, writeDelegateUID
):
"""
Update the external delegate group table so we can quickly identify
@@ -1560,12 +1725,12 @@
)
# Store new assignments in the external comparison table
- if readDelegateGUID or writeDelegateGUID:
+ if readDelegateUID or writeDelegateUID:
readDelegateForDB = (
- str(readDelegateGUID) if readDelegateGUID else ""
+ readDelegateUID.encode("utf-8") if readDelegateUID else ""
)
writeDelegateForDB = (
- str(writeDelegateGUID) if writeDelegateGUID else ""
+ writeDelegateUID.encode("utf-8") if writeDelegateUID else ""
)
yield self._storeExternalDelegateGroupsPairQuery.on(
self,
@@ -2742,6 +2907,9 @@
@classmethod
@inlineCallbacks
def homeWithUID(cls, txn, uid, create=False):
+ """
+ @param uid: I'm going to assume uid is utf-8 encoded bytes
+ """
homeObject = yield cls.makeClass(txn, uid)
if homeObject is not None:
returnValue(homeObject)
@@ -2750,7 +2918,7 @@
returnValue(None)
# Determine if the user is local or external
- record = txn.directoryService().recordWithUID(uid)
+ record = yield txn.directoryService().recordWithUID(uid.decode("utf-8"))
if record is None:
raise DirectoryRecordNotFoundError("Cannot create home for UID since no directory record exists: {}".format(uid))
@@ -2844,7 +3012,7 @@
def directoryRecord(self):
- return self.directoryService().recordWithUID(self.uid())
+ return self.directoryService().recordWithUID(self.uid().decode("utf-8"))
@inlineCallbacks
@@ -6744,6 +6912,9 @@
@classmethod
@inlineCallbacks
def notificationsWithUID(cls, txn, uid, create):
+ """
+ @param uid: I'm going to assume uid is utf-8 encoded bytes
+ """
rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
if rows:
@@ -6751,7 +6922,7 @@
created = False
elif create:
# Determine if the user is local or external
- record = txn.directoryService().recordWithUID(uid)
+ record = yield txn.directoryService().recordWithUID(uid.decode("utf-8"))
if record is None:
raise DirectoryRecordNotFoundError("Cannot create home for UID since no directory record exists: {}".format(uid))
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/test/util.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/test/util.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/common/datastore/test/util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -46,7 +46,7 @@
from twisted.application.service import Service
from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks
-from twisted.internet.defer import returnValue
+from twisted.internet.defer import returnValue, succeed
from twisted.internet.task import deferLater
from twisted.trial.unittest import TestCase
@@ -110,14 +110,14 @@
def recordWithUID(self, uid):
- return self.records.get(uid)
+ return succeed(self.records.get(uid))
def recordWithGUID(self, guid):
for record in self.records.itervalues():
if record.guid == guid:
- return record
- return None
+ return succeed(record)
+ return succeed(None)
def addRecord(self, record):
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/dps/client.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/dps/client.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/dps/client.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -15,53 +15,92 @@
##
import cPickle as pickle
+import uuid
from twext.python.log import Logger
from twext.who.directory import DirectoryRecord as BaseDirectoryRecord
from twext.who.directory import DirectoryService as BaseDirectoryService
-from twext.who.idirectory import RecordType
+from twext.who.expression import Operand
+from twext.who.idirectory import RecordType, IDirectoryService
import twext.who.idirectory
from twext.who.util import ConstantsContainer
from twisted.internet import reactor
-from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.internet.protocol import ClientCreator
from twisted.protocols import amp
+from twisted.python.constants import Names, NamedConstant
+from txdav.caldav.icalendardirectoryservice import ICalendarStoreDirectoryRecord
+from txdav.common.idirectoryservice import IStoreDirectoryService
from txdav.dps.commands import (
RecordWithShortNameCommand, RecordWithUIDCommand, RecordWithGUIDCommand,
RecordsWithRecordTypeCommand, RecordsWithEmailAddressCommand,
+ RecordsMatchingTokensCommand, RecordsMatchingFieldsCommand,
+ MembersCommand, GroupsCommand, SetMembersCommand,
VerifyPlaintextPasswordCommand, VerifyHTTPDigestCommand
)
+from txdav.who.directory import (
+ CalendarDirectoryRecordMixin, CalendarDirectoryServiceMixin
+)
+import txdav.who.delegates
import txdav.who.idirectory
from zope.interface import implementer
-
log = Logger()
-
##
## Client implementation of Directory Proxy Service
##
- at implementer(twext.who.idirectory.IDirectoryService)
-class DirectoryService(BaseDirectoryService):
+
+## MOVE2WHO TODOs:
+## augmented service
+## configuration of aggregate services
+## hooking up delegates
+## calverify needs deferreds, including:
+## component.normalizeCalendarUserAddresses
+
+ at implementer(IDirectoryService, IStoreDirectoryService)
+class DirectoryService(BaseDirectoryService, CalendarDirectoryServiceMixin):
"""
Client side of directory proxy
"""
recordType = ConstantsContainer(
(twext.who.idirectory.RecordType,
- txdav.who.idirectory.RecordType)
+ txdav.who.idirectory.RecordType,
+ txdav.who.delegates.RecordType)
)
+ fieldName = ConstantsContainer(
+ (twext.who.idirectory.FieldName,
+ txdav.who.idirectory.FieldName)
+ )
+
+ # MOVE2WHO: we talked about passing these in instead:
+ # def __init__(self, fieldNames, recordTypes):
+ # self.fieldName = fieldNames
+ # self.recordType = recordTypes
+
+
+ # MOVE2WHO needed?
+ def getGroups(self, guids=None):
+ return succeed(set())
+ # END MOVE2WHO
+
+
+
def _dictToRecord(self, serializedFields):
"""
- This to be replaced by something awesome
+ Turn a dictionary of fields sent from the server into a directory
+ record
"""
if not serializedFields:
return None
+ # print("FIELDS", serializedFields)
+
fields = {}
for fieldName, value in serializedFields.iteritems():
try:
@@ -70,8 +109,21 @@
# unknown field
pass
else:
- fields[field] = value
- fields[self.fieldName.recordType] = self.recordType.user
+ valueType = self.fieldName.valueType(field)
+ if valueType in (unicode, bool):
+ fields[field] = value
+ elif valueType is uuid.UUID:
+ fields[field] = uuid.UUID(value)
+ elif issubclass(valueType, Names):
+ if value is not None:
+ fields[field] = field.valueType.lookupByName(value)
+ else:
+ fields[field] = None
+ elif issubclass(valueType, NamedConstant):
+ if fieldName == "recordType": # Is there a better way?
+ fields[field] = self.recordType.lookupByName(value)
+
+ # print("AFTER:", fields)
return DirectoryRecord(self, fields)
@@ -127,15 +179,31 @@
def recordWithShortName(self, recordType, shortName):
+ # MOVE2WHO
+ # temporary hack until we can fix all callers not to pass strings:
+ if isinstance(recordType, (str, unicode)):
+ recordType = self.recordType.lookupByName(recordType)
+
+ # MOVE2WHO, REMOVE THIS HACK TOO:
+ if not isinstance(shortName, unicode):
+ log.warn("Need to change shortName to unicode")
+ shortName = shortName.decode("utf-8")
+
+
return self._call(
RecordWithShortNameCommand,
self._processSingleRecord,
- recordType=recordType.description.encode("utf-8"),
+ recordType=recordType.name.encode("utf-8"),
shortName=shortName.encode("utf-8")
)
def recordWithUID(self, uid):
+ # MOVE2WHO, REMOVE THIS:
+ if not isinstance(uid, unicode):
+ log.warn("Need to change uid to unicode")
+ uid = uid.decode("utf-8")
+
return self._call(
RecordWithUIDCommand,
self._processSingleRecord,
@@ -147,7 +215,7 @@
return self._call(
RecordWithGUIDCommand,
self._processSingleRecord,
- guid=guid.encode("utf-8")
+ guid=str(guid)
)
@@ -155,10 +223,12 @@
return self._call(
RecordsWithRecordTypeCommand,
self._processMultipleRecords,
- recordType=recordType.description.encode("utf-8")
+ recordType=recordType.name.encode("utf-8")
)
+ listRecords = recordsWithRecordType
+
def recordsWithEmailAddress(self, emailAddress):
return self._call(
RecordsWithEmailAddressCommand,
@@ -167,9 +237,47 @@
)
+ def recordsMatchingTokens(self, tokens, context=None, limitResults=50,
+ timeoutSeconds=10):
+ return self._call(
+ RecordsMatchingTokensCommand,
+ self._processMultipleRecords,
+ tokens=[t.encode("utf-8") for t in tokens],
+ context=context
+ )
-class DirectoryRecord(BaseDirectoryRecord):
+ def recordsMatchingFields(self, fields, operand=Operand.OR, recordType=None):
+
+ newFields = []
+ for fieldName, searchTerm, matchFlags, matchType in fields:
+ newFields.append(
+ (
+ fieldName.encode("utf-8"),
+ searchTerm.encode("utf-8"),
+ matchFlags.name.encode("utf-8"),
+ matchType.name.encode("utf-8")
+ )
+ )
+ if recordType is not None:
+ recordType = recordType.name.encode("utf-8")
+
+ return self._call(
+ RecordsMatchingFieldsCommand,
+ self._processMultipleRecords,
+ fields=newFields,
+ operand=operand.name.encode("utf-8"),
+ recordType=recordType
+ )
+
+
+
+
+
+ at implementer(ICalendarStoreDirectoryRecord)
+class DirectoryRecord(BaseDirectoryRecord, CalendarDirectoryRecordMixin):
+
+
def verifyPlaintextPassword(self, password):
return self.service._call(
VerifyPlaintextPasswordCommand,
@@ -201,6 +309,50 @@
+ def members(self):
+ return self.service._call(
+ MembersCommand,
+ self.service._processMultipleRecords,
+ uid=self.uid.encode("utf-8")
+ )
+
+
+ def groups(self):
+ return self.service._call(
+ GroupsCommand,
+ self.service._processMultipleRecords,
+ uid=self.uid.encode("utf-8")
+ )
+
+
+ def setMembers(self, members):
+ log.debug("DPS Client setMembers")
+ memberUIDs = [m.uid.encode("utf-8") for m in members]
+ return self.service._call(
+ SetMembersCommand,
+ lambda x: x['success'],
+ uid=self.uid.encode("utf-8"),
+ memberUIDs=memberUIDs
+ )
+
+
+ # For scheduling/freebusy
+ # FIXME: doesn't this need to happen in the DPS?
+ @inlineCallbacks
+ def isProxyFor(self, other):
+ for recordType in (
+ txdav.who.delegates.RecordType.readDelegatorGroup,
+ txdav.who.delegates.RecordType.writeDelegatorGroup,
+ ):
+ delegatorGroup = yield self.service.recordWithShortName(
+ recordType, self.uid
+ )
+ if delegatorGroup:
+ if other in (yield delegatorGroup.members()):
+ returnValue(True)
+
+
+
# Test client:
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/dps/commands.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/dps/commands.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/dps/commands.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -67,7 +67,27 @@
]
+class RecordsMatchingTokensCommand(amp.Command):
+ arguments = [
+ ('tokens', amp.ListOf(amp.String())),
+ ('context', amp.String(optional=True)),
+ ]
+ response = [
+ ('fieldsList', amp.String()),
+ ]
+
+class RecordsMatchingFieldsCommand(amp.Command):
+ arguments = [
+ ('fields', amp.ListOf(amp.ListOf(amp.String()))),
+ ('operand', amp.String()),
+ ('recordType', amp.String(optional=True)),
+ ]
+ response = [
+ ('fieldsList', amp.String()),
+ ]
+
+
class UpdateRecordsCommand(amp.Command):
arguments = [
('fieldsList', amp.String()),
@@ -89,6 +109,37 @@
+class MembersCommand(amp.Command):
+ arguments = [
+ ('uid', amp.String()),
+ ]
+ response = [
+ ('fieldsList', amp.String()),
+ ]
+
+
+
+class GroupsCommand(amp.Command):
+ arguments = [
+ ('uid', amp.String()),
+ ]
+ response = [
+ ('fieldsList', amp.String()),
+ ]
+
+
+
+class SetMembersCommand(amp.Command):
+ arguments = [
+ ('uid', amp.String()),
+ ('memberUIDs', amp.ListOf(amp.String())),
+ ]
+ response = [
+ ('success', amp.Boolean()),
+ ]
+
+
+
class VerifyPlaintextPasswordCommand(amp.Command):
arguments = [
('uid', amp.String()),
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/dps/server.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/dps/server.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/dps/server.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -15,10 +15,10 @@
##
import cPickle as pickle
-import os
import uuid
from twext.python.log import Logger
+from twext.who.expression import MatchType, MatchFlags, Operand
from twext.who.idirectory import RecordType
from twisted.application import service
from twisted.application.strports import service as strPortsService
@@ -26,21 +26,22 @@
from twisted.internet.protocol import Factory
from twisted.plugin import IPlugin
from twisted.protocols import amp
-from twisted.python.filepath import FilePath
+from twisted.python.constants import Names, NamedConstant
from twisted.python.usage import Options, UsageError
from twistedcaldav.config import config
from twistedcaldav.stdconfig import DEFAULT_CONFIG, DEFAULT_CONFIG_FILE
from txdav.dps.commands import (
RecordWithShortNameCommand, RecordWithUIDCommand, RecordWithGUIDCommand,
RecordsWithRecordTypeCommand, RecordsWithEmailAddressCommand,
+ RecordsMatchingTokensCommand, RecordsMatchingFieldsCommand,
+ MembersCommand, GroupsCommand, SetMembersCommand,
VerifyPlaintextPasswordCommand, VerifyHTTPDigestCommand,
# UpdateRecordsCommand, RemoveRecordsCommand
)
-from twext.who.ldap import DirectoryService as LDAPDirectoryService
-from txdav.who.xml import DirectoryService as XMLDirectoryService
+from txdav.who.util import directoryFromConfig
from zope.interface import implementer
-from twisted.cred.credentials import UsernamePassword
+
log = Logger()
@@ -63,15 +64,21 @@
def recordToDict(self, record):
"""
- This to be replaced by something awesome
+ Turn a record in a dictionary of fields which can be reconstituted
+ within the client
"""
fields = {}
if record is not None:
for field, value in record.fields.iteritems():
- # print("%s: %s" % (field.name, value))
- valueType = self._directory.fieldName.valueType(field)
- if valueType is unicode:
+ valueType = record.service.fieldName.valueType(field)
+ # print("%s: %s (%s)" % (field.name, value, valueType))
+ if valueType in (unicode, bool):
fields[field.name] = value
+ elif valueType is uuid.UUID:
+ fields[field.name] = str(value)
+ elif issubclass(valueType, (Names, NamedConstant)):
+ fields[field.name] = value.name if value else None
+ # print("Server side fields", fields)
return fields
@@ -82,7 +89,7 @@
shortName = shortName.decode("utf-8")
log.debug("RecordWithShortName: {r} {n}", r=recordType, n=shortName)
record = (yield self._directory.recordWithShortName(
- RecordType.lookupByName(recordType), shortName)
+ self._directory.recordType.lookupByName(recordType), shortName)
)
fields = self.recordToDict(record)
response = {
@@ -158,6 +165,124 @@
returnValue(response)
+ @RecordsMatchingTokensCommand.responder
+ @inlineCallbacks
+ def recordsMatchingTokens(self, tokens, context=None):
+ tokens = [t.decode("utf-8") for t in tokens]
+ log.debug("RecordsMatchingTokens: {t}", t=(", ".join(tokens)))
+ records = yield self._directory.recordsMatchingTokens(
+ tokens, context=context
+ )
+ fieldsList = []
+ for record in records:
+ fieldsList.append(self.recordToDict(record))
+ response = {
+ "fieldsList": pickle.dumps(fieldsList),
+ }
+ log.debug("Responding with: {response}", response=response)
+ returnValue(response)
+
+
+ @RecordsMatchingFieldsCommand.responder
+ @inlineCallbacks
+ def recordsMatchingFields(self, fields, operand="OR", recordType=None):
+ log.debug("RecordsMatchingFields")
+ newFields = []
+ for fieldName, searchTerm, matchFlags, matchType in fields:
+ fieldName = fieldName.decode("utf-8")
+ searchTerm = searchTerm.decode("utf-8")
+ matchFlags = MatchFlags.lookupByName(matchFlags.decode("utf-8"))
+ matchType = MatchType.lookupByName(matchType.decode("utf-8"))
+ newFields.append((fieldName, searchTerm, matchFlags, matchType))
+ operand = Operand.lookupByName(operand)
+ if recordType:
+ recordType = RecordType.lookupByName(recordType)
+ records = yield self._directory.recordsMatchingFields(
+ newFields, operand=operand, recordType=recordType
+ )
+ fieldsList = []
+ for record in records:
+ fieldsList.append(self.recordToDict(record))
+ response = {
+ "fieldsList": pickle.dumps(fieldsList),
+ }
+ log.debug("Responding with: {response}", response=response)
+ returnValue(response)
+
+
+ @MembersCommand.responder
+ @inlineCallbacks
+ def members(self, uid):
+ uid = uid.decode("utf-8")
+ log.debug("Members: {u}", u=uid)
+ try:
+ record = (yield self._directory.recordWithUID(uid))
+ except Exception as e:
+ log.error("Failed in members", error=e)
+ record = None
+
+ fieldsList = []
+ if record is not None:
+ for member in (yield record.members()):
+ fieldsList.append(self.recordToDict(member))
+ response = {
+ "fieldsList": pickle.dumps(fieldsList),
+ }
+ log.debug("Responding with: {response}", response=response)
+ returnValue(response)
+
+
+ @SetMembersCommand.responder
+ @inlineCallbacks
+ def setMembers(self, uid, memberUIDs):
+ uid = uid.decode("utf-8")
+ memberUIDs = [m.decode("utf-8") for m in memberUIDs]
+ log.debug("Set Members: {u} -> {m}", u=uid, m=memberUIDs)
+ try:
+ record = (yield self._directory.recordWithUID(uid))
+ except Exception as e:
+ log.error("Failed in setMembers", error=e)
+ record = None
+
+ if record is not None:
+ memberRecords = []
+ for memberUID in memberUIDs:
+ memberRecord = yield self._directory.recordWithUID(memberUID)
+ if memberRecord is not None:
+ memberRecords.append(memberRecord)
+ yield record.setMembers(memberRecords)
+ success = True
+ else:
+ success = False
+
+ response = {
+ "success": success,
+ }
+ log.debug("Responding with: {response}", response=response)
+ returnValue(response)
+
+
+ @GroupsCommand.responder
+ @inlineCallbacks
+ def groups(self, uid):
+ uid = uid.decode("utf-8")
+ log.debug("Groups: {u}", u=uid)
+ try:
+ record = (yield self._directory.recordWithUID(uid))
+ except Exception as e:
+ log.error("Failed in groups", error=e)
+ record = None
+
+ fieldsList = []
+ for group in (yield record.groups()):
+ fieldsList.append(self.recordToDict(group))
+ response = {
+ "fieldsList": pickle.dumps(fieldsList),
+ }
+ log.debug("Responding with: {response}", response=response)
+ returnValue(response)
+
+
@VerifyPlaintextPasswordCommand.responder
@inlineCallbacks
def verifyPlaintextPassword(self, uid, password):
@@ -333,36 +458,17 @@
else:
setproctitle("CalendarServer Directory Proxy Service")
- directoryType = config.DirectoryProxy.DirectoryType
- args = config.DirectoryProxy.Arguments
- kwds = config.DirectoryProxy.Keywords
+ try:
+ directory = directoryFromConfig(config)
+ except Exception as e:
+ log.error("Failed to create directory service", error=e)
+ raise
- if directoryType == "OD":
- from twext.who.opendirectory import DirectoryService as ODDirectoryService
- directory = ODDirectoryService(*args, **kwds)
+ log.info("Created directory service")
- elif directoryType == "LDAP":
- authDN = kwds.pop("authDN", "")
- password = kwds.pop("password", "")
- if authDN and password:
- creds = UsernamePassword(authDN, password)
- else:
- creds = None
- kwds["credentials"] = creds
- debug = kwds.pop("debug", "")
- directory = LDAPDirectoryService(*args, _debug=debug, **kwds)
-
- elif directoryType == "XML":
- path = kwds.pop("path", "")
- if not path or not os.path.exists(path):
- log.error("Path not found for XML directory: {p}", p=path)
- fp = FilePath(path)
- directory = XMLDirectoryService(fp, *args, **kwds)
-
- else:
- log.error("Invalid DirectoryType: {dt}", dt=directoryType)
-
- desc = "unix:{path}:mode=660".format(
- path=config.DirectoryProxy.SocketPath
+ return strPortsService(
+ "unix:{path}:mode=660".format(
+ path=config.DirectoryProxy.SocketPath
+ ),
+ DirectoryProxyAMPFactory(directory)
)
- return strPortsService(desc, DirectoryProxyAMPFactory(directory))
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/dps/test/test.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/dps/test/test.xml 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/dps/test/test.xml 2014-03-13 20:40:10 UTC (rev 12899)
@@ -23,6 +23,7 @@
<record type="user">
<uid>__sagen__</uid>
+ <guid>B3B1158F-0564-4F5B-81E4-A89EA5FF81B0</guid>
<short-name>sagen</short-name>
<full-name>Morgen Sagen</full-name>
<password>negas</password>
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/dps/test/test_client.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/dps/test/test_client.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/dps/test/test_client.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -25,17 +25,34 @@
from twisted.trial import unittest
from txdav.dps.client import DirectoryService
from txdav.dps.server import DirectoryProxyAMPProtocol
+from txdav.who.directory import CalendarDirectoryServiceMixin
testMode = "xml" # "xml" or "od"
if testMode == "xml":
from txdav.who.xml import DirectoryService as XMLDirectoryService
+
+ # Mix in the calendar-specific service methods
+ class CalendarXMLDirectorySerivce(
+ XMLDirectoryService,
+ CalendarDirectoryServiceMixin
+ ):
+ pass
+
elif testMode == "od":
odpw = "secret"
from twext.who.opendirectory import DirectoryService as OpenDirectoryService
+ # Mix in the calendar-specific service methods
+ class CalendarODDirectorySerivce(
+ OpenDirectoryService,
+ CalendarDirectoryServiceMixin
+ ):
+ pass
+
+
class DPSClientTest(unittest.TestCase):
def setUp(self):
@@ -46,9 +63,9 @@
# The "remote" directory service
if testMode == "xml":
path = os.path.join(os.path.dirname(__file__), "test.xml")
- remoteDirectory = XMLDirectoryService(FilePath(path))
+ remoteDirectory = CalendarXMLDirectorySerivce(FilePath(path))
elif testMode == "od":
- remoteDirectory = OpenDirectoryService()
+ remoteDirectory = CalendarODDirectorySerivce()
# Connect the two services directly via an IOPump
client = AMP()
@@ -112,6 +129,18 @@
@inlineCallbacks
+ def test_recordsMatchingTokens(self):
+ records = (yield self.directory.recordsMatchingTokens(
+ [u"anche"]
+ ))
+ self.assertEquals(len(records), 2)
+ self.assertEquals(
+ set([u"__dre__", u"__wsanchez__"]),
+ set([r.uid for r in records])
+ )
+
+
+ @inlineCallbacks
def test_verifyPlaintextPassword(self):
if testMode == "xml":
expectations = (
Copied: CalendarServer/branches/users/sagen/move2who-3/txdav/who/augment.py (from rev 12898, CalendarServer/branches/users/sagen/move2who-2/txdav/who/augment.py)
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/augment.py (rev 0)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/augment.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -0,0 +1,291 @@
+# -*- test-case-name: txdav.who.test.test_augment -*-
+##
+# Copyright (c) 2013 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Augmenting Directory Service
+"""
+
+from twext.python.log import Logger
+from twext.who.directory import DirectoryRecord
+from twext.who.directory import DirectoryService as BaseDirectoryService
+from twext.who.idirectory import IDirectoryService, RecordType
+from twext.who.util import ConstantsContainer
+from twisted.internet.defer import inlineCallbacks, returnValue
+from txdav.common.idirectoryservice import IStoreDirectoryService
+from txdav.who.directory import (
+ CalendarDirectoryRecordMixin, CalendarDirectoryServiceMixin
+)
+from txdav.who.idirectory import AutoScheduleMode, FieldName
+from zope.interface import implementer
+
+
+log = Logger()
+
+
+class AugmentedDirectoryRecord(DirectoryRecord, CalendarDirectoryRecordMixin):
+
+ def __init__(self, service, baseRecord, augmentedFields):
+ DirectoryRecord.__init__(self, service, augmentedFields)
+ self._baseRecord = baseRecord
+
+
+ @inlineCallbacks
+ def members(self):
+ augmented = []
+ records = yield self._baseRecord.members()
+ for record in records:
+ augmented.append((yield self.service.augment(record)))
+ returnValue(augmented)
+
+
+ @inlineCallbacks
+ def groups(self):
+ augmented = []
+ txn = self.service._store.newTransaction()
+ groupUIDs = yield txn.groupsFor(self.uid)
+ for groupUID in groupUIDs:
+ groupRecord = yield self.service.recordWithShortName(
+ RecordType.group, groupUID
+ )
+ if groupRecord:
+ augmented.append((yield self.service.augment(groupRecord)))
+ returnValue(augmented)
+
+
+ at implementer(IDirectoryService, IStoreDirectoryService)
+class AugmentedDirectoryService(BaseDirectoryService,
+ CalendarDirectoryServiceMixin):
+
+ fieldName = ConstantsContainer((
+ BaseDirectoryService.fieldName,
+ FieldName,
+ ))
+
+
+ @property
+ def recordType(self):
+ """
+ Defer to the directory service we're augmenting
+ """
+ return self._directory.recordType
+
+
+ def __init__(self, directory, store, augmentDB):
+ BaseDirectoryService.__init__(self, directory.realmName)
+ self._directory = directory
+ self._store = store
+ self._augmentDB = augmentDB
+
+
+ def recordTypes(self):
+ return self._directory.recordTypes()
+
+
+ @inlineCallbacks
+ def recordsFromExpression(self, expression):
+ records = yield self._directory.recordsFromExpression(expression)
+ augmented = []
+ for record in records:
+ record = yield self.augment(record)
+ augmented.append(record)
+ returnValue(augmented)
+
+
+ @inlineCallbacks
+ def recordsWithFieldValue(self, fieldName, value):
+ records = yield self._directory.recordsWithFieldValue(
+ fieldName, value
+ )
+ augmented = []
+ for record in records:
+ record = yield self.augment(record)
+ augmented.append(record)
+ returnValue(augmented)
+
+
+ @inlineCallbacks
+ def recordWithUID(self, uid):
+ # MOVE2WHO, REMOVE THIS:
+ if not isinstance(uid, unicode):
+ log.warn("Need to change uid to unicode")
+ uid = uid.decode("utf-8")
+
+ record = yield self._directory.recordWithUID(uid)
+ record = yield self.augment(record)
+ returnValue(record)
+
+
+ @inlineCallbacks
+ def recordWithGUID(self, guid):
+ record = yield self._directory.recordWithGUID(guid)
+ record = yield self.augment(record)
+ returnValue(record)
+
+
+ @inlineCallbacks
+ def recordsWithRecordType(self, recordType):
+ records = yield self._directory.recordsWithRecordType(recordType)
+ augmented = []
+ for record in records:
+ record = yield self.augment(record)
+ augmented.append(record)
+ returnValue(augmented)
+
+
+ @inlineCallbacks
+ def recordWithShortName(self, recordType, shortName):
+ # log.debug(
+ # "Augment - recordWithShortName {rt}, {n}",
+ # rt=recordType.name,
+ # n=shortName
+ # )
+ # MOVE2WHO, REMOVE THIS:
+ if not isinstance(shortName, unicode):
+ log.warn("Need to change shortName to unicode")
+ shortName = shortName.decode("utf-8")
+
+ record = yield self._directory.recordWithShortName(recordType, shortName)
+ record = yield self.augment(record)
+ # log.debug(
+ # "Augment - recordWithShortName {rt}, {n} returned {r}, {u}",
+ # rt=recordType.name,
+ # n=shortName,
+ # r=record.recordType.name,
+ # u=record.uid
+ # )
+ returnValue(record)
+
+
+ @inlineCallbacks
+ def recordsWithEmailAddress(self, emailAddress):
+ # MOVE2WHO, REMOVE THIS:
+ if not isinstance(emailAddress, unicode):
+ log.warn("Need to change emailAddress to unicode")
+ emailAddress = emailAddress.decode("utf-8")
+
+ records = yield self._directory.recordsWithEmailAddress(emailAddress)
+ augmented = []
+ for record in records:
+ record = yield self.augment(record)
+ augmented.append(record)
+ returnValue(augmented)
+
+
+ @inlineCallbacks
+ def listRecords(self, recordType):
+ records = yield self._directory.listRecords(recordType)
+ augmented = []
+ for record in records:
+ record = yield self.augment(record)
+ augmented.append(record)
+ returnValue(augmented)
+
+
+ @inlineCallbacks
+ def updateRecords(self, records, create=False):
+ return self._directory.updateRecords(records, create=create)
+
+
+ @inlineCallbacks
+ def removeRecords(self, uids):
+ return self._directory.removeRecords(uids)
+
+
+ def assignToField(self, fields, name, value):
+ field = self.fieldName.lookupByName(name)
+ fields[field] = value
+
+
+ @inlineCallbacks
+ def augment(self, record):
+ if record is None:
+ returnValue(None)
+
+ # MOVE2WHO
+ # FIXME: hacked by appending an "s" -- need a mapping
+ try:
+ augmentRecord = yield self._augmentDB.getAugmentRecord(
+ record.uid,
+ record.recordType.name + "s"
+ )
+ except KeyError:
+ # The augments DB does not know about this record type, so return
+ # the original record unmodified
+ returnValue(record)
+
+ fields = record.fields.copy()
+
+ # print("Got augment record", augmentRecord)
+
+ if augmentRecord:
+ # record.enabled = augmentRecord.enabled
+ # record.serverID = augmentRecord.serverID
+ self.assignToField(
+ fields, "hasCalendars",
+ augmentRecord.enabledForCalendaring
+ )
+ self.assignToField(
+ fields, "hasContacts",
+ augmentRecord.enabledForAddressBooks
+ )
+ autoScheduleMode = {
+ "none": AutoScheduleMode.none,
+ "accept-always": AutoScheduleMode.accept,
+ "decline-always": AutoScheduleMode.decline,
+ "accept-if-free": AutoScheduleMode.acceptIfFree,
+ "decline-if-busy": AutoScheduleMode.declineIfBusy,
+ "automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy,
+ }.get(augmentRecord.autoScheduleMode, None)
+ self.assignToField(
+ fields, "autoScheduleMode",
+ autoScheduleMode
+ )
+ self.assignToField(
+ fields, "autoAcceptGroup",
+ unicode(augmentRecord.autoAcceptGroup)
+ )
+ self.assignToField(
+ fields, "loginAllowed",
+ augmentRecord.enabledForLogin
+ )
+
+ if (
+ (
+ fields.get(self.fieldName.lookupByName("hasCalendars"), False) or
+ fields.get(self.fieldName.lookupByName("hasContacts"), False)
+ ) and record.recordType == RecordType.group
+ ):
+ self.assignToField(fields, "hasCalendars", False)
+ self.assignToField(fields, "hasContacts", False)
+
+ # For augment records cloned from the Default augment record,
+ # don't emit this message:
+ if not augmentRecord.clonedFromDefault:
+ log.error("Group '%s(%s)' cannot be enabled for calendaring or address books" % (record.guid, record.shortNames[0],))
+
+ else:
+ # Groups are by default always enabled
+ # record.enabled = (record.recordType == record.service.recordType_groups)
+ # record.serverID = ""
+ self.assignToField(fields, "hasCalendars", False)
+ self.assignToField(fields, "hasContacts", False)
+ self.assignToField(fields, "loginAllowed", False)
+
+ # print("Augmented fields", fields)
+
+ # Clone to a new record with the augmented fields
+ returnValue(AugmentedDirectoryRecord(self, record, fields))
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/who/delegates.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/delegates.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/delegates.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,4 +1,4 @@
-# -*- test-case-name: twext.who.test.test_delegates -*-
+# -*- test-case-name: txdav.who.test.test_delegates -*-
##
# Copyright (c) 2013 Apple Inc. All rights reserved.
#
@@ -19,13 +19,201 @@
Delegate assignments
"""
-from twisted.internet.defer import inlineCallbacks, returnValue
-from twext.who.idirectory import RecordType
+from twext.python.log import Logger
+from twext.who.directory import (
+ DirectoryService as BaseDirectoryService,
+ DirectoryRecord as BaseDirectoryRecord
+)
+from twext.who.expression import MatchExpression, MatchType
+from twext.who.idirectory import (
+ RecordType as BaseRecordType, FieldName, NotAllowedError
+)
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+from twisted.python.constants import Names, NamedConstant
-from twext.python.log import Logger
+
log = Logger()
+
+class RecordType(Names):
+ """
+ Constants for read-only delegates and read-write delegate groups
+ """
+
+ readDelegateGroup = NamedConstant()
+ readDelegateGroup.description = u"read-delegate-group"
+
+ writeDelegateGroup = NamedConstant()
+ writeDelegateGroup.description = u"write-delegate-group"
+
+ readDelegatorGroup = NamedConstant()
+ readDelegatorGroup.description = u"read-delegator-group"
+
+ writeDelegatorGroup = NamedConstant()
+ writeDelegatorGroup.description = u"write-delegator-group"
+
+
+class DirectoryRecord(BaseDirectoryRecord):
+
+
+ @inlineCallbacks
+ def members(self, expanded=False):
+ """
+ If this is a readDelegateGroup or writeDelegateGroup, the members
+ will consist of the records who are delegates *of* this record.
+ If this is a readDelegatorGroup or writeDelegatorGroup,
+ the members will consist of the records who have delegated *to*
+ this record.
+ """
+ parentUID, proxyType = self.uid.split("#")
+
+ txn = self.service._store.newTransaction()
+
+ if self.recordType in (
+ RecordType.readDelegateGroup, RecordType.writeDelegateGroup
+ ): # Members are delegates of this record
+ readWrite = (self.recordType is RecordType.writeDelegateGroup)
+ delegateUIDs = (
+ yield txn.delegates(parentUID, readWrite, expanded=expanded)
+ )
+
+ else: # Members have delegated to this record
+ readWrite = (self.recordType is RecordType.writeDelegatorGroup)
+ delegateUIDs = (
+ yield txn.delegators(parentUID, readWrite)
+ )
+
+ records = []
+ for uid in delegateUIDs:
+ if uid != parentUID:
+ record = (yield self.service._masterDirectory.recordWithUID(uid))
+ if record is not None:
+ records.append(record)
+ yield txn.commit()
+
+ returnValue(records)
+
+
+
+ @inlineCallbacks
+ def setMembers(self, memberRecords):
+ """
+ Replace the members of this group with the new members.
+
+ @param memberRecords: The new members of the group
+ @type memberRecords: iterable of L{iDirectoryRecord}s
+ """
+ if self.recordType not in (
+ RecordType.readDelegateGroup, RecordType.writeDelegateGroup
+ ):
+ raise NotAllowedError("Setting members not supported")
+
+ parentUID, proxyType = self.uid.split("#")
+ readWrite = (self.recordType is RecordType.writeDelegateGroup)
+
+ log.debug(
+ "Setting delegate assignments for {u} ({rw}) to {m}".format(
+ u=parentUID, rw=("write" if readWrite else "read"),
+ m=[r.uid for r in memberRecords]
+ )
+ )
+
+ txn = self.service._store.newTransaction()
+
+ yield txn.removeDelegates(parentUID, readWrite)
+ yield txn.removeDelegateGroups(parentUID, readWrite)
+
+ delegator = yield self.service._masterDirectory.recordWithUID(parentUID)
+
+ for delegate in memberRecords:
+ yield addDelegate(txn, delegator, delegate, readWrite)
+
+ yield txn.commit()
+
+
+
+def recordTypeToProxyType(recordType):
+ return {
+ RecordType.readDelegateGroup: "calendar-proxy-read",
+ RecordType.writeDelegateGroup: "calendar-proxy-write",
+ RecordType.readDelegatorGroup: "calendar-proxy-read-for",
+ RecordType.writeDelegatorGroup: "calendar-proxy-write-for",
+ }.get(recordType, None)
+
+
+def proxyTypeToRecordType(proxyType):
+ return {
+ "calendar-proxy-read": RecordType.readDelegateGroup,
+ "calendar-proxy-write": RecordType.writeDelegateGroup,
+ "calendar-proxy-read-for": RecordType.readDelegatorGroup,
+ "calendar-proxy-write-for": RecordType.writeDelegatorGroup,
+ }.get(proxyType, None)
+
+
+
+class DirectoryService(BaseDirectoryService):
+ """
+ Delegate directory service
+ """
+
+ recordType = RecordType
+
+
+ def __init__(self, realmName, store):
+ BaseDirectoryService.__init__(self, realmName)
+ self._store = store
+ self._masterDirectory = None
+
+
+ def setMasterDirectory(self, masterDirectory):
+ self._masterDirectory = masterDirectory
+
+
+ def recordWithShortName(self, recordType, shortName):
+ uid = shortName + "#" + recordTypeToProxyType(recordType)
+
+ record = DirectoryRecord(self, {
+ FieldName.uid: uid,
+ FieldName.recordType: recordType,
+ FieldName.shortNames: (uid,),
+ })
+ return succeed(record)
+
+
+ def recordWithUID(self, uid):
+ if "#" not in uid: # Not a delegate group uid
+ return succeed(None)
+ uid, proxyType = uid.split("#")
+ recordType = proxyTypeToRecordType(proxyType)
+ if recordType is None:
+ return succeed(None)
+ return self.recordWithShortName(recordType, uid)
+
+
+ @inlineCallbacks
+ def recordsFromExpression(self, expression, records=None):
+ """
+ It's only ever appropriate to look up a delegate group record by
+ shortName or uid. When wrapped by an aggregate directory, looking up
+ by shortName will already go directly to recordWithShortName. However
+ when looking up by UID, it won't. Inspect the expression to see if
+ it's one we can handle.
+ """
+ if isinstance(expression, MatchExpression):
+ if(
+ (expression.fieldName is FieldName.uid) and
+ (expression.matchType is MatchType.equals) and
+ ("#" in expression.fieldValue)
+ ):
+ record = yield self.recordWithUID(expression.fieldValue)
+ if record is not None:
+ returnValue((record,))
+
+ returnValue(())
+
+
+
@inlineCallbacks
def addDelegate(txn, delegator, delegate, readWrite):
"""
@@ -39,12 +227,12 @@
@param readWrite: if True, read and write access is granted; read-only
access otherwise
"""
- if delegate.recordType == RecordType.group:
+ if delegate.recordType == BaseRecordType.group:
# find the groupID
- groupID, name, membershipHash = (yield txn.groupByGUID(delegate.guid))
- yield txn.addDelegateGroup(delegator.guid, groupID, readWrite)
+ groupID, name, membershipHash = (yield txn.groupByUID(delegate.uid))
+ yield txn.addDelegateGroup(delegator.uid, groupID, readWrite)
else:
- yield txn.addDelegate(delegator.guid, delegate.guid, readWrite)
+ yield txn.addDelegate(delegator.uid, delegate.uid, readWrite)
@inlineCallbacks
@@ -60,16 +248,16 @@
@param readWrite: if True, read and write access is revoked; read-only
access otherwise
"""
- if delegate.recordType == RecordType.group:
+ if delegate.recordType == BaseRecordType.group:
# find the groupID
- groupID, name, membershipHash = (yield txn.groupByGUID(delegate.guid))
- yield txn.removeDelegateGroup(delegator.guid, groupID, readWrite)
+ groupID, name, membershipHash = (yield txn.groupByUID(delegate.uid))
+ yield txn.removeDelegateGroup(delegator.uid, groupID, readWrite)
else:
- yield txn.removeDelegate(delegator.guid, delegate.guid, readWrite)
+ yield txn.removeDelegate(delegator.uid, delegate.uid, readWrite)
@inlineCallbacks
-def delegatesOf(txn, delegator, readWrite):
+def delegatesOf(txn, delegator, readWrite, expanded=False):
"""
Return the records of the delegates of "delegator". The type of access
is specified by the "readWrite" parameter.
@@ -83,10 +271,12 @@
"""
records = []
directory = delegator.service
- delegateGUIDs = (yield txn.delegates(delegator.guid, readWrite))
- for guid in delegateGUIDs:
- if guid != delegator.guid:
- record = (yield directory.recordWithGUID(guid))
+ delegateUIDs = (
+ yield txn.delegates(delegator.uid, readWrite, expanded=expanded)
+ )
+ for uid in delegateUIDs:
+ if uid != delegator.uid:
+ record = (yield directory.recordWithUID(uid))
if record is not None:
records.append(record)
returnValue(records)
@@ -107,18 +297,10 @@
"""
records = []
directory = delegate.service
- delegatorGUIDs = (yield txn.delegators(delegate.guid, readWrite))
- for guid in delegatorGUIDs:
- if guid != delegate.guid:
- record = (yield directory.recordWithGUID(guid))
+ delegatorUIDs = (yield txn.delegators(delegate.uid, readWrite))
+ for uid in delegatorUIDs:
+ if uid != delegate.uid:
+ record = (yield directory.recordWithUID(uid))
if record is not None:
records.append(record)
returnValue(records)
-
-
-def allGroupDelegates(txn):
- """
- @return: the GUIDs of all groups which are currently delegated to
- @rtype: a Deferred which fires with a set() of GUID strings
- """
- return txn.allGroupDelegates()
Copied: CalendarServer/branches/users/sagen/move2who-3/txdav/who/directory.py (from rev 12898, CalendarServer/branches/users/sagen/move2who-2/txdav/who/directory.py)
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/directory.py (rev 0)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/directory.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -0,0 +1,344 @@
+##
+# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Calendar/Contacts specific methods for DirectoryRecord
+"""
+
+
+import uuid
+
+from twext.python.log import Logger
+from twext.who.expression import (
+ MatchType, Operand, MatchExpression, CompoundExpression, MatchFlags
+)
+from twext.who.idirectory import RecordType as BaseRecordType
+from twisted.cred.credentials import UsernamePassword
+from twisted.internet.defer import inlineCallbacks, returnValue
+from txdav.who.idirectory import RecordType as DAVRecordType
+from txweb2.auth.digest import DigestedCredentials
+
+log = Logger()
+
+
+__all__ = [
+ "CalendarDirectoryRecordMixin",
+ "CalendarDirectoryServiceMixin",
+]
+
+
+
+class CalendarDirectoryServiceMixin(object):
+
+ guid = "1332A615-4D3A-41FE-B636-FBE25BFB982E"
+
+ # Must maintain the hack for a bit longer:
+ def setPrincipalCollection(self, principalCollection):
+ """
+ Set the principal service that the directory relies on for doing proxy tests.
+
+ @param principalCollection: the principal collection.
+ @type principalCollection: L{DirectoryProvisioningResource}
+ """
+ self.principalCollection = principalCollection
+
+
+ @inlineCallbacks
+ def recordWithCalendarUserAddress(self, address):
+ # FIXME: moved this here to avoid circular import problems
+ from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
+ address = normalizeCUAddr(address)
+ record = None
+ if address.startswith("urn:uuid:"):
+ guid = address[9:]
+ record = yield self.recordWithGUID(uuid.UUID(guid))
+ elif address.startswith("mailto:"):
+ records = yield self.recordsWithEmailAddress(address[7:])
+ if records:
+ returnValue(records[0])
+ else:
+ returnValue(None)
+ elif address.startswith("/principals/"):
+ parts = address.split("/")
+ if len(parts) == 4:
+ if parts[2] == "__uids__":
+ uid = parts[3]
+ record = yield self.recordWithUID(uid)
+ else:
+ recordType = self.fieldName.lookupByName(parts[2])
+ record = yield self.recordWithShortName(recordType, parts[3])
+
+ returnValue(record if record and record.hasCalendars else None)
+
+
+ def recordsMatchingTokens(self, tokens, context=None, limitResults=50,
+ timeoutSeconds=10):
+ fields = [
+ ("fullNames", MatchType.contains),
+ ("emailAddresses", MatchType.startsWith),
+ ]
+ outer = []
+ for token in tokens:
+ inner = []
+ for name, matchType in fields:
+ inner.append(
+ MatchExpression(
+ self.fieldName.lookupByName(name),
+ token,
+ matchType,
+ MatchFlags.caseInsensitive
+ )
+ )
+ outer.append(
+ CompoundExpression(
+ inner,
+ Operand.OR
+ )
+ )
+ expression = CompoundExpression(outer, Operand.AND)
+ return self.recordsFromExpression(expression)
+
+
+ def recordsMatchingFieldsWithCUType(self, fields, operand=Operand.OR,
+ cuType=None):
+ if cuType:
+ recordType = CalendarDirectoryRecordMixin.fromCUType(cuType)
+ else:
+ recordType = None
+
+ return self.recordsMatchingFields(
+ fields, operand=operand, recordType=recordType
+ )
+
+
+ def recordsMatchingFields(self, fields, operand=Operand.OR, recordType=None):
+ """
+ @param fields: a iterable of tuples, each tuple consisting of:
+ directory field name (C{unicode})
+ search term (C{unicode})
+ match flags (L{twext.who.expression.MatchFlags})
+ match type (L{twext.who.expression.MatchType})
+ """
+ subExpressions = []
+ for fieldName, searchTerm, matchFlags, matchType in fields:
+ subExpression = MatchExpression(
+ self.fieldName.lookupByName(fieldName),
+ searchTerm,
+ matchType,
+ matchFlags
+ )
+ subExpressions.append(subExpression)
+
+ expression = CompoundExpression(subExpressions, operand)
+ return self.recordsFromExpression(expression)
+
+
+ # FIXME: Existing code assumes record type names are plural. Is there any
+ # reason to maintain backwards compatibility? I suppose there could be
+ # scripts referring to record type of "users", "locations"
+ def recordTypeToOldName(self, recordType):
+ return recordType.name + u"s"
+
+
+ def oldNameToRecordType(self, oldName):
+ return self.recordType.lookupByName(oldName[:-1])
+
+
+
+class CalendarDirectoryRecordMixin(object):
+
+
+ @inlineCallbacks
+ def verifyCredentials(self, credentials):
+
+ # XYZZY REMOVE THIS, it bypasses all authentication!:
+ returnValue(True)
+
+ if isinstance(credentials, UsernamePassword):
+ log.debug("UsernamePassword")
+ returnValue(
+ (yield self.verifyPlaintextPassword(credentials.password))
+ )
+
+ elif isinstance(credentials, DigestedCredentials):
+ log.debug("DigestedCredentials")
+ returnValue(
+ (yield self.verifyHTTPDigest(
+ self.shortNames[0],
+ self.service.realmName,
+ credentials.fields["uri"],
+ credentials.fields["nonce"],
+ credentials.fields.get("cnonce", ""),
+ credentials.fields["algorithm"],
+ credentials.fields.get("nc", ""),
+ credentials.fields.get("qop", ""),
+ credentials.fields["response"],
+ credentials.method
+ ))
+ )
+
+
+ @property
+ def calendarUserAddresses(self):
+ if not self.hasCalendars:
+ return frozenset()
+
+ try:
+ cuas = set(
+ ["mailto:%s" % (emailAddress,)
+ for emailAddress in self.emailAddresses]
+ )
+ except AttributeError:
+ cuas = set()
+
+ try:
+ if self.guid:
+ if isinstance(self.guid, uuid.UUID):
+ guid = unicode(self.guid).upper()
+ else:
+ guid = self.guid
+ cuas.add("urn:uuid:{guid}".format(guid=guid))
+ except AttributeError:
+ # No guid
+ pass
+ cuas.add("/principals/__uids__/{uid}/".format(uid=self.uid))
+ for shortName in self.shortNames:
+ cuas.add("/principals/{rt}/{sn}/".format(
+ rt=self.recordType.name + "s", sn=shortName)
+ )
+ return frozenset(cuas)
+
+
+ # Mapping from directory record.recordType to RFC2445 CUTYPE values
+ _cuTypes = {
+ BaseRecordType.user: 'INDIVIDUAL',
+ BaseRecordType.group: 'GROUP',
+ DAVRecordType.resource: 'RESOURCE',
+ DAVRecordType.location: 'ROOM',
+ }
+
+
+ def getCUType(self):
+ return self._cuTypes.get(self.recordType, "UNKNOWN")
+
+
+ @classmethod
+ def fromCUType(cls, cuType):
+ for key, val in cls._cuTypes.iteritems():
+ if val == cuType:
+ return key
+ return None
+
+
+ def applySACLs(self):
+ """
+ Disable calendaring and addressbooks as dictated by SACLs
+ """
+
+ # FIXME: need to re-implement SACLs
+ # if config.EnableSACLs and self.CheckSACL:
+ # username = self.shortNames[0]
+ # if self.CheckSACL(username, "calendar") != 0:
+ # self.log.debug("%s is not enabled for calendaring due to SACL"
+ # % (username,))
+ # self.enabledForCalendaring = False
+ # if self.CheckSACL(username, "addressbook") != 0:
+ # self.log.debug("%s is not enabled for addressbooks due to SACL"
+ # % (username,))
+ # self.enabledForAddressBooks = False
+
+
+ @property
+ def displayName(self):
+ return self.fullNames[0]
+
+
+ def cacheToken(self):
+ """
+ Generate a token that uniquely identifies the state of this record, for use
+ in a cache.
+ """
+ return hash((
+ self.__class__.__name__,
+ self.service.realmName,
+ self.recordType.name,
+ self.shortNames,
+ self.uid,
+ self.hasCalendars,
+ ))
+
+
+ def canonicalCalendarUserAddress(self):
+ """
+ Return a CUA for this record, preferring in this order:
+ urn:uuid: form
+ mailto: form
+ first in calendarUserAddresses list
+ """
+
+ cua = ""
+ for candidate in self.calendarUserAddresses:
+ # Pick the first one, but urn:uuid: and mailto: can override
+ if not cua:
+ cua = candidate
+ # But always immediately choose the urn:uuid: form
+ if candidate.startswith("urn:uuid:"):
+ cua = candidate
+ break
+ # Prefer mailto: if no urn:uuid:
+ elif candidate.startswith("mailto:"):
+ cua = candidate
+ return cua
+
+
+ def enabledAsOrganizer(self):
+ # MOVE2WHO FIXME TO LOOK AT CONFIG
+ if self.recordType == self.service.recordType.user:
+ return True
+ elif self.recordType == self.service.recordType.group:
+ return False # config.Scheduling.Options.AllowGroupAsOrganizer
+ elif self.recordType == self.service.recordType.location:
+ return False # config.Scheduling.Options.AllowLocationAsOrganizer
+ elif self.recordType == self.service.recordType.resource:
+ return False # config.Scheduling.Options.AllowResourceAsOrganizer
+ else:
+ return False
+
+
+ #MOVE2WHO
+ def thisServer(self):
+ return True
+
+
+ def isLoginEnabled(self):
+ return self.loginAllowed
+
+
+ #MOVE2WHO
+ def calendarsEnabled(self):
+ # In the old world, this *also* looked at config:
+ # return config.EnableCalDAV and self.enabledForCalendaring
+ return self.hasCalendars
+
+
+ def getAutoScheduleMode(self, organizer):
+ # MOVE2WHO Fix this to take organizer into account:
+ return self.autoScheduleMode
+
+
+ def canAutoSchedule(self, organizer=None):
+ # MOVE2WHO Fix this:
+ return True
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/who/groups.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/groups.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/groups.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,4 +1,4 @@
-# -*- test-case-name: twext.who.test.test_groups -*-
+# -*- test-case-name: txdav.who.test.test_groups -*-
##
# Copyright (c) 2013 Apple Inc. All rights reserved.
#
@@ -22,7 +22,6 @@
from twext.enterprise.dal.record import fromTable
from twext.enterprise.dal.syntax import Delete, Select
from twext.enterprise.jobqueue import WorkItem, PeerConnectionPool
-from txdav.who.delegates import allGroupDelegates
from twext.who.idirectory import RecordType
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.common.datastore.sql_tables import schema
@@ -46,17 +45,14 @@
# Delete all other work items
yield Delete(From=self.table, Where=None).on(self.transaction)
- oldGroupCacher = getattr(self.transaction, "_groupCacher", None)
- newGroupCacher = getattr(self.transaction, "_newGroupCacher", None)
- if oldGroupCacher is not None or newGroupCacher is not None:
+ groupCacher = getattr(self.transaction, "_groupCacher", None)
+ if groupCacher is not None:
# Schedule next update
- # TODO: Be sure to move updateSeconds to the new cacher
- # implementation
notBefore = (
datetime.datetime.utcnow() +
- datetime.timedelta(seconds=oldGroupCacher.updateSeconds)
+ datetime.timedelta(seconds=groupCacher.updateSeconds)
)
log.debug(
"Scheduling next group cacher update: {when}", when=notBefore
@@ -68,22 +64,13 @@
# New implmementation
try:
- newGroupCacher.update(self.transaction)
+ yield groupCacher.update(self.transaction)
except Exception, e:
log.error(
"Failed to update new group membership cache ({error})",
error=e
)
- # Old implmementation
- try:
- oldGroupCacher.updateCache()
- except Exception, e:
- log.error(
- "Failed to update old group membership cache ({error})",
- error=e
- )
-
else:
notBefore = (
datetime.datetime.utcnow() +
@@ -126,25 +113,28 @@
class GroupRefreshWork(WorkItem, fromTable(schema.GROUP_REFRESH_WORK)):
- group = property(lambda self: self.groupGUID)
+ # Note, the schema has "groupGuid", but really it's a UID. At some point
+ # we should change the column name.
+ group = property(lambda self: self.groupGuid)
@inlineCallbacks
def doWork(self):
-
# Delete all other work items for this group
yield Delete(
- From=self.table, Where=(self.table.GROUP_GUID == self.groupGUID)
+ From=self.table, Where=(self.table.GROUP_GUID == self.groupGuid)
).on(self.transaction)
groupCacher = getattr(self.transaction, "_groupCacher", None)
if groupCacher is not None:
try:
- groupCacher.refreshGroup(self.transaction, self.groupGUID)
+ yield groupCacher.refreshGroup(
+ self.transaction, self.groupGuid.decode("utf-8")
+ )
except Exception, e:
log.error(
"Failed to refresh group {group} {err}",
- group=self.groupGUID, err=e
+ group=self.groupGuid, err=e
)
else:
@@ -154,11 +144,11 @@
)
log.debug(
"Rescheduling group refresh for {group}: {when}",
- group=self.groupGUID, when=notBefore
+ group=self.groupGuid, when=notBefore
)
yield self.transaction.enqueue(
GroupRefreshWork,
- groupGUID=self.groupGUID, notBefore=notBefore
+ groupGuid=self.groupGuid, notBefore=notBefore
)
@@ -182,6 +172,7 @@
)
).on(self.transaction)
+ # MOVE2WHO
# TODO: Pull this over from groupcacher branch
@@ -201,6 +192,7 @@
records.add(record)
for member in (yield record.members()):
if member not in records:
+ #MOVE2WHO
#TODO: HACK for old-style XML. FIX
if (
member.recordType != RecordType.group and
@@ -219,14 +211,14 @@
of two lists -- one for added/updated assignments, and one for removed
assignments.
- @param old: dictionary of delegator: (readGroupGUID, writeGroupGUID)
+ @param old: dictionary of delegator: (readGroupUID, writeGroupUID)
@type old: C{dict}
- @param new: dictionary of delegator: (readGroupGUID, writeGroupGUID)
+ @param new: dictionary of delegator: (readGroupUID, writeGroupUID)
@type new: C{dict}
@return: Tuple of two lists; the first list contains tuples of (delegator,
- (readGroupGUID, writeGroupGUID)), and represents all the new or updated
+ (readGroupUID, writeGroupUID)), and represents all the new or updated
assignments. The second list contains all the delegators which used to
have a delegate but don't anymore.
"""
@@ -251,13 +243,16 @@
def __init__(
self, directory,
- useExternalProxies=False, externalProxiesSource=None
+ updateSeconds=600,
+ useExternalProxies=False,
+ externalProxiesSource=None
):
self.directory = directory
self.useExternalProxies = useExternalProxies
if useExternalProxies and externalProxiesSource is None:
externalProxiesSource = self.directory.getExternalProxyAssignments
self.externalProxiesSource = externalProxiesSource
+ self.updateSeconds = updateSeconds
@inlineCallbacks
@@ -269,19 +264,21 @@
# yield self.applyExternalAssignments(txn, externalAssignments)
# Figure out which groups matter
- groupGUIDs = yield self.groupsToRefresh(txn)
+ groupUIDs = yield self.groupsToRefresh(txn)
self.log.debug(
- "Number of groups to refresh: {num}", num=len(groupGUIDs)
+ "Number of groups to refresh: {num}", num=len(groupUIDs)
)
# For each of those groups, create a per-group refresh work item
- for groupGUID in groupGUIDs:
+ for groupUID in groupUIDs:
notBefore = (
datetime.datetime.utcnow() +
datetime.timedelta(seconds=1)
)
+ self.log.debug("Enqueuing group refresh for {u}", u=groupUID)
yield txn.enqueue(
- GroupRefreshWork, groupGUID=groupGUID, notBefore=notBefore
+ GroupRefreshWork, groupGuid=groupUID, notBefore=notBefore
)
+ self.log.debug("Enqueued group refresh for {u}", u=groupUID)
@inlineCallbacks
@@ -290,84 +287,91 @@
oldAssignments = (yield txn.externalDelegates())
# external assignments is of the form:
- # { delegatorGUID: (readDelegateGroupGUID, writeDelegateGroupGUID),
+ # { delegatorUID: (readDelegateGroupUID, writeDelegateGroupUID),
# }
changed, removed = diffAssignments(oldAssignments, newAssignments)
if changed:
for (
- delegatorGUID, (readDelegateGUID, writeDelegateGUID)
+ delegatorUID, (readDelegateUID, writeDelegateUID)
) in changed:
readDelegateGroupID = writeDelegateGroupID = None
- if readDelegateGUID:
+ if readDelegateUID:
readDelegateGroupID, _ignore_name, hash = (
- yield txn.groupByGUID(readDelegateGUID)
+ yield txn.groupByUID(readDelegateUID)
)
- if writeDelegateGUID:
+ if writeDelegateUID:
writeDelegateGroupID, _ignore_name, hash = (
- yield txn.groupByGUID(writeDelegateGUID)
+ yield txn.groupByUID(writeDelegateUID)
)
yield txn.assignExternalDelegates(
- delegatorGUID, readDelegateGroupID, writeDelegateGroupID,
- readDelegateGUID, writeDelegateGUID
+ delegatorUID, readDelegateGroupID, writeDelegateGroupID,
+ readDelegateUID, writeDelegateUID
)
if removed:
- for delegatorGUID in removed:
+ for delegatorUID in removed:
yield txn.assignExternalDelegates(
- delegatorGUID, None, None, None, None
+ delegatorUID, None, None, None, None
)
@inlineCallbacks
- def refreshGroup(self, txn, groupGUID):
+ def refreshGroup(self, txn, groupUID):
# Does the work of a per-group refresh work item
- # Faults in the flattened membership of a group, as GUIDs
+ # Faults in the flattened membership of a group, as UIDs
# and updates the GROUP_MEMBERSHIP table
- record = (yield self.directory.recordWithGUID(groupGUID))
- membershipHashContent = hashlib.md5()
- members = (yield expandedMembers(record))
- members = list(members)
- members.sort(cmp=lambda x, y: cmp(x.guid, y.guid))
- for member in members:
- membershipHashContent.update(str(member.guid))
- membershipHash = membershipHashContent.hexdigest()
- groupID, _ignore_cachedName, cachedMembershipHash = (
- yield txn.groupByGUID(groupGUID)
- )
-
- if cachedMembershipHash != membershipHash:
- membershipChanged = True
- self.log.debug(
- "Group '{group}' changed", group=record.fullNames[0]
+ self.log.debug("Faulting in group: {g}", g=groupUID)
+ record = (yield self.directory.recordWithUID(groupUID))
+ if record is None:
+ # FIXME: the group has disappeared from the directory.
+ # How do we want to handle this?
+ self.log.info("Group has disappeared: {g}", g=groupUID)
+ else:
+ self.log.debug("Got group record: {u}", u=record.uid)
+ membershipHashContent = hashlib.md5()
+ members = (yield expandedMembers(record))
+ members = list(members)
+ members.sort(cmp=lambda x, y: cmp(x.uid, y.uid))
+ for member in members:
+ membershipHashContent.update(str(member.uid))
+ membershipHash = membershipHashContent.hexdigest()
+ groupID, _ignore_cachedName, cachedMembershipHash = (
+ yield txn.groupByUID(groupUID)
)
- else:
- membershipChanged = False
- yield txn.updateGroup(groupGUID, record.fullNames[0], membershipHash)
+ if cachedMembershipHash != membershipHash:
+ membershipChanged = True
+ self.log.debug(
+ "Group '{group}' changed", group=record.fullNames[0]
+ )
+ else:
+ membershipChanged = False
- if membershipChanged:
- newMemberGUIDs = set()
- for member in members:
- newMemberGUIDs.add(member.guid)
- yield self.synchronizeMembers(txn, groupID, newMemberGUIDs)
+ yield txn.updateGroup(groupUID, record.fullNames[0], membershipHash)
- yield self.scheduleEventReconciliations(txn, groupID, groupGUID)
+ if membershipChanged:
+ newMemberUIDs = set()
+ for member in members:
+ newMemberUIDs.add(member.uid)
+ yield self.synchronizeMembers(txn, groupID, newMemberUIDs)
+ yield self.scheduleEventReconciliations(txn, groupID, groupUID)
+
@inlineCallbacks
- def synchronizeMembers(self, txn, groupID, newMemberGUIDs):
+ def synchronizeMembers(self, txn, groupID, newMemberUIDs):
numRemoved = numAdded = 0
- cachedMemberGUIDs = (yield txn.membersOfGroup(groupID))
+ cachedMemberUIDs = (yield txn.membersOfGroup(groupID))
- for memberGUID in cachedMemberGUIDs:
- if memberGUID not in newMemberGUIDs:
+ for memberUID in cachedMemberUIDs:
+ if memberUID not in newMemberUIDs:
numRemoved += 1
- yield txn.removeMemberFromGroup(memberGUID, groupID)
+ yield txn.removeMemberFromGroup(memberUID, groupID)
- for memberGUID in newMemberGUIDs:
- if memberGUID not in cachedMemberGUIDs:
+ for memberUID in newMemberUIDs:
+ if memberUID not in cachedMemberUIDs:
numAdded += 1
- yield txn.addMemberToGroup(memberGUID, groupID)
+ yield txn.addMemberToGroup(memberUID, groupID)
returnValue((numAdded, numRemoved))
@@ -378,23 +382,23 @@
The members of the given group as recorded in the db
"""
members = set()
- memberGUIDs = (yield txn.membersOfGroup(groupID))
- for guid in memberGUIDs:
- record = (yield self.directory.recordWithGUID(guid))
+ memberUIDs = (yield txn.membersOfGroup(groupID))
+ for uid in memberUIDs:
+ record = (yield self.directory.recordWithUID(uid))
if record is not None:
members.add(record)
returnValue(members)
- def cachedGroupsFor(self, txn, guid):
+ def cachedGroupsFor(self, txn, uid):
"""
- The IDs of the groups the guid is a member of
+ The UIDs of the groups the uid is a member of
"""
- return txn.groupsFor(guid)
+ return txn.groupsFor(uid)
@inlineCallbacks
- def scheduleEventReconciliations(self, txn, groupID, groupGUID):
+ def scheduleEventReconciliations(self, txn, groupID, groupUID):
"""
Find all events who have this groupID as an attendee and create
work items for them.
@@ -415,29 +419,29 @@
)
log.debug(
"scheduling group reconciliation for "
- "({eventID}, {groupID}, {groupGUID}): {when}",
+ "({eventID}, {groupID}, {groupUID}): {when}",
eventID=eventID,
groupID=groupID,
- groupGUID=groupGUID,
+ groupUID=groupUID,
when=notBefore)
yield txn.enqueue(
GroupAttendeeReconciliationWork,
eventID=eventID,
groupID=groupID,
- groupGUID=groupGUID,
+ groupGuid=groupUID,
notBefore=notBefore
)
@inlineCallbacks
def groupsToRefresh(self, txn):
- delegatedGUIDs = set((yield allGroupDelegates(txn)))
+ delegatedUIDs = set((yield txn.allGroupDelegates()))
self.log.info(
- "There are {count} group delegates", count=len(delegatedGUIDs)
+ "There are {count} group delegates", count=len(delegatedUIDs)
)
- attendeeGroupGUIDs = set()
+ attendeeGroupUIDs = set()
# get all groups from events
groupAttendee = schema.GROUP_ATTENDEE
@@ -447,7 +451,7 @@
).on(txn)
groupIDs = set([row[0] for row in rows])
- # get groupGUIDs
+ # get groupUIDs
if groupIDs:
gr = schema.GROUPS
rows = yield Select(
@@ -455,6 +459,8 @@
From=gr,
Where=gr.GROUP_ID.In(groupIDs)
).on(txn)
- attendeeGroupGUIDs = set([row[0] for row in rows])
+ attendeeGroupUIDs = set([row[0] for row in rows])
- returnValue(delegatedGUIDs.union(attendeeGroupGUIDs))
+ # FIXME: is this a good place to clear out unreferenced groups?
+
+ returnValue(delegatedUIDs.union(attendeeGroupUIDs))
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/accounts.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/accounts.xml 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/accounts.xml 2014-03-13 20:40:10 UTC (rev 12899)
@@ -18,174 +18,339 @@
<!DOCTYPE accounts SYSTEM "accounts.dtd">
-<accounts realm="Test Realm">
- <user>
+<directory realm="Test Realm">
+ <record type="user">
<uid>admin</uid>
- <guid>admin</guid>
+ <short-name>admin</short-name>
<password>admin</password>
- <name>Super User</name>
- <first-name>Super</first-name>
- <last-name>User</last-name>
- </user>
- <user>
+ <full-name>Super User</full-name>
+ </record>
+ <record type="user">
<uid>apprentice</uid>
- <guid>apprentice</guid>
+ <short-name>apprentice</short-name>
<password>apprentice</password>
- <name>Apprentice Super User</name>
- <first-name>Apprentice</first-name>
- <last-name>Super User</last-name>
- </user>
- <user>
+ <full-name>Apprentice Super User</full-name>
+ </record>
+ <record type="user">
<uid>wsanchez</uid>
- <guid>wsanchez</guid>
- <email-address>wsanchez at example.com</email-address>
+ <short-name>wsanchez</short-name>
+ <email>wsanchez at example.com</email>
<password>test</password>
- <name>Wilfredo Sanchez Vega</name>
- <first-name>Wilfredo</first-name>
- <last-name>Sanchez Vega</last-name>
- </user>
- <user>
+ <full-name>Wilfredo Sanchez Vega</full-name>
+ </record>
+ <record type="user">
<uid>cdaboo</uid>
- <guid>cdaboo</guid>
- <email-address>cdaboo at example.com</email-address>
+ <short-name>cdaboo</short-name>
+ <email>cdaboo at example.com</email>
<password>test</password>
- <name>Cyrus Daboo</name>
- <first-name>Cyrus</first-name>
- <last-name>Daboo</last-name>
- </user>
- <user>
+    <full-name>Cyrus Daboo</full-name>
+ </record>
+ <record type="user">
<uid>sagen</uid>
- <guid>sagen</guid>
- <email-address>sagen at example.com</email-address>
+ <short-name>sagen</short-name>
+ <email>sagen at example.com</email>
<password>test</password>
- <name>Morgen Sagen</name>
- <first-name>Morgen</first-name>
- <last-name>Sagen</last-name>
- </user>
- <user>
+ <full-name>Morgen Sagen</full-name>
+ </record>
+ <record type="user">
<uid>dre</uid>
- <guid>andre</guid>
- <email-address>dre at example.com</email-address>
+ <short-name>andre</short-name>
+ <email>dre at example.com</email>
<password>test</password>
- <name>Andre LaBranche</name>
- <first-name>Andre</first-name>
- <last-name>LaBranche</last-name>
- </user>
- <user>
+ <full-name>Andre LaBranche</full-name>
+ </record>
+ <record type="user">
<uid>glyph</uid>
- <guid>glyph</guid>
- <email-address>glyph at example.com</email-address>
+ <short-name>glyph</short-name>
+ <email>glyph at example.com</email>
<password>test</password>
- <name>Glyph Lefkowitz</name>
- <first-name>Glyph</first-name>
- <last-name>Lefkowitz</last-name>
- </user>
- <user>
+ <full-name>Glyph Lefkowitz</full-name>
+ </record>
+ <record type="user">
<uid>i18nuser</uid>
- <guid>i18nuser</guid>
- <email-address>i18nuser at example.com</email-address>
+ <short-name>i18nuser</short-name>
+ <email>i18nuser at example.com</email>
<password>i18nuser</password>
- <name>まだ</name>
- <first-name>ま</first-name>
- <last-name>だ</last-name>
- </user>
+ <full-name>まだ</full-name>
+ </record>
+
+ <!-- twext.who xml doesn't (yet) support repeat
<user repeat="101">
<uid>user%02d</uid>
<uid>User %02d</uid>
- <guid>user%02d</guid>
+ <short-name>user%02d</short-name>
<password>user%02d</password>
- <name>User %02d</name>
- <first-name>User</first-name>
- <last-name>%02d</last-name>
- <email-address>user%02d at example.com</email-address>
- </user>
+ <full-name>User %02d</full-name>
+ <email>user%02d at example.com</email>
+ </record>
<user repeat="10">
<uid>public%02d</uid>
- <guid>public%02d</guid>
+ <short-name>public%02d</short-name>
<password>public%02d</password>
- <name>Public %02d</name>
- <first-name>Public</first-name>
- <last-name>%02d</last-name>
- </user>
- <group>
+ <full-name>Public %02d</full-name>
+ </record>
+ -->
+ <record type="user">
+ <short-name>user01</short-name>
+ <uid>user01</uid>
+ <password>user01</password>
+ <full-name>User 01</full-name>
+ <email>user01 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user02</short-name>
+ <uid>user02</uid>
+ <password>user02</password>
+ <full-name>User 02</full-name>
+ <email>user02 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user03</short-name>
+ <uid>user03</uid>
+ <password>user03</password>
+ <full-name>User 03</full-name>
+ <email>user03 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user04</short-name>
+ <uid>user04</uid>
+ <password>user04</password>
+ <full-name>User 04</full-name>
+ <email>user04 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user05</short-name>
+ <uid>user05</uid>
+ <password>user05</password>
+ <full-name>User 05</full-name>
+ <email>user05 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user06</short-name>
+ <uid>user06</uid>
+ <password>user06</password>
+ <full-name>User 06</full-name>
+ <email>user06 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user07</short-name>
+ <uid>user07</uid>
+ <password>user07</password>
+ <full-name>User 07</full-name>
+ <email>user07 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user08</short-name>
+ <uid>user08</uid>
+ <password>user08</password>
+ <full-name>User 08</full-name>
+ <email>user08 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user09</short-name>
+ <uid>user09</uid>
+ <password>user09</password>
+ <full-name>User 09</full-name>
+ <email>user09 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user10</short-name>
+ <uid>user10</uid>
+ <password>user10</password>
+ <full-name>User 10</full-name>
+ <email>user10 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user11</short-name>
+ <uid>user11</uid>
+ <password>user11</password>
+ <full-name>User 11</full-name>
+ <email>user11 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user12</short-name>
+ <uid>user12</uid>
+ <password>user12</password>
+ <full-name>User 12</full-name>
+ <email>user12 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user13</short-name>
+ <uid>user13</uid>
+ <password>user13</password>
+ <full-name>User 13</full-name>
+ <email>user13 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user14</short-name>
+ <uid>user14</uid>
+ <password>user14</password>
+ <full-name>User 14</full-name>
+ <email>user14 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user15</short-name>
+ <uid>user15</uid>
+ <password>user15</password>
+ <full-name>User 15</full-name>
+ <email>user15 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user16</short-name>
+ <uid>user16</uid>
+ <password>user16</password>
+ <full-name>User 16</full-name>
+ <email>user16 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user17</short-name>
+ <uid>user17</uid>
+ <password>user17</password>
+ <full-name>User 17</full-name>
+ <email>user17 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user18</short-name>
+ <uid>user18</uid>
+ <password>user18</password>
+ <full-name>User 18</full-name>
+ <email>user18 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user19</short-name>
+ <uid>user19</uid>
+ <password>user19</password>
+ <full-name>User 19</full-name>
+ <email>user19 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user20</short-name>
+ <uid>user20</uid>
+ <password>user20</password>
+ <full-name>User 20</full-name>
+ <email>user20 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user21</short-name>
+ <uid>user21</uid>
+ <password>user21</password>
+ <full-name>User 21</full-name>
+ <email>user21 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user22</short-name>
+ <uid>user22</uid>
+ <password>user22</password>
+ <full-name>User 22</full-name>
+ <email>user22 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user23</short-name>
+ <uid>user23</uid>
+ <password>user23</password>
+ <full-name>User 23</full-name>
+ <email>user23 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user24</short-name>
+ <uid>user24</uid>
+ <password>user24</password>
+ <full-name>User 24</full-name>
+ <email>user24 at example.com</email>
+ </record>
+
+ <record type="user">
+ <short-name>user25</short-name>
+ <uid>user25</uid>
+ <password>user25</password>
+ <full-name>User 25</full-name>
+ <email>user25 at example.com</email>
+ </record>
+
+ <record type="group">
<uid>group01</uid>
- <guid>group01</guid>
+ <short-name>group01</short-name>
<password>group01</password>
- <name>Group 01</name>
- <email-address>group01 at example.com</email-address>
- <members>
- <member type="users">user01</member>
- </members>
- </group>
- <group>
+ <full-name>Group 01</full-name>
+ <member-uid type="users">user01</member-uid>
+ </record>
+ <record type="group">
<uid>group02</uid>
- <guid>group02</guid>
+ <short-name>group02</short-name>
<password>group02</password>
- <name>Group 02</name>
- <email-address>group02 at example.com</email-address>
- <members>
- <member type="users">user06</member>
- <member type="users">user07</member>
- </members>
- </group>
- <group>
+ <full-name>Group 02</full-name>
+ <member-uid type="users">user06</member-uid>
+ <member-uid type="users">user07</member-uid>
+ </record>
+ <record type="group">
<uid>group03</uid>
- <guid>group03</guid>
+ <short-name>group03</short-name>
<password>group03</password>
- <name>Group 03</name>
- <members>
- <member type="users">user08</member>
- <member type="users">user09</member>
- </members>
- </group>
- <group>
+ <full-name>Group 03</full-name>
+ <member-uid type="users">user08</member-uid>
+ <member-uid type="users">user09</member-uid>
+ </record>
+ <record type="group">
<uid>group04</uid>
- <guid>group04</guid>
+ <short-name>group04</short-name>
<password>group04</password>
- <name>Group 04</name>
- <members>
- <member type="groups">group02</member>
- <member type="groups">group03</member>
- <member type="users">user10</member>
- </members>
- </group>
- <group> <!-- delegategroup -->
+ <full-name>Group 04</full-name>
+ <member-uid type="groups">group02</member-uid>
+ <member-uid type="groups">group03</member-uid>
+ <member-uid type="users">user10</member-uid>
+ </record>
+ <record type="group"> <!-- delegategroup -->
<uid>group05</uid>
- <guid>group05</guid>
+ <short-name>group05</short-name>
<password>group05</password>
- <name>Group 05</name>
- <members>
- <member type="groups">group06</member>
- <member type="users">user20</member>
- </members>
- </group>
- <group> <!-- delegatesubgroup -->
+ <full-name>Group 05</full-name>
+ <member-uid type="groups">group06</member-uid>
+ <member-uid type="users">user20</member-uid>
+ </record>
+ <record type="group"> <!-- delegatesubgroup -->
<uid>group06</uid>
- <guid>group06</guid>
+ <short-name>group06</short-name>
<password>group06</password>
- <name>Group 06</name>
- <members>
- <member type="users">user21</member>
- </members>
- </group>
- <group> <!-- readonlydelegategroup -->
+ <full-name>Group 06</full-name>
+ <member-uid type="users">user21</member-uid>
+ </record>
+ <record type="group"> <!-- readonlydelegategroup -->
<uid>group07</uid>
- <guid>group07</guid>
+ <short-name>group07</short-name>
<password>group07</password>
- <name>Group 07</name>
- <members>
- <member type="users">user22</member>
- <member type="users">user23</member>
- <member type="users">user24</member>
- </members>
- </group>
- <group>
+ <full-name>Group 07</full-name>
+ <member-uid type="users">user22</member-uid>
+ <member-uid type="users">user23</member-uid>
+ <member-uid type="users">user24</member-uid>
+ </record>
+ <record type="group">
<uid>disabledgroup</uid>
- <guid>disabledgroup</guid>
+ <short-name>disabledgroup</short-name>
<password>disabledgroup</password>
- <name>Disabled Group</name>
- <members>
- <member type="users">user01</member>
- </members>
- </group>
-</accounts>
+ <full-name>Disabled Group</full-name>
+ <member-uid type="users">user01</member-uid>
+ </record>
+</directory>
Copied: CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/augments.xml (from rev 12898, CalendarServer/branches/users/sagen/move2who-2/txdav/who/test/accounts/augments.xml)
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/augments.xml (rev 0)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/augments.xml 2014-03-13 20:40:10 UTC (rev 12899)
@@ -0,0 +1,185 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE augments SYSTEM "augments.dtd">
+
+<augments>
+ <record>
+ <uid>Default</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ </record>
+ <record repeat="10">
+ <uid>location%02d</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <auto-schedule>true</auto-schedule>
+ </record>
+ <record repeat="4">
+ <uid>resource%02d</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <auto-schedule>true</auto-schedule>
+ </record>
+ <record>
+ <uid>resource05</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>none</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>resource06</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>accept-always</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>resource07</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>decline-always</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>resource08</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>accept-if-free</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>resource09</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>decline-if-busy</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>resource10</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>automatic</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>resource11</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>decline-always</auto-schedule-mode>
+ <auto-accept-group>group01</auto-accept-group>
+ </record>
+ <record repeat="10">
+ <uid>group%02d</uid>
+ <enable>true</enable>
+ </record>
+ <record>
+ <uid>disabledgroup</uid>
+ <enable>false</enable>
+ </record>
+ <record>
+ <uid>delegatedroom</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>false</enable-addressbook>
+ <auto-schedule>false</auto-schedule>
+ </record>
+ <record>
+ <uid>03DFF660-8BCC-4198-8588-DD77F776F518</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <enable-login>true</enable-login>
+ <auto-schedule>true</auto-schedule>
+ </record>
+ <record>
+ <uid>80689D41-DAF8-4189-909C-DB017B271892</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <enable-login>true</enable-login>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>default</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>C38BEE7A-36EE-478C-9DCB-CBF4612AFE65</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <enable-login>true</enable-login>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>default</auto-schedule-mode>
+ <auto-accept-group>group01</auto-accept-group>
+ </record>
+ <record>
+ <uid>CCE95217-A57B-481A-AC3D-FEC9AB6CE3A9</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <enable-login>true</enable-login>
+ <auto-schedule>true</auto-schedule>
+ </record>
+ <record>
+ <uid>0CE0BF31-5F9E-4801-A489-8C70CF287F5F</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <enable-login>true</enable-login>
+ <auto-schedule>true</auto-schedule>
+ </record>
+ <record>
+ <uid>6F9EE33B-78F6-481B-9289-3D0812FF0D64</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <enable-login>true</enable-login>
+ <auto-schedule>false</auto-schedule>
+ <auto-schedule-mode>default</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>76E7ECA6-08BC-4AE7-930D-F2E7453993A5</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <enable-login>true</enable-login>
+ <auto-schedule>false</auto-schedule>
+ <auto-schedule-mode>default</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>63A2F949-2D8D-4C8D-B8A5-DCF2A94610F3</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <enable-login>true</enable-login>
+ <auto-schedule>false</auto-schedule>
+ <auto-schedule-mode>default</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>06E3BDCB-9C19-485A-B14E-F146A80ADDC6</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <enable-login>true</enable-login>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>default</auto-schedule-mode>
+ </record>
+ <record>
+ <uid>4D66A20A-1437-437D-8069-2F14E8322234</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ <enable-addressbook>true</enable-addressbook>
+ <enable-login>true</enable-login>
+ <auto-schedule>true</auto-schedule>
+ <auto-schedule-mode>default</auto-schedule-mode>
+ </record>
+</augments>
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/resources.xml
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/resources.xml 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/accounts/resources.xml 2014-03-13 20:40:10 UTC (rev 12899)
@@ -1,34 +1,273 @@
-<?xml version="1.0" encoding="utf-8"?>
-
-<!--
-Copyright (c) 2006-2014 Apple Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- -->
-
-<!DOCTYPE accounts SYSTEM "accounts.dtd">
-
-<accounts realm="Test Realm">
- <location repeat="10">
- <uid>location%02d</uid>
- <guid>location%02d</guid>
- <password>location%02d</password>
- <name>Room %02d</name>
- </location>
- <resource repeat="10">
- <uid>resource%02d</uid>
- <guid>resource%02d</guid>
- <password>resource%02d</password>
- <name>Resource %02d</name>
- </resource>
-</accounts>
+<directory realm="Test Realm">
+ <record type="location">
+ <short-name>fantastic</short-name>
+ <uid>4D66A20A-1437-437D-8069-2F14E8322234</uid>
+ <full-name>Fantastic Conference Room</full-name>
+ <extras>
+ <associatedAddress>63A2F949-2D8D-4C8D-B8A5-DCF2A94610F3</associatedAddress>
+ </extras>
+ </record>
+ <record type="location">
+ <short-name>jupiter</short-name>
+ <uid>jupiter</uid>
+ <full-name>Jupiter Conference Room, Building 2, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>uranus</short-name>
+ <uid>uranus</uid>
+ <full-name>Uranus Conference Room, Building 3, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>morgensroom</short-name>
+ <uid>03DFF660-8BCC-4198-8588-DD77F776F518</uid>
+ <full-name>Morgen's Room</full-name>
+ </record>
+ <record type="location">
+ <short-name>mercury</short-name>
+ <uid>mercury</uid>
+ <full-name>Mercury Conference Room, Building 1, 2nd Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>location09</short-name>
+ <uid>location09</uid>
+ <full-name>Room 09</full-name>
+ </record>
+ <record type="location">
+ <short-name>location08</short-name>
+ <uid>location08</uid>
+ <full-name>Room 08</full-name>
+ </record>
+ <record type="location">
+ <short-name>location07</short-name>
+ <uid>location07</uid>
+ <full-name>Room 07</full-name>
+ </record>
+ <record type="location">
+ <short-name>location06</short-name>
+ <uid>location06</uid>
+ <full-name>Room 06</full-name>
+ </record>
+ <record type="location">
+ <short-name>location05</short-name>
+ <uid>location05</uid>
+ <full-name>Room 05</full-name>
+ </record>
+ <record type="location">
+ <short-name>location04</short-name>
+ <uid>location04</uid>
+ <full-name>Room 04</full-name>
+ </record>
+ <record type="location">
+ <short-name>location03</short-name>
+ <uid>location03</uid>
+ <full-name>Room 03</full-name>
+ </record>
+ <record type="location">
+ <short-name>location02</short-name>
+ <uid>location02</uid>
+ <full-name>Room 02</full-name>
+ </record>
+ <record type="location">
+ <short-name>location01</short-name>
+ <uid>location01</uid>
+ <full-name>Room 01</full-name>
+ </record>
+ <record type="location">
+ <short-name>delegatedroom</short-name>
+ <uid>delegatedroom</uid>
+ <full-name>Delegated Conference Room</full-name>
+ </record>
+ <record type="location">
+ <short-name>mars</short-name>
+ <uid>redplanet</uid>
+ <full-name>Mars Conference Room, Building 1, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>sharissroom</short-name>
+ <uid>80689D41-DAF8-4189-909C-DB017B271892</uid>
+ <full-name>Shari's Room</full-name>
+ <extras>
+ <associatedAddress>6F9EE33B-78F6-481B-9289-3D0812FF0D64</associatedAddress>
+ </extras>
+ </record>
+ <record type="location">
+ <short-name>pluto</short-name>
+ <uid>pluto</uid>
+ <full-name>Pluto Conference Room, Building 2, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>saturn</short-name>
+ <uid>saturn</uid>
+ <full-name>Saturn Conference Room, Building 2, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>location10</short-name>
+ <uid>location10</uid>
+ <full-name>Room 10</full-name>
+ </record>
+ <record type="location">
+ <short-name>pretend</short-name>
+ <uid>06E3BDCB-9C19-485A-B14E-F146A80ADDC6</uid>
+ <full-name>Pretend Conference Room</full-name>
+ <extras>
+ <associatedAddress>76E7ECA6-08BC-4AE7-930D-F2E7453993A5</associatedAddress>
+ </extras>
+ </record>
+ <record type="location">
+ <short-name>neptune</short-name>
+ <uid>neptune</uid>
+ <full-name>Neptune Conference Room, Building 2, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>Earth</short-name>
+ <uid>Earth</uid>
+ <full-name>Earth Conference Room, Building 1, 1st Floor</full-name>
+ </record>
+ <record type="location">
+ <short-name>venus</short-name>
+ <uid>venus</uid>
+ <full-name>Venus Conference Room, Building 1, 2nd Floor</full-name>
+ </record>
+ <record type="resource">
+ <short-name>sharisotherresource</short-name>
+ <uid>CCE95217-A57B-481A-AC3D-FEC9AB6CE3A9</uid>
+ <full-name>Shari's Other Resource</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource15</short-name>
+ <uid>resource15</uid>
+ <full-name>Resource 15</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource14</short-name>
+ <uid>resource14</uid>
+ <full-name>Resource 14</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource17</short-name>
+ <uid>resource17</uid>
+ <full-name>Resource 17</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource16</short-name>
+ <uid>resource16</uid>
+ <full-name>Resource 16</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource11</short-name>
+ <uid>resource11</uid>
+ <full-name>Resource 11</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource10</short-name>
+ <uid>resource10</uid>
+ <full-name>Resource 10</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource13</short-name>
+ <uid>resource13</uid>
+ <full-name>Resource 13</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource12</short-name>
+ <uid>resource12</uid>
+ <full-name>Resource 12</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource19</short-name>
+ <uid>resource19</uid>
+ <full-name>Resource 19</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource18</short-name>
+ <uid>resource18</uid>
+ <full-name>Resource 18</full-name>
+ </record>
+ <record type="resource">
+ <short-name>sharisresource</short-name>
+ <uid>C38BEE7A-36EE-478C-9DCB-CBF4612AFE65</uid>
+ <full-name>Shari's Resource</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource20</short-name>
+ <uid>resource20</uid>
+ <full-name>Resource 20</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource06</short-name>
+ <uid>resource06</uid>
+ <full-name>Resource 06</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource07</short-name>
+ <uid>resource07</uid>
+ <full-name>Resource 07</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource04</short-name>
+ <uid>resource04</uid>
+ <full-name>Resource 04</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource05</short-name>
+ <uid>resource05</uid>
+ <full-name>Resource 05</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource02</short-name>
+ <uid>resource02</uid>
+ <full-name>Resource 02</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource03</short-name>
+ <uid>resource03</uid>
+ <full-name>Resource 03</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource01</short-name>
+ <uid>resource01</uid>
+ <full-name>Resource 01</full-name>
+ </record>
+ <record type="resource">
+ <short-name>sharisotherresource1</short-name>
+ <uid>0CE0BF31-5F9E-4801-A489-8C70CF287F5F</uid>
+ <full-name>Shari's Other Resource1</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource08</short-name>
+ <uid>resource08</uid>
+ <full-name>Resource 08</full-name>
+ </record>
+ <record type="resource">
+ <short-name>resource09</short-name>
+ <uid>resource09</uid>
+ <full-name>Resource 09</full-name>
+ </record>
+ <record type="address">
+ <short-name>testaddress1</short-name>
+ <uid>6F9EE33B-78F6-481B-9289-3D0812FF0D64</uid>
+ <full-name>Test Address One</full-name>
+ <extras>
+ <streetAddress>20300 Stevens Creek Blvd, Cupertino, CA 95014</streetAddress>
+ <geo>37.322281,-122.028345</geo>
+ </extras>
+ </record>
+ <record type="address">
+ <short-name>il2</short-name>
+ <uid>63A2F949-2D8D-4C8D-B8A5-DCF2A94610F3</uid>
+ <full-name>IL2</full-name>
+ <extras>
+ <streetAddress>2 Infinite Loop, Cupertino, CA 95014</streetAddress>
+ <geo>37.332633,-122.030502</geo>
+ </extras>
+ </record>
+ <record type="address">
+ <short-name>il1</short-name>
+ <uid>76E7ECA6-08BC-4AE7-930D-F2E7453993A5</uid>
+ <full-name>IL1</full-name>
+ <extras>
+ <streetAddress>1 Infinite Loop, Cupertino, CA 95014</streetAddress>
+ <geo>37.331741,-122.030333</geo>
+ </extras>
+ </record>
+</directory>
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_delegates.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_delegates.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_delegates.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -19,14 +19,14 @@
"""
from txdav.who.delegates import (
- addDelegate, removeDelegate, delegatesOf, delegatedTo, allGroupDelegates
+ addDelegate, removeDelegate, delegatesOf, delegatedTo,
+ DirectoryService, RecordType as DelegateRecordType
)
from txdav.who.groups import GroupCacher
from twext.who.idirectory import RecordType
from twext.who.test.test_xml import xmlService
from twisted.internet.defer import inlineCallbacks
from twistedcaldav.test.util import StoreTestCase
-from uuid import UUID
class DelegationTest(StoreTestCase):
@@ -34,14 +34,19 @@
@inlineCallbacks
def setUp(self):
yield super(DelegationTest, self).setUp()
+ self.store = self.storeUnderTest()
self.xmlService = xmlService(self.mktemp(), xmlData=testXMLConfig)
self.groupCacher = GroupCacher(self.xmlService)
+ self.delegateService = DirectoryService(
+ self.xmlService.realmName,
+ self.store
+ )
+ self.delegateService.setMasterDirectory(self.xmlService)
@inlineCallbacks
def test_directDelegation(self):
- store = self.storeUnderTest()
- txn = store.newTransaction()
+ txn = self.store.newTransaction()
delegator = yield self.xmlService.recordWithUID(u"__wsanchez__")
delegate1 = yield self.xmlService.recordWithUID(u"__sagen__")
@@ -50,24 +55,68 @@
# Add 1 delegate
yield addDelegate(txn, delegator, delegate1, True)
delegates = (yield delegatesOf(txn, delegator, True))
- self.assertEquals(["sagen"], [d.shortNames[0] for d in delegates])
+ self.assertEquals(["__sagen__"], [d.uid for d in delegates])
delegators = (yield delegatedTo(txn, delegate1, True))
- self.assertEquals(["wsanchez"], [d.shortNames[0] for d in delegators])
+ self.assertEquals(["__wsanchez__"], [d.uid for d in delegators])
+ yield txn.commit() # So delegateService will see the changes
+ txn = self.store.newTransaction()
+
+ # The "proxy-write" pseudoGroup will have one member
+ pseudoGroup = yield self.delegateService.recordWithShortName(
+ DelegateRecordType.writeDelegateGroup,
+ u"__wsanchez__"
+ )
+ self.assertEquals(pseudoGroup.uid, u"__wsanchez__#calendar-proxy-write")
+ self.assertEquals(
+ [r.uid for r in (yield pseudoGroup.members())],
+ [u"__sagen__"]
+ )
+ # The "proxy-read" pseudoGroup will have no members
+ pseudoGroup = yield self.delegateService.recordWithShortName(
+ DelegateRecordType.readDelegateGroup,
+ u"__wsanchez__"
+ )
+ self.assertEquals(pseudoGroup.uid, u"__wsanchez__#calendar-proxy-read")
+ self.assertEquals(
+ [r.uid for r in (yield pseudoGroup.members())],
+ []
+ )
+ # The "proxy-write-for" pseudoGroup will have one member
+ pseudoGroup = yield self.delegateService.recordWithShortName(
+ DelegateRecordType.writeDelegatorGroup,
+ u"__sagen__"
+ )
+ self.assertEquals(pseudoGroup.uid, u"__sagen__#calendar-proxy-write-for")
+ self.assertEquals(
+ [r.uid for r in (yield pseudoGroup.members())],
+ [u"__wsanchez__"]
+ )
+ # The "proxy-read-for" pseudoGroup will have no members
+ pseudoGroup = yield self.delegateService.recordWithShortName(
+ DelegateRecordType.readDelegatorGroup,
+ u"__sagen__"
+ )
+ self.assertEquals(pseudoGroup.uid, u"__sagen__#calendar-proxy-read-for")
+ self.assertEquals(
+ [r.uid for r in (yield pseudoGroup.members())],
+ []
+ )
+
# Add another delegate
yield addDelegate(txn, delegator, delegate2, True)
delegates = (yield delegatesOf(txn, delegator, True))
self.assertEquals(
- set(["sagen", "cdaboo"]),
- set([d.shortNames[0] for d in delegates])
+ set(["__sagen__", "__cdaboo__"]),
+ set([d.uid for d in delegates])
)
delegators = (yield delegatedTo(txn, delegate2, True))
- self.assertEquals(["wsanchez"], [d.shortNames[0] for d in delegators])
+ self.assertEquals(["__wsanchez__"], [d.uid for d in delegators])
# Remove 1 delegate
yield removeDelegate(txn, delegator, delegate1, True)
delegates = (yield delegatesOf(txn, delegator, True))
- self.assertEquals(["cdaboo"], [d.shortNames[0] for d in delegates])
+ self.assertEquals(["__cdaboo__"], [d.uid for d in delegates])
delegators = (yield delegatedTo(txn, delegate1, True))
self.assertEquals(0, len(delegators))
@@ -78,11 +127,40 @@
delegators = (yield delegatedTo(txn, delegate2, True))
self.assertEquals(0, len(delegators))
+ yield txn.commit() # So delegateService will see the changes
+ # Now set delegate assignments by using pseudoGroup.setMembers()
+ pseudoGroup = yield self.delegateService.recordWithShortName(
+ DelegateRecordType.writeDelegateGroup,
+ u"__wsanchez__"
+ )
+ yield pseudoGroup.setMembers([delegate1, delegate2])
+
+ # Verify the assignments were made
+ txn = self.store.newTransaction()
+ delegates = (yield delegatesOf(txn, delegator, True))
+ self.assertEquals(
+ set(["__sagen__", "__cdaboo__"]),
+ set([d.uid for d in delegates])
+ )
+ yield txn.commit()
+
+ # Set a different group of assignments:
+ yield pseudoGroup.setMembers([delegate2])
+
+ # Verify the assignments were made
+ txn = self.store.newTransaction()
+ delegates = (yield delegatesOf(txn, delegator, True))
+ self.assertEquals(
+ set(["__cdaboo__"]),
+ set([d.uid for d in delegates])
+ )
+ yield txn.commit()
+
+
@inlineCallbacks
def test_indirectDelegation(self):
- store = self.storeUnderTest()
- txn = store.newTransaction()
+ txn = self.store.newTransaction()
delegator = yield self.xmlService.recordWithUID(u"__wsanchez__")
delegate1 = yield self.xmlService.recordWithUID(u"__sagen__")
@@ -92,37 +170,41 @@
# Add group delegate, but before the group membership has been
# pulled in
yield addDelegate(txn, delegator, group1, True)
- delegates = (yield delegatesOf(txn, delegator, True))
+ # Passing expanded=False will return the group
+ delegates = (yield delegatesOf(txn, delegator, True, expanded=False))
+ self.assertEquals(1, len(delegates))
+ self.assertEquals(delegates[0].uid, u"__top_group_1__")
+ # Passing expanded=True will return not the group -- it only returns
+ # non-groups
+ delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(0, len(delegates))
# Now refresh the group and there will be 3 delegates (contained
# within 2 nested groups)
# guid = "49b350c69611477b94d95516b13856ab"
- yield self.groupCacher.refreshGroup(txn, group1.guid)
- yield self.groupCacher.refreshGroup(txn, group2.guid)
- delegates = (yield delegatesOf(txn, delegator, True))
+ yield self.groupCacher.refreshGroup(txn, group1.uid)
+ yield self.groupCacher.refreshGroup(txn, group2.uid)
+ delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(
- set(["sagen", "cdaboo", "glyph"]),
- set([d.shortNames[0] for d in delegates])
+ set(["__sagen__", "__cdaboo__", "__glyph__"]),
+ set([d.uid for d in delegates])
)
delegators = (yield delegatedTo(txn, delegate1, True))
- self.assertEquals(["wsanchez"], [d.shortNames[0] for d in delegators])
+ self.assertEquals(["__wsanchez__"], [d.uid for d in delegators])
# Verify we can ask for all delegated-to groups
yield addDelegate(txn, delegator, group2, True)
- groups = (yield allGroupDelegates(txn))
+ groups = (yield txn.allGroupDelegates())
self.assertEquals(
- set([
- UUID("49b350c69611477b94d95516b13856ab"),
- UUID("86144f73345a409782f1b782672087c7")
- ]), set(groups))
+ set([u'__sub_group_1__', u'__top_group_1__']), set(groups)
+ )
# Delegate to a user who is already indirectly delegated-to
yield addDelegate(txn, delegator, delegate1, True)
- delegates = (yield delegatesOf(txn, delegator, True))
+ delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(
- set(["sagen", "cdaboo", "glyph"]),
- set([d.shortNames[0] for d in delegates])
+ set(["__sagen__", "__cdaboo__", "__glyph__"]),
+ set([d.uid for d in delegates])
)
# Add a member to the group; they become a delegate
@@ -131,35 +213,38 @@
record = (
yield self.xmlService.recordWithShortName(RecordType.user, name)
)
- newSet.add(record.guid)
- groupID, name, membershipHash = (yield txn.groupByGUID(group1.guid))
+ newSet.add(record.uid)
+ groupID, name, membershipHash = (yield txn.groupByUID(group1.uid))
numAdded, numRemoved = (
yield self.groupCacher.synchronizeMembers(txn, groupID, newSet)
)
- delegates = (yield delegatesOf(txn, delegator, True))
+ delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(
- set(["sagen", "cdaboo", "glyph", "dre"]),
- set([d.shortNames[0] for d in delegates])
+ set(["__sagen__", "__cdaboo__", "__glyph__", "__dre__"]),
+ set([d.uid for d in delegates])
)
# Remove delegate access from the top group
yield removeDelegate(txn, delegator, group1, True)
- delegates = (yield delegatesOf(txn, delegator, True))
+ delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(
- set(["sagen", "cdaboo"]),
- set([d.shortNames[0] for d in delegates])
+ set(["__sagen__", "__cdaboo__"]),
+ set([d.uid for d in delegates])
)
# Remove delegate access from the sub group
yield removeDelegate(txn, delegator, group2, True)
- delegates = (yield delegatesOf(txn, delegator, True))
+ delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(
- set(["sagen"]),
- set([d.shortNames[0] for d in delegates])
+ set(["__sagen__"]),
+ set([d.uid for d in delegates])
)
+ yield txn.commit()
+
+
testXMLConfig = """<?xml version="1.0" encoding="utf-8"?>
<directory realm="xyzzy">
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_groups.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_groups.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_groups.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -24,7 +24,6 @@
from twisted.internet.defer import inlineCallbacks
from twistedcaldav.test.util import StoreTestCase
from txdav.common.icommondatastore import NotFoundError
-from uuid import UUID
class GroupCacherTest(StoreTestCase):
@@ -69,22 +68,20 @@
txn = store.newTransaction()
record = yield self.xmlService.recordWithUID(u"__top_group_1__")
- yield self.groupCacher.refreshGroup(txn, record.guid)
+ yield self.groupCacher.refreshGroup(txn, record.uid)
- groupID, name, membershipHash = (yield txn.groupByGUID(record.guid))
- self.assertEquals(membershipHash, "4b0e162f2937f0f3daa6d10e5a6a6c33")
+ groupID, name, membershipHash = (yield txn.groupByUID(record.uid))
- groupGUID, name, membershipHash = (yield txn.groupByID(groupID))
- self.assertEquals(groupGUID, record.guid)
- self.assertEquals(name, "Top Group 1")
- self.assertEquals(membershipHash, "4b0e162f2937f0f3daa6d10e5a6a6c33")
+ self.assertEquals(membershipHash, "f380860ff5e02c2433fbd4b5ed3e090c")
+ groupUID, name, membershipHash = (yield txn.groupByID(groupID))
+ self.assertEquals(groupUID, record.uid)
+ self.assertEquals(name, u"Top Group 1")
+ self.assertEquals(membershipHash, "f380860ff5e02c2433fbd4b5ed3e090c")
+
members = (yield txn.membersOfGroup(groupID))
self.assertEquals(
- set([UUID("9064df911dbc4e079c2b6839b0953876"),
- UUID("4ad155cbae9b475f986ce08a7537893e"),
- UUID("3bdcb95484d54f6d8035eac19a6d6e1f"),
- UUID("7d45cb10479e456bb54d528958c5734b")]),
+ set([u'__cdaboo__', u'__glyph__', u'__sagen__', u'__wsanchez__']),
members
)
@@ -97,8 +94,8 @@
# sagen is in the top group, even though it's actually one level
# removed
record = yield self.xmlService.recordWithUID(u"__sagen__")
- groups = (yield self.groupCacher.cachedGroupsFor(txn, record.guid))
- self.assertEquals(set([groupID]), groups)
+ groups = (yield self.groupCacher.cachedGroupsFor(txn, record.uid))
+ self.assertEquals(set([u"__top_group_1__"]), groups)
@inlineCallbacks
@@ -113,9 +110,9 @@
txn = store.newTransaction()
# Refresh the group so it's assigned a group_id
- guid = UUID("49b350c69611477b94d95516b13856ab")
- yield self.groupCacher.refreshGroup(txn, guid)
- groupID, name, membershipHash = (yield txn.groupByGUID(guid))
+ uid = u"__top_group_1__"
+ yield self.groupCacher.refreshGroup(txn, uid)
+ groupID, name, membershipHash = (yield txn.groupByUID(uid))
# Remove two members, and add one member
newSet = set()
@@ -126,7 +123,7 @@
name
)
)
- newSet.add(record.guid)
+ newSet.add(record.uid)
numAdded, numRemoved = (
yield self.groupCacher.synchronizeMembers(
txn, groupID, newSet
@@ -159,12 +156,12 @@
# Non-existent groupID
self.failUnlessFailure(txn.groupByID(42), NotFoundError)
- guid = UUID("49b350c69611477b94d95516b13856ab")
- hash = "4b0e162f2937f0f3daa6d10e5a6a6c33"
- yield self.groupCacher.refreshGroup(txn, guid)
- groupID, name, membershipHash = (yield txn.groupByGUID(guid))
+ uid = u"__top_group_1__"
+ hash = "f380860ff5e02c2433fbd4b5ed3e090c"
+ yield self.groupCacher.refreshGroup(txn, uid)
+ groupID, name, membershipHash = (yield txn.groupByUID(uid))
results = (yield txn.groupByID(groupID))
- self.assertEquals([guid, "Top Group 1", hash], results)
+ self.assertEquals((uid, u"Top Group 1", hash), results)
@inlineCallbacks
@@ -177,32 +174,31 @@
self.assertEquals(oldExternalAssignments, {})
newAssignments = {
- UUID("3BDCB954-84D5-4F6D-8035-EAC19A6D6E1F"):
- (None, UUID("49B350C6-9611-477B-94D9-5516B13856AB"))
+ u"__wsanchez__": (None, u"__top_group_1__")
}
yield self.groupCacher.applyExternalAssignments(txn, newAssignments)
oldExternalAssignments = (yield txn.externalDelegates())
self.assertEquals(
oldExternalAssignments,
{
- UUID("3BDCB954-84D5-4F6D-8035-EAC19A6D6E1F"):
+ u"__wsanchez__":
(
None,
- UUID("49B350C6-9611-477B-94D9-5516B13856AB")
+ u"__top_group_1__"
)
}
)
newAssignments = {
- UUID("7D45CB10-479E-456B-B54D-528958C5734B"):
+ u"__cdaboo__":
(
- UUID("86144F73-345A-4097-82F1-B782672087C7"),
+ u"__sub_group_1__",
None
),
- UUID("3BDCB954-84D5-4F6D-8035-EAC19A6D6E1F"):
+ u"__wsanchez__":
(
- UUID("86144F73-345A-4097-82F1-B782672087C7"),
- UUID("49B350C6-9611-477B-94D9-5516B13856AB")
+ u"__sub_group_1__",
+ u"__top_group_1__"
),
}
yield self.groupCacher.applyExternalAssignments(txn, newAssignments)
@@ -210,14 +206,14 @@
self.assertEquals(
oldExternalAssignments,
{
- UUID('3bdcb954-84d5-4f6d-8035-eac19a6d6e1f'):
+ u"__wsanchez__":
(
- UUID('86144f73-345a-4097-82f1-b782672087c7'),
- UUID('49b350c6-9611-477b-94d9-5516b13856ab')
+ u"__sub_group_1__",
+ u"__top_group_1__"
),
- UUID('7d45cb10-479e-456b-b54d-528958c5734b'):
+ u"__cdaboo__":
(
- UUID('86144f73-345a-4097-82f1-b782672087c7'),
+ u"__sub_group_1__",
None
)
}
@@ -228,44 +224,44 @@
allGroupDelegates,
set(
[
- UUID('49b350c6-9611-477b-94d9-5516b13856ab'),
- UUID('86144f73-345a-4097-82f1-b782672087c7')
+ u"__top_group_1__",
+ u"__sub_group_1__"
]
)
)
# Fault in the read-only group
- yield self.groupCacher.refreshGroup(txn, UUID('86144f73-345a-4097-82f1-b782672087c7'))
+ yield self.groupCacher.refreshGroup(txn, u"__sub_group_1__")
# Wilfredo should have Sagen and Daboo as read-only delegates
delegates = (yield txn.delegates(
- UUID("3BDCB954-84D5-4F6D-8035-EAC19A6D6E1F"), False)
+ u"__wsanchez__", False, expanded=True)
)
self.assertEquals(
delegates,
set(
[
- UUID('4ad155cb-ae9b-475f-986c-e08a7537893e'),
- UUID('7d45cb10-479e-456b-b54d-528958c5734b')
+ u"__sagen__",
+ u"__cdaboo__"
]
)
)
# Fault in the read-write group
- yield self.groupCacher.refreshGroup(txn, UUID('49b350c6-9611-477b-94d9-5516b13856ab'))
+ yield self.groupCacher.refreshGroup(txn, u"__top_group_1__")
# Wilfredo should have 4 users as read-write delegates
delegates = (yield txn.delegates(
- UUID("3BDCB954-84D5-4F6D-8035-EAC19A6D6E1F"), True)
+ u"__wsanchez__", True, expanded=True)
)
self.assertEquals(
delegates,
set(
[
- UUID('3bdcb954-84d5-4f6d-8035-eac19a6d6e1f'),
- UUID('4ad155cb-ae9b-475f-986c-e08a7537893e'),
- UUID('7d45cb10-479e-456b-b54d-528958c5734b'),
- UUID('9064df91-1dbc-4e07-9c2b-6839b0953876')
+ u"__wsanchez__",
+ u"__sagen__",
+ u"__cdaboo__",
+ u"__glyph__"
]
)
)
@@ -275,9 +271,9 @@
# Now, remove some external assignments
#
newAssignments = {
- UUID("3BDCB954-84D5-4F6D-8035-EAC19A6D6E1F"):
+ u"__wsanchez__":
(
- UUID("86144F73-345A-4097-82F1-B782672087C7"),
+ u"__sub_group_1__",
None
),
}
@@ -286,9 +282,9 @@
self.assertEquals(
oldExternalAssignments,
{
- UUID('3bdcb954-84d5-4f6d-8035-eac19a6d6e1f'):
+ u"__wsanchez__":
(
- UUID('86144f73-345a-4097-82f1-b782672087c7'),
+ u"__sub_group_1__",
None
),
}
@@ -299,28 +295,28 @@
allGroupDelegates,
set(
[
- UUID('86144f73-345a-4097-82f1-b782672087c7')
+ u"__sub_group_1__"
]
)
)
# Wilfredo should have Sagen and Daboo as read-only delegates
delegates = (yield txn.delegates(
- UUID("3BDCB954-84D5-4F6D-8035-EAC19A6D6E1F"), False)
+ u"__wsanchez__", False, expanded=True)
)
self.assertEquals(
delegates,
set(
[
- UUID('4ad155cb-ae9b-475f-986c-e08a7537893e'),
- UUID('7d45cb10-479e-456b-b54d-528958c5734b')
+ u"__sagen__",
+ u"__cdaboo__"
]
)
)
# Wilfredo should have no read-write delegates
delegates = (yield txn.delegates(
- UUID("3BDCB954-84D5-4F6D-8035-EAC19A6D6E1F"), True)
+ u"__wsanchez__", True, expanded=True)
)
self.assertEquals(
delegates,
@@ -333,7 +329,7 @@
allGroupDelegates,
set(
[
- UUID('86144f73-345a-4097-82f1-b782672087c7')
+ u"__sub_group_1__"
]
)
)
@@ -424,7 +420,6 @@
<record type="user">
<uid>__wsanchez__</uid>
- <guid>3BDCB954-84D5-4F6D-8035-EAC19A6D6E1F</guid>
<short-name>wsanchez</short-name>
<short-name>wilfredo_sanchez</short-name>
<full-name>Wilfredo Sanchez</full-name>
@@ -435,7 +430,6 @@
<record type="user">
<uid>__glyph__</uid>
- <guid>9064DF91-1DBC-4E07-9C2B-6839B0953876</guid>
<short-name>glyph</short-name>
<full-name>Glyph Lefkowitz</full-name>
<password>hpylg</password>
@@ -445,7 +439,6 @@
<record type="user">
<uid>__sagen__</uid>
- <guid>4AD155CB-AE9B-475F-986C-E08A7537893E</guid>
<short-name>sagen</short-name>
<full-name>Morgen Sagen</full-name>
<password>negas</password>
@@ -455,7 +448,6 @@
<record type="user">
<uid>__cdaboo__</uid>
- <guid>7D45CB10-479E-456B-B54D-528958C5734B</guid>
<short-name>cdaboo</short-name>
<full-name>Cyrus Daboo</full-name>
<password>suryc</password>
@@ -464,7 +456,6 @@
<record type="user">
<uid>__dre__</uid>
- <guid>CFC88493-DBFF-42B9-ADC7-9B3DA0B0769B</guid>
<short-name>dre</short-name>
<full-name>Andre LaBranche</full-name>
<password>erd</password>
@@ -474,7 +465,6 @@
<record type="group">
<uid>__top_group_1__</uid>
- <guid>49B350C6-9611-477B-94D9-5516B13856AB</guid>
<short-name>top-group-1</short-name>
<full-name>Top Group 1</full-name>
<email>topgroup1 at example.com</email>
@@ -485,7 +475,6 @@
<record type="group">
<uid>__sub_group_1__</uid>
- <guid>86144F73-345A-4097-82F1-B782672087C7</guid>
<short-name>sub-group-1</short-name>
<full-name>Sub Group 1</full-name>
<email>subgroup1 at example.com</email>
Copied: CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_util.py (from rev 12898, CalendarServer/branches/users/sagen/move2who-2/txdav/who/test/test_util.py)
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_util.py (rev 0)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/test/test_util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -0,0 +1,96 @@
+##
+# Copyright (c) 2013 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+txdav.who.util tests
+"""
+
+import os
+
+from txdav.who.util import directoryFromConfig
+from twisted.trial.unittest import TestCase
+from twistedcaldav.config import ConfigDict
+from twisted.python.filepath import FilePath
+from txdav.who.augment import AugmentedDirectoryService
+from twext.who.aggregate import DirectoryService as AggregateDirectoryService
+
+
+class StubStore(object):
+ pass
+
+
+
+class UtilTest(TestCase):
+
+ def setUp(self):
+ sourceDir = FilePath(__file__).parent().child("accounts")
+ self.serverRoot = os.path.abspath(self.mktemp())
+ os.mkdir(self.serverRoot)
+ self.dataRoot = os.path.join(self.serverRoot, "data")
+ if not os.path.exists(self.dataRoot):
+ os.makedirs(self.dataRoot)
+ destDir = FilePath(self.dataRoot)
+
+ accounts = destDir.child("accounts.xml")
+ sourceAccounts = sourceDir.child("accounts.xml")
+ accounts.setContent(sourceAccounts.getContent())
+
+ resources = destDir.child("resources.xml")
+ sourceResources = sourceDir.child("resources.xml")
+ resources.setContent(sourceResources.getContent())
+
+ augments = destDir.child("augments.xml")
+ sourceAugments = sourceDir.child("augments.xml")
+ augments.setContent(sourceAugments.getContent())
+
+
+ def test_directoryFromConfig(self):
+
+ config = ConfigDict(
+ {
+ "DataRoot": self.dataRoot,
+ "DirectoryService": {
+ "Enabled": True,
+ "type": "XML",
+ "params": {
+ "xmlFile": "accounts.xml",
+ "recordTypes": ["users", "groups"],
+ },
+ },
+ "ResourceService": {
+ "Enabled": True,
+ "type": "XML",
+ "params": {
+ "xmlFile": "resources.xml",
+ "recordTypes": ["locations", "resources"],
+ },
+ },
+ "AugmentService": {
+ "Enabled": True,
+ # FIXME: This still uses an actual class name:
+ "type": "twistedcaldav.directory.augment.AugmentXMLDB",
+ "params": {
+ "xmlFiles": ["augments.xml"],
+ },
+ },
+ }
+ )
+
+ store = StubStore()
+ service = directoryFromConfig(config, store=store)
+ self.assertTrue(isinstance(service, AugmentedDirectoryService))
+ self.assertTrue(isinstance(service._directory, AggregateDirectoryService))
+ self.assertEquals(len(service._directory.services), 3)
Copied: CalendarServer/branches/users/sagen/move2who-3/txdav/who/util.py (from rev 12898, CalendarServer/branches/users/sagen/move2who-2/txdav/who/util.py)
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/util.py (rev 0)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/util.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -0,0 +1,162 @@
+##
+# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+
+import os
+from twext.python.log import Logger
+from twisted.cred.credentials import UsernamePassword
+from twext.who.aggregate import DirectoryService as AggregateDirectoryService
+from txdav.who.augment import AugmentedDirectoryService
+
+from calendarserver.tap.util import getDBPool, storeFromConfig
+from twext.who.idirectory import RecordType, DirectoryConfigurationError
+from twext.who.ldap import DirectoryService as LDAPDirectoryService
+from twext.who.util import ConstantsContainer
+from twisted.python.filepath import FilePath
+from twisted.python.reflect import namedClass
+from twistedcaldav.config import fullServerPath
+from txdav.who.delegates import DirectoryService as DelegateDirectoryService
+from txdav.who.idirectory import RecordType as CalRecordType
+from txdav.who.xml import DirectoryService as XMLDirectoryService
+
+log = Logger()
+
+
def directoryFromConfig(config, store=None):
    """
    Return a directory service based on the config.  If you want to go through
    AMP to talk to one of these as a client, instantiate
    txdav.dps.client.DirectoryService

    @param config: server configuration; the C{DirectoryService},
        C{ResourceService}, C{AugmentService} and C{DataRoot} keys are read
    @param store: an existing data store to reuse; if C{None} one is created
        from the config (see FIXME below)
    @return: an L{AugmentedDirectoryService} wrapping an
        L{AggregateDirectoryService} of all configured services plus the
        delegate service
    @raise DirectoryConfigurationError: if a configured service has an
        unknown type, an invalid or duplicate record type, a missing XML
        data file, or if no service handles user records
    """

    # MOVE2WHO FIXME: this needs to talk to its own separate database. In fact,
    # don't pass store=None if you already have called storeFromConfig()
    # within this process. Pass the existing store in here.
    if store is None:
        pool, txnFactory = getDBPool(config)
        store = storeFromConfig(config, txnFactory, None)

    aggregatedServices = []

    for serviceKey in ("DirectoryService", "ResourceService"):
        serviceValue = config.get(serviceKey, None)

        # Guard against a missing config section as well as a disabled one;
        # previously a missing key caused an AttributeError on .Enabled.
        if serviceValue is None or not serviceValue.Enabled:
            continue

        directoryType = serviceValue.type.lower()
        params = serviceValue.params

        if "xml" in directoryType:
            xmlFile = params.xmlFile
            xmlFile = fullServerPath(config.DataRoot, xmlFile)
            if not xmlFile or not os.path.exists(xmlFile):
                # A missing data file is fatal: previously we only logged
                # and then handed the bad path to XMLDirectoryService.
                log.error("Path not found for XML directory: {p}", p=xmlFile)
                raise DirectoryConfigurationError
            fp = FilePath(xmlFile)
            directory = XMLDirectoryService(fp)

        elif "opendirectory" in directoryType:
            # Imported lazily; only available where OpenDirectory exists
            from twext.who.opendirectory import DirectoryService as ODDirectoryService
            directory = ODDirectoryService()

        elif "ldap" in directoryType:
            if params.credentials.dn and params.credentials.password:
                creds = UsernamePassword(
                    params.credentials.dn,
                    params.credentials.password
                )
            else:
                creds = None
            directory = LDAPDirectoryService(
                params.uri,
                params.rdnSchema.base,
                creds=creds
            )

        else:
            log.error("Invalid DirectoryType: {dt}", dt=directoryType)
            raise DirectoryConfigurationError

        # Set the appropriate record types on each service
        types = []
        for recordTypeName in params.recordTypes:
            recordType = {
                "users": RecordType.user,
                "groups": RecordType.group,
                "locations": CalRecordType.location,
                "resources": CalRecordType.resource,
                "addresses": CalRecordType.address,
            }.get(recordTypeName, None)
            if recordType is None:
                log.error("Invalid Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError
            if recordType in types:
                log.error("Duplicate Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError
            types.append(recordType)

        directory.recordType = ConstantsContainer(types)
        aggregatedServices.append(directory)

    #
    # Setup the Augment Service
    #
    if config.AugmentService.type:
        augmentClass = namedClass(config.AugmentService.type)
        log.info(
            "Configuring augment service of type: {augmentClass}",
            augmentClass=augmentClass
        )
        try:
            augmentService = augmentClass(**config.AugmentService.params)
        except IOError:
            log.error("Could not start augment service")
            raise
    else:
        augmentService = None

    # The service handling user records supplies the realm name for the
    # delegate and aggregate services; without one we cannot proceed.
    userDirectory = None
    for directory in aggregatedServices:
        if RecordType.user in directory.recordTypes():
            userDirectory = directory
            break
    else:
        log.error("No directory service set up for users")
        raise DirectoryConfigurationError

    delegateDirectory = DelegateDirectoryService(
        userDirectory.realmName,
        store
    )
    aggregatedServices.append(delegateDirectory)

    aggregateDirectory = AggregateDirectoryService(
        userDirectory.realmName, aggregatedServices
    )
    try:
        augmented = AugmentedDirectoryService(
            aggregateDirectory, store, augmentService
        )

        # The delegate directory needs a way to look up user/group records
        # so hand it a reference to the augmented directory.
        # FIXME: is there a better pattern to use here?
        delegateDirectory.setMasterDirectory(augmented)

    except Exception as e:
        log.error("Could not create directory service", error=e)
        raise

    return augmented
Modified: CalendarServer/branches/users/sagen/move2who-3/txdav/who/xml.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txdav/who/xml.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txdav/who/xml.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -143,10 +143,14 @@
(BaseDirectoryService.recordType, RecordType)
)
- fieldName = ConstantsContainer(
- (BaseDirectoryService.fieldName, FieldName)
- )
+ # MOVE2WHO: Wilfredo had added augment fields into xml, which does make
+ # some sense, but for backwards compatibility right now I will take those
+ # out, and rely on a separate augment service
+ # fieldName = ConstantsContainer(
+ # (BaseDirectoryService.fieldName, FieldName)
+ # )
+
# XML schema constants
element = ConstantsContainer(
Modified: CalendarServer/branches/users/sagen/move2who-3/txweb2/channel/http.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txweb2/channel/http.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txweb2/channel/http.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -137,10 +137,10 @@
subclass, it can parse either the client side or the server side of the
connection.
"""
-
+
# Class config:
parseCloseAsEnd = False
-
+
# Instance vars
chunkedIn = False
headerlen = 0
@@ -173,12 +173,12 @@
# channel.pauseProducing()
# channel.resumeProducing()
# channel.stopProducing()
-
-
+
+
def __init__(self, channel):
self.inHeaders = http_headers.Headers()
self.channel = channel
-
+
def lineReceived(self, line):
if self.chunkedIn:
# Parsing a chunked input
@@ -208,7 +208,7 @@
self.chunkedIn = 1
elif self.chunkedIn == 3:
# TODO: support Trailers (maybe! but maybe not!)
-
+
# After getting the final "0" chunk we're here, and we *EAT MERCILESSLY*
# any trailer headers sent, and wait for the blank line to terminate the
# request.
@@ -237,7 +237,7 @@
self.headerlen += len(line)
if self.headerlen > self.channel.maxHeaderLength:
self._abortWithError(responsecode.BAD_REQUEST, 'Headers too long.')
-
+
if line[0] in ' \t':
# Append a header continuation
self.partialHeader += line
@@ -262,7 +262,7 @@
# NOTE: in chunked mode, self.length is the size of the current chunk,
# so we still have more to read.
self.chunkedIn = 2 # Read next chunksize
-
+
channel.setLineMode(extraneous)
@@ -293,13 +293,13 @@
# Set connection parameters from headers
self.setConnectionParams(connHeaders)
self.connHeaders = connHeaders
-
+
def allContentReceived(self):
self.finishedReading = True
self.channel.requestReadFinished(self)
self.handleContentComplete()
-
-
+
+
def splitConnectionHeaders(self):
"""
Split off connection control headers from normal headers.
@@ -382,7 +382,7 @@
# Okay, now implement section 4.4 Message Length to determine
# how to find the end of the incoming HTTP message.
transferEncoding = connHeaders.getHeader('transfer-encoding')
-
+
if transferEncoding:
if transferEncoding[-1] == 'chunked':
# Chunked
@@ -394,7 +394,7 @@
# client->server data. (Well..it could actually, since TCP has half-close
# but the HTTP spec says it can't, so we'll pretend it's right.)
self._abortWithError(responsecode.BAD_REQUEST, "Transfer-Encoding received without chunked in last position.")
-
+
# TODO: support gzip/etc encodings.
# FOR NOW: report an error if the client uses any encodings.
# They shouldn't, because we didn't send a TE: header saying it's okay.
@@ -423,23 +423,23 @@
# Set the calculated persistence
self.channel.setReadPersistent(readPersistent)
-
+
def abortParse(self):
# If we're erroring out while still reading the request
if not self.finishedReading:
self.finishedReading = True
self.channel.setReadPersistent(False)
self.channel.requestReadFinished(self)
-
+
# producer interface
def pauseProducing(self):
if not self.finishedReading:
self.channel.pauseProducing()
-
+
def resumeProducing(self):
if not self.finishedReading:
self.channel.resumeProducing()
-
+
def stopProducing(self):
if not self.finishedReading:
self.channel.stopProducing()
@@ -449,13 +449,13 @@
It is responsible for all the low-level connection oriented behavior.
Thus, it takes care of keep-alive, de-chunking, etc., and passes
the non-connection headers on to the user-level Request object."""
-
+
command = path = version = None
queued = 0
request = None
-
+
out_version = "HTTP/1.1"
-
+
def __init__(self, channel, queued=0):
HTTPParser.__init__(self, channel)
self.queued=queued
@@ -466,14 +466,14 @@
self.transport = StringTransport()
else:
self.transport = self.channel.transport
-
+
# set the version to a fallback for error generation
self.version = (1,0)
def gotInitialLine(self, initialLine):
parts = initialLine.split()
-
+
# Parse the initial request line
if len(parts) != 3:
if len(parts) == 1:
@@ -490,9 +490,9 @@
raise ValueError()
except ValueError:
self._abortWithError(responsecode.BAD_REQUEST, "Unknown protocol: %s" % strversion)
-
+
self.version = protovers[1:3]
-
+
# Ensure HTTP 0 or HTTP 1.
if self.version[0] > 1:
self._abortWithError(responsecode.HTTP_VERSION_NOT_SUPPORTED, 'Only HTTP 0.9 and HTTP 1.x are supported.')
@@ -511,18 +511,18 @@
def processRequest(self):
self.request.process()
-
+
def handleContentChunk(self, data):
self.request.handleContentChunk(data)
-
+
def handleContentComplete(self):
self.request.handleContentComplete()
-
+
############## HTTPChannelRequest *RESPONSE* methods #############
producer = None
chunkedOut = False
finished = False
-
+
##### Request Callbacks #####
def writeIntermediateResponse(self, code, headers=None):
if self.version >= (1,1):
@@ -530,15 +530,15 @@
def writeHeaders(self, code, headers):
self._writeHeaders(code, headers, True)
-
+
def _writeHeaders(self, code, headers, addConnectionHeaders):
# HTTP 0.9 doesn't have headers.
if self.version[0] == 0:
return
-
+
l = []
code_message = responsecode.RESPONSES.get(code, "Unknown Status")
-
+
l.append('%s %s %s\r\n' % (self.out_version, code,
code_message))
if headers is not None:
@@ -557,16 +557,16 @@
else:
# Cannot use persistent connections if we can't do chunking
self.channel.dropQueuedRequests()
-
+
if self.channel.isLastRequest(self):
l.append("%s: %s\r\n" % ('Connection', 'close'))
elif self.version < (1,1):
l.append("%s: %s\r\n" % ('Connection', 'Keep-Alive'))
-
+
l.append("\r\n")
self.transport.writeSequence(l)
-
-
+
+
def write(self, data):
if not data:
return
@@ -574,17 +574,17 @@
self.transport.writeSequence(("%X\r\n" % len(data), data, "\r\n"))
else:
self.transport.write(data)
-
+
def finish(self):
"""We are finished writing data."""
if self.finished:
warnings.warn("Warning! request.finish called twice.", stacklevel=2)
return
-
+
if self.chunkedOut:
# write last chunk and closing CRLF
self.transport.write("0\r\n\r\n")
-
+
self.finished = True
if not self.queued:
self._cleanup()
@@ -596,7 +596,7 @@
the writing side alone. This is mostly for internal use by
the HTTP request parsing logic, so that it can call an error
page generator.
-
+
Otherwise, completely shut down the connection.
"""
self.abortParse()
@@ -604,7 +604,7 @@
if self.producer:
self.producer.stopProducing()
self.unregisterProducer()
-
+
self.finished = True
if self.queued:
self.transport.reset()
@@ -617,14 +617,14 @@
def getRemoteHost(self):
return self.channel.transport.getPeer()
-
+
##### End Request Callbacks #####
def _abortWithError(self, errorcode, text=''):
"""Handle low level protocol errors."""
headers = http_headers.Headers()
headers.setHeader('content-length', len(text)+1)
-
+
self.abortConnection(closeWrite=False)
self.writeHeaders(errorcode, headers)
self.write(text)
@@ -632,7 +632,7 @@
self.finish()
log.warn("Aborted request (%d) %s" % (errorcode, text))
raise AbortedException
-
+
def _cleanup(self):
"""Called when have finished responding and are no longer queued."""
if self.producer:
@@ -640,7 +640,7 @@
self.unregisterProducer()
self.channel.requestWriteFinished(self)
del self.transport
-
+
# methods for channel - end users should not use these
def noLongerQueued(self):
@@ -674,12 +674,12 @@
def registerProducer(self, producer, streaming):
"""Register a producer.
"""
-
+
if self.producer:
raise ValueError, "registering producer %s before previous one (%s) was unregistered" % (producer, self.producer)
-
+
self.producer = producer
-
+
if self.queued:
producer.pauseProducing()
else:
@@ -698,7 +698,7 @@
self.producer = None
if self.request:
self.request.connectionLost(reason)
-
+
class HTTPChannel(basic.LineReceiver, policies.TimeoutMixin, object):
"""A receiver for HTTP requests. Handles splitting up the connection
for the multiple HTTPChannelRequests that may be in progress on this
@@ -714,11 +714,11 @@
the client.
"""
-
+
implements(interfaces.IHalfCloseableProtocol)
-
+
## Configuration parameters. Set in instances or subclasses.
-
+
# How many simultaneous requests to handle.
maxPipeline = 4
@@ -736,35 +736,35 @@
# Allow persistent connections?
allowPersistentConnections = True
-
+
# ChannelRequest
chanRequestFactory = HTTPChannelRequest
requestFactory = http.Request
-
-
+
+
_first_line = 2
readPersistent = PERSIST_PIPELINE
-
+
_readLost = False
_writeLost = False
-
+
_abortTimer = None
chanRequest = None
def _callLater(self, secs, fun):
reactor.callLater(secs, fun)
-
+
def __init__(self):
# the request queue
self.requests = []
-
+
def connectionMade(self):
self._secure = interfaces.ISSLTransport(self.transport, None) is not None
address = self.transport.getHost()
self._host = _cachedGetHostByAddr(address.host)
self.setTimeout(self.inputTimeOut)
self.factory.addConnectedChannel(self)
-
+
def lineReceived(self, line):
if self._first_line:
self.setTimeout(self.inputTimeOut)
@@ -779,13 +779,13 @@
if not line and self._first_line == 1:
self._first_line = 2
return
-
+
self._first_line = 0
-
+
if not self.allowPersistentConnections:
# Don't allow a second request
self.readPersistent = False
-
+
try:
self.chanRequest = self.chanRequestFactory(self, len(self.requests))
self.requests.append(self.chanRequest)
@@ -801,7 +801,7 @@
def lineLengthExceeded(self, line):
if self._first_line:
# Fabricate a request object to respond to the line length violation.
- self.chanRequest = self.chanRequestFactory(self,
+ self.chanRequest = self.chanRequestFactory(self,
len(self.requests))
self.requests.append(self.chanRequest)
self.chanRequest.gotInitialLine("GET fake HTTP/1.0")
@@ -809,7 +809,7 @@
self.chanRequest.lineLengthExceeded(line, self._first_line)
except AbortedException:
pass
-
+
def rawDataReceived(self, data):
self.setTimeout(self.inputTimeOut)
try:
@@ -821,17 +821,17 @@
if(self.readPersistent is PERSIST_NO_PIPELINE or
len(self.requests) >= self.maxPipeline):
self.pauseProducing()
-
+
# reset state variables
self._first_line = 1
self.chanRequest = None
self.setLineMode()
-
+
# Set an idle timeout, in case this request takes a long
# time to finish generating output.
if len(self.requests) > 0:
self.setTimeout(self.idleTimeOut)
-
+
def _startNextRequest(self):
# notify next request, if present, it can start writing
del self.requests[0]
@@ -840,7 +840,7 @@
self.transport.loseConnection()
elif self.requests:
self.requests[0].noLongerQueued()
-
+
# resume reading if allowed to
if(not self._readLost and
self.readPersistent is not PERSIST_NO_PIPELINE and
@@ -866,11 +866,11 @@
for request in self.requests[1:]:
request.connectionLost(None)
del self.requests[1:]
-
+
def isLastRequest(self, request):
# Is this channel handling the last possible request
return not self.readPersistent and self.requests[-1] == request
-
+
def requestWriteFinished(self, request):
"""Called by first request in queue when it is done."""
if request != self.requests[0]: raise TypeError
@@ -878,7 +878,7 @@
# Don't del because we haven't finished cleanup, so,
# don't want queue len to be 0 yet.
self.requests[0] = None
-
+
if self.readPersistent or len(self.requests) > 1:
# Do this in the next reactor loop so as to
# not cause huge call stacks with fast
@@ -910,26 +910,26 @@
self._abortTimer = None
self.transport.loseConnection()
return
-
+
# If between requests, drop connection
# when all current requests have written their data.
self._readLost = True
if not self.requests:
# No requests in progress, lose now.
self.transport.loseConnection()
-
+
# If currently in the process of reading a request, this is
# probably a client abort, so lose the connection.
if self.chanRequest:
self.transport.loseConnection()
-
+
def connectionLost(self, reason):
self.factory.removeConnectedChannel(self)
self._writeLost = True
self.readConnectionLost()
self.setTimeout(None)
-
+
# Tell all requests to abort.
for request in self.requests:
if request is not None:
@@ -963,7 +963,7 @@
"""
protocol = HTTPChannel
-
+
protocolArgs = None
def __init__(self, requestFactory, maxRequests=600, **kwargs):
@@ -977,9 +977,9 @@
def buildProtocol(self, addr):
if self.outstandingRequests >= self.maxRequests:
return OverloadedServerProtocol()
-
+
p = protocol.ServerFactory.buildProtocol(self, addr)
-
+
for arg,value in self.protocolArgs.iteritems():
setattr(p, arg, value)
return p
@@ -1050,19 +1050,19 @@
return p
class HTTPLoggingChannelRequest(HTTPChannelRequest):
-
+
class TransportLoggingWrapper(object):
-
+
def __init__(self, transport, logData):
-
+
self.transport = transport
self.logData = logData
-
+
def write(self, data):
if self.logData is not None and data:
self.logData.append(data)
self.transport.write(data)
-
+
def writeSequence(self, seq):
if self.logData is not None and seq:
self.logData.append(''.join(seq))
@@ -1075,7 +1075,7 @@
def __init__(self):
self.request = []
self.response = []
-
+
def __init__(self, channel, queued=0):
super(HTTPLoggingChannelRequest, self).__init__(channel, queued)
@@ -1093,7 +1093,7 @@
super(HTTPLoggingChannelRequest, self).gotInitialLine(initialLine)
def lineReceived(self, line):
-
+
if self.logData is not None:
# We don't want to log basic credentials
loggedLine = line
@@ -1105,13 +1105,13 @@
super(HTTPLoggingChannelRequest, self).lineReceived(line)
def handleContentChunk(self, data):
-
+
if self.logData is not None:
self.logData.request.append(data)
super(HTTPLoggingChannelRequest, self).handleContentChunk(data)
-
+
def handleContentComplete(self):
-
+
if self.logData is not None:
doneTime = time.time()
self.logData.request.append("\r\n\r\n>>>> Request complete at: %.3f (elapsed: %.1f ms)" % (doneTime, 1000 * (doneTime - self.startTime),))
@@ -1124,7 +1124,7 @@
super(HTTPLoggingChannelRequest, self).writeHeaders(code, headers)
def finish(self):
-
+
super(HTTPLoggingChannelRequest, self).finish()
if self.logData is not None:
Modified: CalendarServer/branches/users/sagen/move2who-3/txweb2/dav/resource.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txweb2/dav/resource.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txweb2/dav/resource.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -997,6 +997,7 @@
if the authentication scheme is unsupported, or the
credentials provided by the request are not valid.
"""
+
# Bypass normal authentication if its already been done (by SACL check)
if (
hasattr(request, "authnUser") and
@@ -1134,7 +1135,7 @@
# The default behaviour is no ACL; we should inherit from the parent
# collection.
#
- return element.ACL()
+ return succeed(element.ACL())
def setAccessControlList(self, acl):
@@ -1360,6 +1361,7 @@
@return: a L{Deferred} that callbacks with C{None} or errbacks
with an L{AccessDeniedError}
"""
+
if principal is None:
principal = self.currentPrincipal(request)
@@ -1509,7 +1511,7 @@
# If we get to the root without any ACLs, then use the default.
acl = self.defaultRootAccessControlList()
else:
- acl = self.defaultAccessControlList()
+ acl = yield self.defaultAccessControlList()
# Dynamically update privileges for those ace's that are inherited.
if inheritance:
@@ -1618,6 +1620,7 @@
return []
+ @inlineCallbacks
def principalsForAuthID(self, request, authid):
"""
Return authentication and authorization principal identifiers
@@ -1637,16 +1640,16 @@
HTTPError(responsecode.FORBIDDEN) if the principal isn't
found.
"""
- authnPrincipal = self.findPrincipalForAuthID(authid)
+ authnPrincipal = yield self.findPrincipalForAuthID(authid)
if authnPrincipal is None:
- return succeed((None, None))
+ returnValue((None, None))
- d = self.authorizationPrincipal(request, authid, authnPrincipal)
- d.addCallback(lambda authzPrincipal: (authnPrincipal, authzPrincipal))
- return d
+ authzPrincipal = yield self.authorizationPrincipal(request, authid, authnPrincipal)
+ returnValue((authnPrincipal, authzPrincipal))
+ @inlineCallbacks
def findPrincipalForAuthID(self, authid):
"""
Return authentication and authorization principal identifiers
@@ -1662,10 +1665,10 @@
found return None.
"""
for collection in self.principalCollections():
- principal = collection.principalForUser(authid)
+ principal = yield collection.principalForUser(authid)
if principal is not None:
- return principal
- return None
+ returnValue(principal)
+ returnValue(None)
def authorizationPrincipal(self, request, authid, authnPrincipal):
Modified: CalendarServer/branches/users/sagen/move2who-3/txweb2/server.py
===================================================================
--- CalendarServer/branches/users/sagen/move2who-3/txweb2/server.py 2014-03-13 20:34:06 UTC (rev 12898)
+++ CalendarServer/branches/users/sagen/move2who-3/txweb2/server.py 2014-03-13 20:40:10 UTC (rev 12899)
@@ -192,7 +192,7 @@
error.defaultErrorHandler, defaultHeadersFilter]
def __init__(self, *args, **kw):
-
+
self.timeStamps = [("t", time.time(),)]
if kw.has_key('site'):
@@ -308,10 +308,10 @@
clients into using an inappropriate scheme for subsequent requests. What we should do is
take the port number from the Host header or request-URI and map that to the scheme that
matches the service we configured to listen on that port.
-
+
@param port: the port number to test
@type port: C{int}
-
+
@return: C{True} if scheme is https (secure), C{False} otherwise
@rtype: C{bool}
"""
@@ -322,7 +322,7 @@
return True
elif port in self.site.BindSSLPorts:
return True
-
+
return False
def _fixupURLParts(self):
@@ -558,7 +558,7 @@
break
else:
postSegments.insert(0, preSegments.pop())
-
+
if cachedParent is None:
cachedParent = self.site.resource
postSegments = segments[1:]
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20140313/1a31701e/attachment-0001.html>
More information about the calendarserver-changes
mailing list