[CalendarServer-changes] [15761] CalendarServer/branches/users/cdaboo/better-next-job
source_changes at macosforge.org
Tue Jul 12 19:23:31 PDT 2016
Revision: 15761
http://trac.calendarserver.org//changeset/15761
Author: cdaboo at apple.com
Date: 2016-07-12 19:23:31 -0700 (Tue, 12 Jul 2016)
Log Message:
-----------
Modified Paths:
--------------
CalendarServer/branches/users/cdaboo/better-next-job/bin/_build.sh
CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tap/caldav.py
CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/gateway.py
CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/principals.py
CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/test/test_principals.py
CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/util.py
CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/webadmin/delegation.py
CalendarServer/branches/users/cdaboo/better-next-job/conf/caldavd-stdconfig.plist
CalendarServer/branches/users/cdaboo/better-next-job/twistedcaldav/stdconfig.py
CalendarServer/branches/users/cdaboo/better-next-job/txdav/caldav/icalendardirectoryservice.py
CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/client.py
CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/commands.py
CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/server.py
CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/test/test_client.py
CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/augment.py
CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/directory.py
CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/test/test_directory.py
Property Changed:
----------------
CalendarServer/branches/users/cdaboo/better-next-job/
Property changes on: CalendarServer/branches/users/cdaboo/better-next-job
___________________________________________________________________
Modified: svn:mergeinfo
- /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
/CalendarServer/branches/release/CalendarServer-5.2-dev:11972,12357-12358,12794,12814
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/cross-pod-sharing:12038-12191
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/json:11622-11912
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/newlog:15301
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
/CalendarServer/branches/users/cdaboo/pod2pod-migration:14338-14520
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
/CalendarServer/branches/users/cdaboo/scheduling-queue-refresh:11783-12557
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/cdaboo/update-packages:15273-15284
/CalendarServer/branches/users/gaya/cleanrevisions:12152-12334
/CalendarServer/branches/users/gaya/groupsharee2:13669-13773
/CalendarServer/branches/users/gaya/sharedgroupfixes:12120-12142
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/move2who:12819-12860
/CalendarServer/branches/users/sagen/move2who-2:12861-12898
/CalendarServer/branches/users/sagen/move2who-3:12899-12913
/CalendarServer/branches/users/sagen/move2who-4:12914-13157
/CalendarServer/branches/users/sagen/move2who-5:13158-13163
/CalendarServer/branches/users/sagen/newcua:13309-13327
/CalendarServer/branches/users/sagen/newcua-1:13328-13330
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/recordtypes:13648-13656
/CalendarServer/branches/users/sagen/recordtypes-2:13657
/CalendarServer/branches/users/sagen/request-socket:14748-14767
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/sagen/trashcan:14185-14269
/CalendarServer/branches/users/sagen/trashcan-2:14270-14324
/CalendarServer/branches/users/sagen/trashcan-3:14325-14450
/CalendarServer/branches/users/sagen/trashcan-4:14451-14471
/CalendarServer/branches/users/sagen/trashcan-5:14471-14555
/CalendarServer/branches/users/wsanchez/psycopg2cffi:14427-14439
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:15717-15725,15729-15747,15751-15752
+ /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
/CalendarServer/branches/release/CalendarServer-5.2-dev:11972,12357-12358,12794,12814
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/cross-pod-sharing:12038-12191
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/json:11622-11912
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/newlog:15301
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
/CalendarServer/branches/users/cdaboo/pod2pod-migration:14338-14520
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
/CalendarServer/branches/users/cdaboo/scheduling-queue-refresh:11783-12557
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/cdaboo/update-packages:15273-15284
/CalendarServer/branches/users/gaya/cleanrevisions:12152-12334
/CalendarServer/branches/users/gaya/groupsharee2:13669-13773
/CalendarServer/branches/users/gaya/sharedgroupfixes:12120-12142
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/move2who:12819-12860
/CalendarServer/branches/users/sagen/move2who-2:12861-12898
/CalendarServer/branches/users/sagen/move2who-3:12899-12913
/CalendarServer/branches/users/sagen/move2who-4:12914-13157
/CalendarServer/branches/users/sagen/move2who-5:13158-13163
/CalendarServer/branches/users/sagen/newcua:13309-13327
/CalendarServer/branches/users/sagen/newcua-1:13328-13330
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/recordtypes:13648-13656
/CalendarServer/branches/users/sagen/recordtypes-2:13657
/CalendarServer/branches/users/sagen/request-socket:14748-14767
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/sagen/trashcan:14185-14269
/CalendarServer/branches/users/sagen/trashcan-2:14270-14324
/CalendarServer/branches/users/sagen/trashcan-3:14325-14450
/CalendarServer/branches/users/sagen/trashcan-4:14451-14471
/CalendarServer/branches/users/sagen/trashcan-5:14471-14555
/CalendarServer/branches/users/wsanchez/psycopg2cffi:14427-14439
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:15717-15725,15729-15747,15751-15752,15754-15757
Modified: CalendarServer/branches/users/cdaboo/better-next-job/bin/_build.sh
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/bin/_build.sh 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/bin/_build.sh 2016-07-13 02:23:31 UTC (rev 15761)
@@ -168,6 +168,13 @@
use_openssl="false"
;;
esac;
+ else
+ case "$(uname -s)" in
+ Darwin)
+ # Needed to build OpenSSL 64-bit on OS X
+ export KERNEL_BITS=64
+ ;;
+ esac
fi;
conditional_set requirements "${default_requirements}"
@@ -490,7 +497,7 @@
if [ ${use_openssl} == "true" ]; then
ruler;
- local min_ssl_version="9470095"; # OpenSSL 0.9.8zf
+ local min_ssl_version="268443791"; # OpenSSL 1.0.2h
local ssl_version="$(c_macro openssl/ssl.h OPENSSL_VERSION_NUMBER)";
if [ -z "${ssl_version}" ]; then ssl_version="0x0"; fi;
@@ -499,13 +506,13 @@
if [ "${ssl_version}" -ge "${min_ssl_version}" ]; then
using_system "OpenSSL";
else
- local v="0.9.8zh";
+ local v="1.0.2h";
local n="openssl";
local p="${n}-${v}";
# use 'config' instead of 'configure'; 'make' instead of 'jmake'.
# also pass 'shared' to config to build shared libs.
- c_dependency -c "config" -s "3ff71636bea85a99f4d76a10d119c09bda0421e3" \
+ c_dependency -c "config" -s "577585f5f5d299c44dd3c993d3c0ac7a219e4949" \
-p "make depend" -b "make" \
"openssl" "${p}" \
"http://www.openssl.org/source/${p}.tar.gz" "shared";
Modified: CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tap/caldav.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tap/caldav.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -245,6 +245,7 @@
self.logRotateLength = logRotateLength
self.logMaxFiles = logMaxFiles
self.logRotateOnStart = logRotateOnStart
+ self.name = "elms"
def setServiceParent(self, app):
@@ -280,7 +281,7 @@
# The ConnectionService is a MultiService which bundles all the connection
# services together for the purposes of being able to stop them and wait
# for all of their connections to close before shutting down.
- connectionServiceName = "ConnectionService"
+ connectionServiceName = "cs"
def __init__(self, logObserver):
self.logObserver = logObserver # accesslog observer
@@ -292,6 +293,7 @@
config.ErrorLogMaxRotatedFiles,
config.ErrorLogRotateOnStart,
)
+ self.name = "cds"
def privilegedStartService(self):
@@ -862,6 +864,11 @@
frame=frame
)
+ if config.Manhole.Enabled:
+ namespace= dict({service.name: service})
+ for n, s in service.namedServices.iteritems():
+ namespace[n] = s
+ self._makeManhole(namespace=namespace, parent=service)
return service
@@ -898,6 +905,7 @@
result = self.requestProcessingService(options, store, logObserver)
if pool is not None:
+ pool.setName("db")
pool.setServiceParent(result)
if config.ControlSocket:
@@ -937,6 +945,7 @@
controlClient = ControlSocketConnectingService(
endpointFactory, controlSocketClient
)
+ controlClient.setName("control")
controlClient.setServiceParent(result)
# Optionally set up push notifications
@@ -947,6 +956,7 @@
pushSubService = ApplePushNotifierService.makeService(
config.Notifications.Services.APNS, store)
observers.append(pushSubService)
+ pushSubService.setName("APNS")
pushSubService.setServiceParent(result)
if config.Notifications.Services.AMP.Enabled:
pushSubService = AMPPushForwarder(controlSocketClient)
@@ -959,6 +969,7 @@
mailRetriever = MailRetriever(
store, directory, config.Scheduling.iMIP.Receiving
)
+ mailRetriever.setName("MailRetriever")
mailRetriever.setServiceParent(result)
else:
mailRetriever = None
@@ -988,37 +999,6 @@
txn._groupCacher = groupCacher
store.callWithNewTransactions(decorateTransaction)
-
- # Optionally enable Manhole access
- if config.Manhole.Enabled:
- try:
- from twisted.conch.manhole_tap import (
- makeService as manholeMakeService
- )
- portString = "tcp:{:d}:interface=127.0.0.1".format(
- config.Manhole.StartingPortNumber + int(config.LogID) + 1
- )
- manholeService = manholeMakeService({
- "sshPort": None,
- "telnetPort": portString,
- "namespace": {
- "config": config,
- "service": result,
- "store": store,
- "directory": directory,
- },
- "passwd": config.Manhole.PasswordFilePath,
- })
- manholeService.setServiceParent(result)
- # Using print(because logging isn't ready at this point)
- print("Manhole access enabled:", portString)
-
- except ImportError:
- print(
- "Manhole access could not enabled because "
- "manhole_tap could not be imported"
- )
-
return result
@@ -1181,10 +1161,12 @@
# 'SSL' tag on it, since that's the only time it's used.
contextFactory = None
- ReportingHTTPService(
+ reportingService = ReportingHTTPService(
requestFactory, int(config.MetaFD), contextFactory,
- usingSocketFile=config.SocketFiles.Enabled
- ).setServiceParent(connectionService)
+ usingSocketFile=config.SocketFiles.Enabled,
+ )
+ reportingService.setName("http-{}".format(int(config.MetaFD)))
+ reportingService.setServiceParent(connectionService)
else: # Not inheriting, therefore we open our own:
for bindAddress in self._allBindAddresses():
@@ -1211,6 +1193,8 @@
backlog=config.ListenBacklog,
inherit=False
)
+ httpsService.setName(
+ "https-{}:{}".format(bindAddress,int(port)))
httpsService.setServiceParent(connectionService)
for port in config.BindHTTPPorts:
@@ -1311,6 +1295,59 @@
Popen(memcachedArgv)
+ def _makeManhole(self, namespace=None, parent=None):
+ try:
+ import inspect
+ import objgraph
+ except ImportError:
+ pass
+ try:
+ if 'inspect' in locals():
+ namespace['ins'] = inspect
+ if 'objgraph' in locals():
+ namespace['og'] = objgraph
+ from pprint import pprint
+ namespace.update({
+ 'pp': pprint,
+ 'cfg': config,
+ })
+ from twisted.conch.manhole_tap import (
+ makeService as manholeMakeService
+ )
+ portOffset = 0 if config.LogID == '' else int(config.LogID) + 1
+ portString = "tcp:{:d}:interface=127.0.0.1".format(
+ config.Manhole.StartingPortNumber + portOffset
+ )
+ manholeService = manholeMakeService({
+ "passwd": config.Manhole.PasswordFilePath,
+ "telnetPort":
+ portString if config.Manhole.UseSSH is False else None,
+ "sshPort":
+ portString if config.Manhole.UseSSH is True else None,
+ "sshKeyDir": config.DataRoot,
+ "sshKeyName": config.Manhole.sshKeyName,
+ "sshKeySize": config.Manhole.sshKeySize,
+ "namespace": namespace,
+ })
+ manholeService.setName("manhole")
+ if parent is not None:
+ manholeService.setServiceParent(parent)
+ # Using print(because logging isn't ready at this point)
+ print("Manhole access enabled:", portString)
+ except ImportError:
+ print(
+ "Manhole access could not enabled because "
+ "manhole_tap could not be imported."
+ )
+ import platform
+ if platform.system() == "Darwin":
+ if config.Manhole.UseSSH:
+ print(
+ "Set Manhole.UseSSH to false or rebuild CS with the "
+ "USE_OPENSSL environment variable set."
+ )
+
+
def makeService_Single(self, options):
"""
Create a service to be used in a single-process, stand-alone
@@ -1332,6 +1369,7 @@
config.Notifications.Services.APNS, store
)
observers.append(pushSubService)
+ pushSubService.setName("APNS")
pushSubService.setServiceParent(result)
if config.Notifications.Services.AMP.Enabled:
pushSubService = AMPPushMaster(
@@ -1362,6 +1400,7 @@
mailRetriever = MailRetriever(
store, directory, config.Scheduling.iMIP.Receiving
)
+ mailRetriever.setName("mailRetriever")
mailRetriever.setServiceParent(result)
else:
mailRetriever = None
@@ -1401,35 +1440,6 @@
else:
groupCacher = None
- # Optionally enable Manhole access
- if config.Manhole.Enabled:
- try:
- from twisted.conch.manhole_tap import (
- makeService as manholeMakeService
- )
- portString = "tcp:{:d}:interface=127.0.0.1".format(
- config.Manhole.StartingPortNumber
- )
- manholeService = manholeMakeService({
- "sshPort": None,
- "telnetPort": portString,
- "namespace": {
- "config": config,
- "service": result,
- "store": store,
- "directory": directory,
- },
- "passwd": config.Manhole.PasswordFilePath,
- })
- manholeService.setServiceParent(result)
- # Using print(because logging isn't ready at this point)
- print("Manhole access enabled:", portString)
- except ImportError:
- print(
- "Manhole access could not enabled because "
- "manhole_tap could not be imported"
- )
-
def decorateTransaction(txn):
txn._pushDistributor = pushDistributor
txn._rootResource = result.rootResource
@@ -1526,6 +1536,7 @@
config.ErrorLogMaxRotatedFiles,
config.ErrorLogRotateOnStart,
)
+ svc.setName("agent")
svc.setServiceParent(agentLoggingService)
return agentLoggingService
@@ -1582,6 +1593,7 @@
dbtype=DatabaseType(dialect, paramstyle, dbfeatures),
maxConnections=config.MaxDBConnectionsPerPool
)
+ cp.setName("db")
cp.setServiceParent(ms)
store = storeFromConfigWithoutDPS(config, cp.connection)
@@ -1655,6 +1667,7 @@
UpgradeReleaseLockStep(store)
)
+ pps.setName("pre")
pps.setServiceParent(ms)
return ms
@@ -1784,6 +1797,7 @@
monitor = DelayedStartupProcessMonitor()
s.processMonitor = monitor
+ monitor.setName("pm")
monitor.setServiceParent(s)
if config.MemoryLimiter.Enabled:
@@ -1791,6 +1805,7 @@
monitor, config.MemoryLimiter.Seconds,
config.MemoryLimiter.Bytes, config.MemoryLimiter.ResidentOnly
)
+ memoryLimiter.setName("ml")
memoryLimiter.setServiceParent(s)
# Maybe spawn memcached through a ProcessMonitor
@@ -1877,32 +1892,6 @@
statsService.setName("tcp-stats")
statsService.setServiceParent(s)
- # Optionally enable Manhole access
- if config.Manhole.Enabled:
- try:
- from twisted.conch.manhole_tap import (
- makeService as manholeMakeService
- )
- portString = "tcp:{:d}:interface=127.0.0.1".format(
- config.Manhole.StartingPortNumber
- )
- manholeService = manholeMakeService({
- "sshPort": None,
- "telnetPort": portString,
- "namespace": {
- "config": config,
- "service": s,
- },
- "passwd": config.Manhole.PasswordFilePath,
- })
- manholeService.setServiceParent(s)
- # Using print(because logging isn't ready at this point)
- print("Manhole access enabled:", portString)
- except ImportError:
- print(
- "Manhole access could not enabled because "
- "manhole_tap could not be imported"
- )
# Finally, let's get the real show on the road. Create a service that
@@ -1944,11 +1933,13 @@
else:
dispenser = None
multi = MultiService()
+ multi.setName("multi")
pool.setServiceParent(multi)
spawner = SlaveSpawnerService(
self, monitor, dispenser, dispatcher, stats, options["config"],
inheritFDs=inheritFDs, inheritSSLFDs=inheritSSLFDs
)
+ spawner.setName("spawner")
spawner.setServiceParent(multi)
if config.UseMetaFD:
cl.setServiceParent(multi)
@@ -1961,6 +1952,7 @@
mailRetriever = MailRetriever(
store, directory, config.Scheduling.iMIP.Receiving
)
+ mailRetriever.setName("MailRetriever")
mailRetriever.setServiceParent(multi)
else:
mailRetriever = None
@@ -1993,6 +1985,7 @@
ssvc = self.storageService(
spawnerSvcCreator, None, uid, gid
)
+ ssvc.setName("ssvc")
ssvc.setServiceParent(s)
return s
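
The name assignments and scattered setName() calls above exist so that the consolidated _makeManhole() can build its interpreter namespace from the parent service's named children instead of a hard-coded dict. A minimal sketch of the Twisted mechanics being relied on (stand-in services; the names "elms", "db" and "control" are taken from the hunks above):

    from twisted.application.service import MultiService, Service

    parent = MultiService()
    parent.setName("elms")

    pool = Service()
    pool.setName("db")              # must be called before setServiceParent()
    pool.setServiceParent(parent)

    control = Service()
    control.setName("control")
    control.setServiceParent(parent)

    namespace = {parent.name: parent}
    for n, s in parent.namedServices.iteritems():   # Python 2, as in the codebase
        namespace[n] = s
    # namespace == {"elms": parent, "db": pool, "control": control}
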
Modified: CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/gateway.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/gateway.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/gateway.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -35,16 +35,14 @@
WRITABLE_CONFIG_KEYS, setKeyPath, getKeyPath, flattenDictionary,
WritableConfig
)
-from calendarserver.tools.principals import (
- getProxies, setProxies
-)
from calendarserver.tools.purge import (
WorkerService, PurgeOldEventsService,
DEFAULT_BATCH_SIZE, DEFAULT_RETAIN_DAYS,
PrincipalPurgeWork
)
from calendarserver.tools.util import (
- recordForPrincipalID, autoDisableMemcached
+ recordForPrincipalID, autoDisableMemcached,
+ getProxies, setProxies
)
from pycalendar.datetime import DateTime
from twext.who.directory import DirectoryRecord
Modified: CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/principals.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/principals.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/principals.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -32,7 +32,7 @@
from calendarserver.tools.cmdline import utilityMain, WorkerService
from calendarserver.tools.util import (
- recordForPrincipalID, prettyRecord
+ recordForPrincipalID, prettyRecord, action_addProxy, action_removeProxy
)
from twext.who.directory import DirectoryRecord
from twext.who.idirectory import RecordType, InvalidDirectoryRecordError
@@ -40,8 +40,7 @@
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twistedcaldav.config import config
from twistedcaldav.cache import MemcacheChangeNotifier
-from txdav.who.delegates import Delegates, RecordType as DelegateRecordType, \
- CachingDelegates
+from txdav.who.delegates import CachingDelegates
from txdav.who.idirectory import AutoScheduleMode
from txdav.who.groups import GroupCacherPollingWork
@@ -604,108 +603,11 @@
-@inlineCallbacks
-def _addRemoveProxy(msg, fn, store, record, proxyType, *proxyIDs):
- directory = store.directoryService()
- readWrite = (proxyType == "write")
- for proxyID in proxyIDs:
- proxyRecord = yield recordForPrincipalID(directory, proxyID)
- if proxyRecord is None:
- print("Invalid principal ID: %s" % (proxyID,))
- else:
- txn = store.newTransaction()
- yield fn(txn, record, proxyRecord, readWrite)
- yield txn.commit()
- print(
- "{msg} {proxy} as a {proxyType} proxy for {record}".format(
- msg=msg, proxy=prettyRecord(proxyRecord),
- proxyType=proxyType, record=prettyRecord(record)
- )
- )
-@inlineCallbacks
-def action_addProxy(store, record, proxyType, *proxyIDs):
- if config.GroupCaching.Enabled and config.GroupCaching.UseDirectoryBasedDelegates:
- if record.recordType in (
- record.service.recordType.location,
- record.service.recordType.resource,
- ):
- print("You are not allowed to add proxies for locations or resources via command line when their proxy assignments come from the directory service.")
- returnValue(None)
- yield _addRemoveProxy("Added", Delegates.addDelegate, store, record, proxyType, *proxyIDs)
-
-
-
@inlineCallbacks
-def action_removeProxy(store, record, *proxyIDs):
- if config.GroupCaching.Enabled and config.GroupCaching.UseDirectoryBasedDelegates:
- if record.recordType in (
- record.service.recordType.location,
- record.service.recordType.resource,
- ):
- print("You are not allowed to remove proxies for locations or resources via command line when their proxy assignments come from the directory service.")
- returnValue(None)
-
- # Write
- yield _addRemoveProxy("Removed", Delegates.removeDelegate, store, record, "write", *proxyIDs)
- # Read
- yield _addRemoveProxy("Removed", Delegates.removeDelegate, store, record, "read", *proxyIDs)
-
-
-
-@inlineCallbacks
-def setProxies(record, readProxyRecords, writeProxyRecords):
- """
- Set read/write proxies en masse for a record
- @param record: L{IDirectoryRecord}
- @param readProxyRecords: a list of records
- @param writeProxyRecords: a list of records
- """
-
- proxyTypes = [
- (DelegateRecordType.readDelegateGroup, readProxyRecords),
- (DelegateRecordType.writeDelegateGroup, writeProxyRecords),
- ]
- for recordType, proxyRecords in proxyTypes:
- if proxyRecords is None:
- continue
- proxyGroup = yield record.service.recordWithShortName(
- recordType, record.uid
- )
- yield proxyGroup.setMembers(proxyRecords)
-
-
-
-@inlineCallbacks
-def getProxies(record):
- """
- Returns a tuple containing the records for read proxies and write proxies
- of the given record
- """
-
- allProxies = {
- DelegateRecordType.readDelegateGroup: [],
- DelegateRecordType.writeDelegateGroup: [],
- }
- for recordType in allProxies.iterkeys():
- proxyGroup = yield record.service.recordWithShortName(
- recordType, record.uid
- )
- allProxies[recordType] = yield proxyGroup.members()
-
- returnValue(
- (
- allProxies[DelegateRecordType.readDelegateGroup],
- allProxies[DelegateRecordType.writeDelegateGroup]
- )
- )
-
-
-
-@inlineCallbacks
def action_listGroupMembers(store, record):
members = yield record.members()
if members:
@@ -871,17 +773,10 @@
)
)
- # Get original fields
- newFields = record.fields.copy()
+ yield record.setAutoScheduleMode(autoScheduleMode)
- # Set new values
- newFields[record.service.fieldName.autoScheduleMode] = autoScheduleMode
- updatedRecord = DirectoryRecord(record.service, newFields)
- yield record.service.updateRecords([updatedRecord], create=False)
-
-
@inlineCallbacks
def action_setAutoAcceptGroup(store, record, autoAcceptGroup):
if record.recordType == RecordType.group:
Modified: CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/test/test_principals.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/test/test_principals.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/test/test_principals.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -19,8 +19,11 @@
from twistedcaldav.stdconfig import config
from calendarserver.tools.principals import (
parseCreationArgs, matchStrings,
- recordForPrincipalID, getProxies, setProxies
+ recordForPrincipalID
)
+from calendarserver.tools.util import (
+ getProxies, setProxies
+)
from twext.python.filepath import CachingFilePath as FilePath
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
Modified: CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/util.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/tools/util.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -30,7 +30,6 @@
import socket
from pwd import getpwnam
from grp import getgrnam
-from uuid import UUID
from calendarserver.tools import diagnose
@@ -41,9 +40,6 @@
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue
-from txdav.xml import element as davxml
-
-
from twistedcaldav import memcachepool
from txdav.base.propertystore.base import PropertyName
from txdav.xml import element
@@ -52,7 +48,9 @@
from twext.who.idirectory import RecordType
from txdav.who.idirectory import RecordType as CalRecordType
+from txdav.who.delegates import Delegates, RecordType as DelegateRecordType
+
log = Logger()
@@ -272,8 +270,6 @@
returnValue((yield directory.principalCollection.principalForShortName(recordType, shortName)))
try:
- UUID(principalID)
-
if checkOnly:
returnValue(None)
@@ -343,129 +339,110 @@
raise ValueError("Invalid principal identifier: %s" % (principalID,))
+@inlineCallbacks
+def _addRemoveProxy(msg, fn, store, record, proxyType, *proxyIDs):
+ directory = store.directoryService()
+ readWrite = (proxyType == "write")
+ for proxyID in proxyIDs:
+ proxyRecord = yield recordForPrincipalID(directory, proxyID)
+ if proxyRecord is None:
+ print("Invalid principal ID: %s" % (proxyID,))
+ else:
+ txn = store.newTransaction()
+ yield fn(txn, record, proxyRecord, readWrite)
+ yield txn.commit()
+ print(
+ "{msg} {proxy} as a {proxyType} proxy for {record}".format(
+ msg=msg, proxy=prettyRecord(proxyRecord),
+ proxyType=proxyType, record=prettyRecord(record)
+ )
+ )
-def proxySubprincipal(principal, proxyType):
- return principal.getChild("calendar-proxy-" + proxyType)
-
@inlineCallbacks
-def action_addProxyPrincipal(rootResource, directory, store, principal, proxyType, proxyPrincipal):
- try:
- (yield addProxy(rootResource, directory, store, principal, proxyType, proxyPrincipal))
- print("Added %s as a %s proxy for %s" % (
- prettyPrincipal(proxyPrincipal), proxyType,
- prettyPrincipal(principal)))
- except ProxyError, e:
- print("Error:", e)
- except ProxyWarning, e:
- print(e)
+def action_addProxy(store, record, proxyType, *proxyIDs):
+ if config.GroupCaching.Enabled and config.GroupCaching.UseDirectoryBasedDelegates:
+ if record.recordType in (
+ record.service.recordType.location,
+ record.service.recordType.resource,
+ ):
+ print("You are not allowed to add proxies for locations or resources via command line when their proxy assignments come from the directory service.")
+ returnValue(None)
+ yield _addRemoveProxy("Added", Delegates.addDelegate, store, record, proxyType, *proxyIDs)
+
@inlineCallbacks
-def action_removeProxyPrincipal(rootResource, directory, store, principal, proxyPrincipal, **kwargs):
- try:
- removed = (yield removeProxy(
- rootResource, directory, store,
- principal, proxyPrincipal, **kwargs
- ))
- if removed:
- print("Removed %s as a proxy for %s" % (
- prettyPrincipal(proxyPrincipal),
- prettyPrincipal(principal)))
- except ProxyError, e:
- print("Error:", e)
- except ProxyWarning, e:
- print(e)
+def action_removeProxy(store, record, *proxyIDs):
+ if config.GroupCaching.Enabled and config.GroupCaching.UseDirectoryBasedDelegates:
+ if record.recordType in (
+ record.service.recordType.location,
+ record.service.recordType.resource,
+ ):
+ print("You are not allowed to remove proxies for locations or resources via command line when their proxy assignments come from the directory service.")
+ returnValue(None)
+ # Write
+ yield _addRemoveProxy("Removed", Delegates.removeDelegate, store, record, "write", *proxyIDs)
+ # Read
+ yield _addRemoveProxy("Removed", Delegates.removeDelegate, store, record, "read", *proxyIDs)
@inlineCallbacks
-def addProxy(rootResource, directory, store, principal, proxyType, proxyPrincipal):
- proxyURL = proxyPrincipal.url()
+def setProxies(record, readProxyRecords, writeProxyRecords):
+ """
+ Set read/write proxies en masse for a record
+ @param record: L{IDirectoryRecord}
+ @param readProxyRecords: a list of records
+ @param writeProxyRecords: a list of records
+ """
- subPrincipal = yield proxySubprincipal(principal, proxyType)
- if subPrincipal is None:
- raise ProxyError(
- "Unable to edit %s proxies for %s\n" % (
- proxyType,
- prettyPrincipal(principal)
- )
+ proxyTypes = [
+ (DelegateRecordType.readDelegateGroup, readProxyRecords),
+ (DelegateRecordType.writeDelegateGroup, writeProxyRecords),
+ ]
+ for recordType, proxyRecords in proxyTypes:
+ if proxyRecords is None:
+ continue
+ proxyGroup = yield record.service.recordWithShortName(
+ recordType, record.uid
)
+ yield proxyGroup.setMembers(proxyRecords)
- membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
- for memberURL in membersProperty.children:
- if str(memberURL) == proxyURL:
- raise ProxyWarning("%s is already a %s proxy for %s" % (
- prettyPrincipal(proxyPrincipal), proxyType,
- prettyPrincipal(principal)))
- else:
- memberURLs = list(membersProperty.children)
- memberURLs.append(davxml.HRef(proxyURL))
- membersProperty = davxml.GroupMemberSet(*memberURLs)
- (yield subPrincipal.writeProperty(membersProperty, None))
+@inlineCallbacks
+def getProxies(record):
+ """
+ Returns a tuple containing the records for read proxies and write proxies
+ of the given record
+ """
- proxyTypes = ["read", "write"]
- proxyTypes.remove(proxyType)
+ allProxies = {
+ DelegateRecordType.readDelegateGroup: [],
+ DelegateRecordType.writeDelegateGroup: [],
+ }
+ for recordType in allProxies.iterkeys():
+ proxyGroup = yield record.service.recordWithShortName(
+ recordType, record.uid
+ )
+ allProxies[recordType] = yield proxyGroup.members()
- yield action_removeProxyPrincipal(
- rootResource, directory, store,
- principal, proxyPrincipal, proxyTypes=proxyTypes
+ returnValue(
+ (
+ allProxies[DelegateRecordType.readDelegateGroup],
+ allProxies[DelegateRecordType.writeDelegateGroup]
+ )
)
- # Schedule work the PeerConnectionPool will pick up as overdue
- def groupPollNow(txn):
- from txdav.who.groups import GroupCacherPollingWork
- return GroupCacherPollingWork.reschedule(txn, 0, force=True)
- yield store.inTransaction("addProxy groupPollNow", groupPollNow)
+def proxySubprincipal(principal, proxyType):
+ return principal.getChild("calendar-proxy-" + proxyType)
-@inlineCallbacks
-def removeProxy(rootResource, directory, store, principal, proxyPrincipal, **kwargs):
- removed = False
- proxyTypes = kwargs.get("proxyTypes", ("read", "write"))
- for proxyType in proxyTypes:
- proxyURL = proxyPrincipal.url()
- subPrincipal = yield proxySubprincipal(principal, proxyType)
- if subPrincipal is None:
- raise ProxyError(
- "Unable to edit %s proxies for %s\n" % (
- proxyType,
- prettyPrincipal(principal)
- )
- )
-
- membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
-
- memberURLs = [
- m for m in membersProperty.children
- if str(m) != proxyURL
- ]
-
- if len(memberURLs) == len(membersProperty.children):
- # No change
- continue
- else:
- removed = True
-
- membersProperty = davxml.GroupMemberSet(*memberURLs)
- (yield subPrincipal.writeProperty(membersProperty, None))
-
- if removed:
- # Schedule work the PeerConnectionPool will pick up as overdue
- def groupPollNow(txn):
- from txdav.who.groups import GroupCacherPollingWork
- return GroupCacherPollingWork.reschedule(txn, 0, force=True)
- yield store.inTransaction("removeProxy groupPollNow", groupPollNow)
- returnValue(removed)
-
-
-
def prettyPrincipal(principal):
return prettyRecord(principal.record)
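
The rewritten proxy helpers above no longer edit the calendar-proxy-read/write WebDAV group properties; they go through the delegates store API inside a transaction. A minimal sketch of the call pattern that _addRemoveProxy() wraps (error handling and the directory-based-delegates guard omitted):

    from twisted.internet.defer import inlineCallbacks
    from txdav.who.delegates import Delegates

    @inlineCallbacks
    def addWriteProxy(store, record, proxyRecord):
        txn = store.newTransaction()
        # readWrite=True marks proxyRecord as a write (read-write) proxy of record
        yield Delegates.addDelegate(txn, record, proxyRecord, True)
        yield txn.commit()
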
Modified: CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/webadmin/delegation.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/webadmin/delegation.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/calendarserver/webadmin/delegation.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -27,8 +27,8 @@
import urlparse
from calendarserver.tools.util import (
- principalForPrincipalID, proxySubprincipal, action_addProxyPrincipal,
- action_removeProxyPrincipal
+ recordForPrincipalID, proxySubprincipal, action_addProxy,
+ action_removeProxy, principalForPrincipalID
)
from twistedcaldav.config import config
@@ -49,6 +49,15 @@
from twext.who.idirectory import RecordType
from txdav.who.idirectory import RecordType as CalRecordType, AutoScheduleMode
+allowedAutoScheduleModes = {
+ "default": None,
+ "none": AutoScheduleMode.none,
+ "accept-always": AutoScheduleMode.accept,
+ "decline-always": AutoScheduleMode.decline,
+ "accept-if-free": AutoScheduleMode.acceptIfFree,
+ "decline-if-busy": AutoScheduleMode.declineIfBusy,
+ "automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy,
+}
class WebAdminPage(Element):
"""
@@ -221,8 +230,8 @@
self.principalResource = principalResource
self.adminResource = adminResource
self.proxySearch = proxySearch
- record = principalResource.record
- tag.fillSlots(resourceTitle=recordTitle(record),
+ self.record = principalResource.record
+ tag.fillSlots(resourceTitle=recordTitle(self.record),
resourceId=resourceId,
davPropertyName=davPropertyName,
proxySearch=proxySearch)
@@ -283,9 +292,9 @@
Renderer which elides its tag for non-resource-type principals.
"""
if (
- self.principalResource.record.recordType.description != "user" and
- self.principalResource.record.recordType.description != "group" or
- self.principalResource.record.recordType.description == "user" and
+ self.record.recordType.description != "user" and
+ self.record.recordType.description != "group" or
+ self.record.recordType.description == "user" and
config.Scheduling.Options.AutoSchedule.AllowUsers
):
return tag
@@ -293,99 +302,91 @@
@renderer
- @inlineCallbacks
def isAutoSchedule(self, request, tag):
"""
Renderer which sets the 'selected' attribute on its tag if the resource
is auto-schedule.
"""
- if (yield self.principalResource.getAutoScheduleMode()) is not AutoScheduleMode.none:
+ if self.record.autoScheduleMode is not AutoScheduleMode.none:
tag(selected='selected')
- returnValue(tag)
+ return tag
@renderer
- @inlineCallbacks
def isntAutoSchedule(self, request, tag):
"""
Renderer which sets the 'selected' attribute on its tag if the resource
is not auto-schedule.
"""
- if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.none:
+ if self.record.autoScheduleMode is AutoScheduleMode.none:
tag(selected='selected')
- returnValue(tag)
+ return tag
@renderer
- @inlineCallbacks
def autoScheduleModeNone(self, request, tag):
"""
Renderer which sets the 'selected' attribute on its tag based on the resource
auto-schedule-mode.
"""
- if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.none:
+ if self.record.autoScheduleMode is AutoScheduleMode.none:
tag(selected='selected')
- returnValue(tag)
+ return tag
@renderer
- @inlineCallbacks
def autoScheduleModeAcceptAlways(self, request, tag):
"""
Renderer which sets the 'selected' attribute on its tag based on the resource
auto-schedule-mode.
"""
- if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.accept:
+ if self.record.autoScheduleMode is AutoScheduleMode.accept:
tag(selected='selected')
- returnValue(tag)
+ return tag
@renderer
- @inlineCallbacks
def autoScheduleModeDeclineAlways(self, request, tag):
"""
Renderer which sets the 'selected' attribute on its tag based on the resource
auto-schedule-mode.
"""
- if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.decline:
+ if self.record.autoScheduleMode is AutoScheduleMode.decline:
tag(selected='selected')
- returnValue(tag)
+ return tag
@renderer
- @inlineCallbacks
def autoScheduleModeAcceptIfFree(self, request, tag):
"""
Renderer which sets the 'selected' attribute on its tag based on the resource
auto-schedule-mode.
"""
- if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.acceptIfFree:
+ if self.record.autoScheduleMode is AutoScheduleMode.acceptIfFree:
tag(selected='selected')
- returnValue(tag)
+ return tag
@renderer
- @inlineCallbacks
def autoScheduleModeDeclineIfBusy(self, request, tag):
"""
Renderer which sets the 'selected' attribute on its tag based on the resource
auto-schedule-mode.
"""
- if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.declineIfBusy:
+ if self.record.autoScheduleMode is AutoScheduleMode.declineIfBusy:
tag(selected='selected')
- returnValue(tag)
+ return tag
@renderer
- @inlineCallbacks
def autoScheduleModeAutomatic(self, request, tag):
"""
Renderer which sets the 'selected' attribute on its tag based on the resource
auto-schedule-mode.
"""
- if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.acceptIfFreeDeclineIfBusy:
+ if self.record.autoScheduleMode is AutoScheduleMode.acceptIfFreeDeclineIfBusy:
tag(selected='selected')
- returnValue(tag)
+ return tag
_matrix = None
@@ -634,9 +635,9 @@
@inlineCallbacks
- def resourceActions(self, request, principal):
+ def resourceActions(self, request, record):
"""
- Take all actions on the given principal based on the given request.
+ Take all actions on the given record based on the given request.
"""
def queryValue(arg):
@@ -651,45 +652,32 @@
matches.append(key[len(arg):])
return matches
- autoSchedule = queryValue("autoSchedule")
autoScheduleMode = queryValue("autoScheduleMode")
makeReadProxies = queryValues("mkReadProxy|")
makeWriteProxies = queryValues("mkWriteProxy|")
removeProxies = queryValues("rmProxy|")
- # Update the auto-schedule value if specified.
- if autoSchedule is not None and (autoSchedule == "true" or
- autoSchedule == "false"):
+ # Update the auto-schedule-mode value if specified.
+ if autoScheduleMode:
if (
- principal.record.recordType != RecordType.user and
- principal.record.recordType != RecordType.group or
- principal.record.recordType == RecordType.user and
+ record.recordType != RecordType.user and
+ record.recordType != RecordType.group or
+ record.recordType == RecordType.user and
config.Scheduling.Options.AutoSchedule.AllowUsers
):
- (yield principal.setAutoSchedule(autoSchedule == "true"))
- (yield principal.setAutoScheduleMode(autoScheduleMode))
+ autoScheduleMode = allowedAutoScheduleModes[autoScheduleMode]
+ yield record.setAutoScheduleMode(autoScheduleMode)
+ record.autoScheduleMode = autoScheduleMode
# Update the proxies if specified.
- for proxyId in removeProxies:
- proxy = yield self.getResourceById(request, proxyId)
- yield action_removeProxyPrincipal(
- self.root, self.directory, self.store,
- principal, proxy, proxyTypes=["read", "write"]
- )
+ if removeProxies:
+ yield action_removeProxy(self.store, record, *removeProxies)
- for proxyId in makeReadProxies:
- proxy = yield self.getResourceById(request, proxyId)
- yield action_addProxyPrincipal(
- self.root, self.directory, self.store,
- principal, "read", proxy
- )
+ if makeReadProxies:
+ yield action_addProxy(self.store, record, "read", *makeReadProxies)
- for proxyId in makeWriteProxies:
- proxy = yield self.getResourceById(request, proxyId)
- yield action_addProxyPrincipal(
- self.root, self.directory, self.store,
- principal, "write", proxy
- )
+ if makeWriteProxies:
+ yield action_addProxy(self.store, record, "write", *makeWriteProxies)
@inlineCallbacks
@@ -700,8 +688,8 @@
"""
resourceId = request.args.get('resourceId', [''])[0]
if resourceId:
- principal = yield self.getResourceById(request, resourceId)
- yield self.resourceActions(request, principal)
+ record = yield recordForPrincipalID(self.directory, resourceId)
+ yield self.resourceActions(request, record)
htmlContent = yield flattenString(request, WebAdminPage(self))
response = Response()
response.stream = MemoryStream(htmlContent)
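
With the renderers now reading record.autoScheduleMode directly, resourceActions() only has to translate the posted form value through the new allowedAutoScheduleModes table and write it back. A small sketch of that flow in isolation (the form value "accept-if-free" is just an example):

    from twisted.internet.defer import inlineCallbacks
    from calendarserver.webadmin.delegation import allowedAutoScheduleModes

    @inlineCallbacks
    def applyAutoScheduleMode(record, formValue):
        mode = allowedAutoScheduleModes[formValue]   # e.g. "accept-if-free"
        yield record.setAutoScheduleMode(mode)       # persist via the directory service
        record.autoScheduleMode = mode               # keep the in-memory record in sync
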
Modified: CalendarServer/branches/users/cdaboo/better-next-job/conf/caldavd-stdconfig.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/conf/caldavd-stdconfig.plist 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/conf/caldavd-stdconfig.plist 2016-07-13 02:23:31 UTC (rev 15761)
@@ -2123,14 +2123,28 @@
<key>Enabled</key>
<false/>
+ <!-- Set to False for telnet -->
+ <key>UseSSH</key>
+ <true/>
+
+ <!-- Master listens here, children increment -->
<key>StartingPortNumber</key>
<integer>5000</integer>
+ <!-- Directory Proxy listens here -->
<key>DPSPortNumber</key>
<integer>4999</integer>
+ <!-- Path to password file with lines of user:pass -->
<key>PasswordFilePath</key>
<string></string>
+
+ <!-- Relative to DataRoot -->
+ <key>sshKeyName</key>
+ <string>manhole.key</string>
+
+ <key>sshKeySize</key>
+ <integer>4096</integer>
</dict>
<key>EnableKeepAlive</key>
Modified: CalendarServer/branches/users/cdaboo/better-next-job/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/twistedcaldav/stdconfig.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/twistedcaldav/stdconfig.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -1108,9 +1108,12 @@
"Manhole": {
"Enabled": False,
- "StartingPortNumber": 5000,
- "DPSPortNumber": 4999,
- "PasswordFilePath": "",
+ "UseSSH": True, # Set to False for telnet
+ "StartingPortNumber": 5000, # Master listens here, children increment
+ "DPSPortNumber": 4999, # Directory Proxy listens here
+ "PasswordFilePath": "", # Path to password file with lines of user:pass
+ "sshKeyName": "manhole.key", # Relative to DataRoot
+ "sshKeySize": 4096,
},
"EnableKeepAlive": False,
Modified: CalendarServer/branches/users/cdaboo/better-next-job/txdav/caldav/icalendardirectoryservice.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/txdav/caldav/icalendardirectoryservice.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/txdav/caldav/icalendardirectoryservice.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -106,6 +106,15 @@
@rtype: C{bool}
"""
+ def setAutoScheduleMode(autoScheduleMode): #@NoSelf
+ """
+ Sets the mode of automatic scheduling used for this record.
+
+ @param autoScheduleMode: the new mode
+ @type autoScheduleMode: L{AutoScheduleMode}
+ """
+
+
def isProxyFor(other): #@NoSelf
"""
Test whether the record is a calendar user proxy for the specified record.
Modified: CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/client.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/client.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/client.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -44,7 +44,8 @@
WikiAccessForUIDCommand, ContinuationCommand,
StatsCommand, ExternalDelegatesCommand, ExpandedMemberUIDsCommand,
AddMembersCommand, RemoveMembersCommand,
- UpdateRecordsCommand, ExpandedMembersCommand, FlushCommand
+ UpdateRecordsCommand, ExpandedMembersCommand, FlushCommand,
+ SetAutoScheduleModeCommand
)
from txdav.who.delegates import RecordType as DelegatesRecordType
from txdav.who.directory import (
@@ -422,6 +423,14 @@
)
+ def setAutoScheduleMode(self, record, autoScheduleMode):
+ return self._sendCommand(
+ SetAutoScheduleModeCommand,
+ uid=record.uid.encode("utf-8"),
+ autoScheduleMode=autoScheduleMode.name,
+ )
+
+
@inlineCallbacks
def flush(self):
try:
Modified: CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/commands.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/commands.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/commands.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -265,7 +265,16 @@
]
+class SetAutoScheduleModeCommand(amp.Command):
+ arguments = [
+ ('uid', amp.String()),
+ ('autoScheduleMode', amp.String()),
+ ]
+ response = [
+ ('success', amp.Boolean()),
+ ]
+
class FlushCommand(amp.Command):
arguments = []
response = [
Modified: CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/server.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/server.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/server.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -45,8 +45,10 @@
WikiAccessForUIDCommand, ContinuationCommand,
ExternalDelegatesCommand, StatsCommand, ExpandedMemberUIDsCommand,
AddMembersCommand, RemoveMembersCommand,
- UpdateRecordsCommand, FlushCommand, # RemoveRecordsCommand,
+ UpdateRecordsCommand, FlushCommand, SetAutoScheduleModeCommand,
+ # RemoveRecordsCommand,
)
+from txdav.who.idirectory import AutoScheduleMode
from txdav.who.wiki import WikiAccessLevel
from zope.interface import implementer
@@ -540,6 +542,21 @@
returnValue(response)
+ @SetAutoScheduleModeCommand.responder
+ @inlineCallbacks
+ def setAutoScheduleMode(self, uid, autoScheduleMode):
+ uid = uid.decode("utf-8")
+ record = yield self._directory.recordWithUID(uid)
+ autoScheduleMode = autoScheduleMode.decode("utf-8")
+ autoScheduleMode = AutoScheduleMode.lookupByName(autoScheduleMode)
+ yield self._directory.setAutoScheduleMode(record, autoScheduleMode)
+ response = {
+ "success": True
+ }
+ returnValue(response)
+
+
+
@GroupsCommand.responder
@inlineCallbacks
def groups(self, uid):
@@ -851,8 +868,12 @@
config.Manhole.DPSPortNumber
)
manholeService = manholeMakeService({
- "sshPort": None,
- "telnetPort": portString,
+ "sshPort": portString if config.Manhole.UseSSH is True else None,
+ "telnetPort": portString if config.Manhole.UseSSH is False else None,
+ "sshKeyDir": config.DataRoot,
+ "sshKeyName": "manhole.key",
+ "sshKeySize": 4096,
+ "passwd": config.Manhole.PasswordFilePath,
"namespace": {
"config": config,
"service": dpsService,
Modified: CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/test/test_client.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/test/test_client.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/txdav/dps/test/test_client.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -36,6 +36,7 @@
from txdav.who.test.support import (
TestRecord, CalendarInMemoryDirectoryService
)
+from txdav.who.idirectory import AutoScheduleMode
testMode = "xml" # "xml" or "od"
@@ -442,6 +443,22 @@
@inlineCallbacks
+ def test_setAutoScheduleMode(self):
+ """
+ Verify setAutoSchedule works across DPS
+ """
+ record = yield self.client.recordWithUID(u"75EA36BE-F71B-40F9-81F9-CF59BF40CA8F")
+ # Defaults to automatic
+ self.assertEquals(record.autoScheduleMode, AutoScheduleMode.acceptIfFreeDeclineIfBusy)
+ # Change it to accept-if-busy
+ yield record.setAutoScheduleMode(AutoScheduleMode.acceptIfFree)
+ # Refetch it
+ record = yield self.client.recordWithUID(u"75EA36BE-F71B-40F9-81F9-CF59BF40CA8F")
+ # Verify it's changed
+ self.assertEquals(record.autoScheduleMode, AutoScheduleMode.acceptIfFree)
+
+
+ @inlineCallbacks
def test_uid(self):
record = (yield self.client.recordWithUID(self.wsanchezUID))
self.assertTrue(u"wsanchez" in record.shortNames)
Modified: CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/augment.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/augment.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/augment.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -471,7 +471,27 @@
returnValue(augmentedRecord)
+ @inlineCallbacks
+ def setAutoScheduleMode(self, record, autoScheduleMode):
+ augmentRecord = yield self._augmentDB.getAugmentRecord(
+ record.uid,
+ self.recordTypeToOldName(record.recordType)
+ )
+ if augmentRecord is not None:
+ autoScheduleMode = {
+ AutoScheduleMode.none: "none",
+ AutoScheduleMode.accept: "accept-always",
+ AutoScheduleMode.decline: "decline-always",
+ AutoScheduleMode.acceptIfFree: "accept-if-free",
+ AutoScheduleMode.declineIfBusy: "decline-if-busy",
+ AutoScheduleMode.acceptIfFreeDeclineIfBusy: "automatic",
+ }.get(autoScheduleMode)
+ augmentRecord.autoScheduleMode = autoScheduleMode
+ yield self._augmentDB.addAugmentRecords([augmentRecord])
+
+
+
class AugmentedDirectoryRecord(DirectoryRecord, CalendarDirectoryRecordMixin):
"""
Augmented directory record.
Modified: CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/directory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/directory.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/directory.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -607,6 +607,10 @@
returnValue(autoScheduleMode)
+ def setAutoScheduleMode(self, autoScheduleMode):
+ return self.service.setAutoScheduleMode(self, autoScheduleMode)
+
+
@inlineCallbacks
def autoAcceptFromOrganizer(self, organizer):
try:
Modified: CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/test/test_directory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/test/test_directory.py 2016-07-13 02:18:08 UTC (rev 15760)
+++ CalendarServer/branches/users/cdaboo/better-next-job/txdav/who/test/test_directory.py 2016-07-13 02:23:31 UTC (rev 15761)
@@ -358,7 +358,22 @@
)
+ @inlineCallbacks
+ def test_setAutoScheduleMode(self):
+ """
+ Verify the record.setAutoScheduleMode( ) method
+ """
+ orion = yield self.directory.recordWithUID(u"orion")
+ # Defaults to automatic
+ self.assertEquals(orion.autoScheduleMode, AutoScheduleMode.acceptIfFreeDeclineIfBusy)
+ # Change it to decline-if-busy
+ yield orion.setAutoScheduleMode(AutoScheduleMode.declineIfBusy)
+ # Refetch it
+ orion = yield self.directory.recordWithUID(u"orion")
+ # Verify it's changed
+ self.assertEquals(orion.autoScheduleMode, AutoScheduleMode.declineIfBusy)
+
class DirectoryTestCaseFakeEmail(StoreTestCase):