[CalendarServer-changes] [5439] CalendarServer/branches/users/cdaboo/shared-calendars-5187
source_changes at macosforge.org
Wed Apr 7 12:27:36 PDT 2010
Revision: 5439
http://trac.macosforge.org/projects/calendarserver/changeset/5439
Author: cdaboo at apple.com
Date: 2010-04-07 12:27:31 -0700 (Wed, 07 Apr 2010)
Log Message:
-----------
Merge from trunk.
Modified Paths:
--------------
CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/caldavd
CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/calendarserver_purge_events
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/platform/darwin/_sacl.c
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/provision/root.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/provision/test/test_root.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/sidecar/task.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/caldav.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/test/test_caldav.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/util.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/gateway.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/principals.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/purge.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/gateway/caldavd.plist
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/principals/caldavd.plist
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_gateway.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_principals.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_purge.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/caldavd-test.plist
CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/carddavd-test.plist
CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/resources/caldavd-resources.plist
CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/launchd/addressbookserver.plist
CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/launchd/calendarserver.plist
CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/migration/59_calendarmigrator.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/caldavd.8
CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/calendarserver_manage_principals.8
CalendarServer/branches/users/cdaboo/shared-calendars-5187/run
CalendarServer/branches/users/cdaboo/shared-calendars-5187/setup.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/support/Makefile.Apple
CalendarServer/branches/users/cdaboo/shared-calendars-5187/support/build.sh
CalendarServer/branches/users/cdaboo/shared-calendars-5187/test
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/tcp.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/vcomponent.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/channel/http.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/dav/resource.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/server.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twisted/plugins/kqueuereactor.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/config.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/aggregate.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/appleopendirectory.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/augment.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/cachingdirectory.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/calendaruserproxy.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/calendaruserproxyloader.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/opendirectorybacker.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/principal.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/accounts.xml
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/augments.xml
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/proxies.xml
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/resources/caldavd.plist
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_modify.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_opendirectory.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_principal.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_proxyprincipalmembers.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/xmlfile.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/extensions.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/mail.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/memcacheprops.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/method/report_common.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/notify.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/resource.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/sharing.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/static.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/stdconfig.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_config.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_index.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_mail.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/util.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/file.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/test_file.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/icalendarstore.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/idav.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/base.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_xattr.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/xattr.py
Added Paths:
-----------
CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/calendarserver_purge_principals
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/augments.xml
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/caldavd.plist
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/resources-locations.xml
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/users-groups.xml
CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/calendarserver_purge_principals.8
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/kqreactor.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/sendfdport.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/memcacheclient.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/sendfd.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/sendmsg.c
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/test/pullpipe.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/test/test_sendmsg.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/metafd.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/1.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/2.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/3.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/24204e8682b99527cbda64d7423acda7.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/61038c41bd02ae5daf9f7fe9d54199fd.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/84be58ced1f1bb34057e1bd7e602c9c8.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/acc1015b7dc300c1b5665f6833960994.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b0d5785f275c064117ffd1fc20f4ed40.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b495c5dd5aa53392078eb43b1f906a80.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b88dd50941e4a31520ee396fd7894c96.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_empty/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/resource.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/none.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/base.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_base.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_none.py
Removed Paths:
-------------
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/augments.xml
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/caldavd.plist
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/resources-locations.xml
CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/users-groups.xml
CalendarServer/branches/users/cdaboo/shared-calendars-5187/kqreactor.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/memcacheclient.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/cache.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_cache.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_static.py
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/1.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/2.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/3.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/24204e8682b99527cbda64d7423acda7.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/61038c41bd02ae5daf9f7fe9d54199fd.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/84be58ced1f1bb34057e1bd7e602c9c8.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/acc1015b7dc300c1b5665f6833960994.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b0d5785f275c064117ffd1fc20f4ed40.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b495c5dd5aa53392078eb43b1f906a80.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b88dd50941e4a31520ee396fd7894c96.ics
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_empty/
CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/home1/
Property Changed:
----------------
CalendarServer/branches/users/cdaboo/shared-calendars-5187/
Property changes on: CalendarServer/branches/users/cdaboo/shared-calendars-5187
___________________________________________________________________
Modified: svn:mergeinfo
- /CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/trunk:5188-5293
+ /CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/trunk:5188-5438
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/caldavd
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/caldavd 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/caldavd 2010-04-07 19:27:31 UTC (rev 5439)
@@ -21,6 +21,7 @@
#PYTHONPATH
daemonize="";
+errorlogenabled="";
username="";
groupname="";
configfile="";
@@ -85,6 +86,7 @@
echo "Options:";
echo " -h Print this help and exit";
echo " -X Do not daemonize";
+ echo " -L Do not log errors to file; instead use stdout";
echo " -u User name to run as";
echo " -g Group name to run as";
echo " -f Configuration file to read";
@@ -97,11 +99,12 @@
exit 64;
}
-while getopts 'hXu:g:f:T:P:t:p:R:' option; do
+while getopts 'hXLu:g:f:T:P:t:p:R:' option; do
case "${option}" in
'?') usage; ;;
'h') usage -; exit 0; ;;
'X') daemonize="-n"; ;;
+ 'L') errorlogenabled="-o ErrorLogEnabled=False"; ;;
'f') configfile="-f ${OPTARG}"; ;;
'T') twistdpath="${OPTARG}"; ;;
'u') username="-u ${OPTARG}"; ;;
@@ -119,6 +122,4 @@
export PYTHONPATH
-echo exec "${python}" "${twistdpath}" "${twistd_reactor}" ${daemonize} ${username} ${groupname} "${plugin_name}" ${configfile} ${service_type} ${profile} "${child_reactor}";
-
-exec "${python}" "${twistdpath}" ${twistd_reactor} ${daemonize} ${username} ${groupname} "${plugin_name}" ${configfile} ${service_type} ${profile} ${child_reactor};
+exec "${python}" "${twistdpath}" ${twistd_reactor} ${daemonize} ${username} ${groupname} "${plugin_name}" ${configfile} ${service_type} ${errorlogenabled} ${profile} ${child_reactor};
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/calendarserver_purge_events
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/calendarserver_purge_events 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/calendarserver_purge_events 2010-04-07 19:27:31 UTC (rev 5439)
@@ -40,5 +40,5 @@
sys.argv[1:1] = ["-f", join(home, "conf", "caldavd-dev.plist")]
- from calendarserver.tools.purge import main
- main()
+ from calendarserver.tools.purge import main_purge_events
+ main_purge_events()
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/calendarserver_purge_principals (from rev 5438, CalendarServer/trunk/bin/calendarserver_purge_principals)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/calendarserver_purge_principals (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/bin/calendarserver_purge_principals 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+##
+# Copyright (c) 2006-2010 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+import sys
+
+#PYTHONPATH
+
+if __name__ == "__main__":
+ if "PYTHONPATH" in globals():
+ sys.path.insert(0, PYTHONPATH)
+ else:
+ from os.path import dirname, abspath, join
+ from subprocess import Popen, PIPE
+
+ home = dirname(dirname(abspath(__file__)))
+ run = join(home, "run")
+
+ child = Popen((run, "-p"), stdout=PIPE)
+ path, stderr = child.communicate()
+
+ path = path.rstrip("\n")
+
+ if child.wait() == 0:
+ sys.path[0:0] = path.split(":")
+
+ sys.argv[1:1] = ["-f", join(home, "conf", "caldavd-dev.plist")]
+
+ from calendarserver.tools.purge import main_purge_principals
+ main_purge_principals()
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/platform/darwin/_sacl.c
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/platform/darwin/_sacl.c 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/platform/darwin/_sacl.c 2010-04-07 19:27:31 UTC (rev 5439)
@@ -19,6 +19,8 @@
#include <membership.h>
int mbr_check_service_membership(const uuid_t user, const char* servicename, int* ismember);
+int mbr_user_name_to_uuid(const char* name, uuid_t uu);
+int mbr_group_name_to_uuid(const char* name, uuid_t uu);
/*
CheckSACL(userOrGroupName, service)
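The two prototypes added above come from the macOS membership API (membership.h), which maps user and group names to UUIDs. A minimal sketch of exercising the same calls from Python via ctypes, assuming a macOS host where libSystem exports these symbols (illustrative only, not part of this changeset):

    import ctypes
    import ctypes.util
    import uuid

    # libSystem provides the membership API on macOS.
    libsystem = ctypes.CDLL(ctypes.util.find_library("System"))

    # int mbr_user_name_to_uuid(const char *name, uuid_t uu);
    buf = ctypes.create_string_buffer(16)      # a uuid_t is 16 raw bytes
    if libsystem.mbr_user_name_to_uuid(b"root", buf) == 0:
        print("root ->", uuid.UUID(bytes=buf.raw))
    else:
        print("lookup failed")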
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/provision/root.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/provision/root.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/provision/root.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -20,7 +20,6 @@
]
from twisted.internet.defer import inlineCallbacks, returnValue
-from twisted.cred.error import LoginFailed, UnauthorizedLogin
from twext.web2 import responsecode
from twext.web2.dav import davxml
from twext.web2.http import HTTPError, StatusResponse
@@ -34,11 +33,6 @@
from twistedcaldav.extensions import DirectoryPrincipalPropertySearchMixIn
from twistedcaldav.extensions import ReadOnlyResourceMixIn
from twistedcaldav.config import config
-from twistedcaldav.cache import _CachedResponseResource
-from twistedcaldav.cache import MemcacheResponseCache, MemcacheChangeNotifier
-from twistedcaldav.cache import DisabledCache
-from twistedcaldav.static import CalendarHomeFile
-from twistedcaldav.directory.principal import DirectoryPrincipalResource
log = Logger()
@@ -63,14 +57,6 @@
self.contentFilters = []
- if config.Memcached.Pools.Default.ClientEnabled:
- self.responseCache = MemcacheResponseCache(self.fp)
-
- CalendarHomeFile.cacheNotifierFactory = MemcacheChangeNotifier
- DirectoryPrincipalResource.cacheNotifierFactory = MemcacheChangeNotifier
- else:
- self.responseCache = DisabledCache()
-
if config.ResponseCompression:
from twext.web2.filter import gzip
self.contentFilters.append((gzip.gzipfilter, True))
@@ -260,30 +246,6 @@
request.extendedLogItems = {}
request.extendedLogItems["xff"] = remote_ip[0]
-# if request.method == "PROPFIND" and not getattr(request, "notInCache", False) and len(segments) > 1:
-# try:
-# authnUser, authzUser = (yield self.authenticate(request))
-# request.authnUser = authnUser
-# request.authzUser = authzUser
-# except (UnauthorizedLogin, LoginFailed):
-# response = (yield UnauthorizedResponse.makeResponse(
-# request.credentialFactories,
-# request.remoteAddr
-# ))
-# raise HTTPError(response)
-#
-# try:
-# if not getattr(request, "checkingCache", False):
-# request.checkingCache = True
-# response = (yield self.responseCache.getResponseForRequest(request))
-# if response is None:
-# request.notInCache = True
-# raise KeyError("Not found in cache.")
-#
-# returnValue((_CachedResponseResource(response), []))
-# except KeyError:
-# pass
-
child = (yield super(RootResource, self).locateChild(request, segments))
returnValue(child)
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/provision/test/test_root.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/provision/test/test_root.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/provision/test/test_root.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -35,7 +35,6 @@
from twistedcaldav.directory.test.test_xmlfile import xmlFile, augmentsFile
from calendarserver.provision.root import RootResource
-from twistedcaldav.config import config
from twistedcaldav.directory import augment
class FakeCheckSACL(object):
@@ -56,26 +55,24 @@
def setUp(self):
super(RootTests, self).setUp()
- # XXX make sure that the config hooks have been run, so that we get the
- # default RootResourceACL key is set and traversal works. This is not
- # great, and the ACLs supported by the root resource should really be
- # an _attribute_ on the root resource. -glyph
- config.update({})
-
self.docroot = self.mktemp()
os.mkdir(self.docroot)
- RootResource.CheckSACL = FakeCheckSACL(sacls={
- 'calendar': ['dreid']})
+ RootResource.CheckSACL = FakeCheckSACL(sacls={"calendar": ["dreid"]})
- directory = XMLDirectoryService({'xmlFile' : xmlFile})
- augment.AugmentService = augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,))
+ directory = XMLDirectoryService({"xmlFile" : xmlFile})
+ augment.AugmentService = augment.AugmentXMLDB(
+ xmlFiles=(augmentsFile.path,)
+ )
- principals = DirectoryPrincipalProvisioningResource('/principals/', directory)
+ principals = DirectoryPrincipalProvisioningResource(
+ "/principals/",
+ directory
+ )
root = RootResource(self.docroot, principalCollections=[principals])
- root.putChild('principals',
+ root.putChild("principals",
principals)
portal = Portal(auth.DavRealm())
@@ -97,16 +94,18 @@
"""
@inlineCallbacks
- def issueRequest(self, segments, method='GET'):
+ def issueRequest(self, segments, method="GET"):
"""
Get a resource from a particular path from the root URI, and return a
Deferred which will fire with (something adaptable to) an HTTP response
object.
"""
- request = SimpleRequest(self.site, method, ('/'.join([''] + segments)))
+ request = SimpleRequest(self.site, method, ("/".join([""] + segments)))
rsrc = self.root
while segments:
- rsrc, segments = (yield maybeDeferred(rsrc.locateChild, request, segments))
+ rsrc, segments = (yield maybeDeferred(
+ rsrc.locateChild, request, segments
+ ))
result = yield rsrc.renderHTTP(request)
returnValue(result)
@@ -118,9 +117,8 @@
OPTIONS request should include a DAV header that mentions the
addressbook capability.
"""
- self.patch(config, 'EnableCardDAV', True)
- response = yield self.issueRequest([''], 'OPTIONS')
- self.assertIn('addressbook', response.headers.getHeader('DAV'))
+ response = yield self.issueRequest([""], "OPTIONS")
+ self.assertIn("addressbook", response.headers.getHeader("DAV"))
@@ -139,13 +137,18 @@
"GET",
"/principals/")
- resrc, segments = (yield maybeDeferred(self.root.locateChild, request, ['principals']))
+ resrc, segments = (yield maybeDeferred(
+ self.root.locateChild, request, ["principals"]
+ ))
- resrc, segments = (yield maybeDeferred(resrc.locateChild, request, ['principals']))
+ resrc, segments = (yield maybeDeferred(
+ resrc.locateChild, request, ["principals"]
+ ))
self.failUnless(
isinstance(resrc, DirectoryPrincipalProvisioningResource),
- "Did not get a DirectoryPrincipalProvisioningResource: %s" % (resrc,)
+ "Did not get a DirectoryPrincipalProvisioningResource: %s"
+ % (resrc,)
)
self.assertEquals(segments, [])
@@ -165,23 +168,38 @@
"GET",
"/principals/",
headers=http_headers.Headers({
- 'Authorization': ['basic', '%s' % (
- 'dreid:dierd'.encode('base64'),)]}))
+ "Authorization": [
+ "basic",
+ "%s" % ("dreid:dierd".encode("base64"),)
+ ]
+ })
+ )
- resrc, segments = (yield maybeDeferred(self.root.locateChild, request, ['principals']))
+ resrc, segments = (yield maybeDeferred(
+ self.root.locateChild, request, ["principals"]
+ ))
- resrc, segments = (yield maybeDeferred(resrc.locateChild, request, ['principals']))
+ resrc, segments = (yield maybeDeferred(
+ resrc.locateChild, request, ["principals"]
+ ))
self.failUnless(
isinstance(resrc, DirectoryPrincipalProvisioningResource),
- "Did not get a DirectoryPrincipalProvisioningResource: %s" % (resrc,)
+ "Did not get a DirectoryPrincipalProvisioningResource: %s"
+ % (resrc,)
)
self.assertEquals(segments, [])
- self.assertEquals(request.authzUser,
- davxml.Principal(
- davxml.HRef('/principals/__uids__/5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1/')))
+ self.assertEquals(
+ request.authzUser,
+ davxml.Principal(
+ davxml.HRef(
+ "/principals/__uids__/"
+ "5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1/"
+ )
+ )
+ )
@inlineCallbacks
def test_notInSacls(self):
@@ -198,13 +216,21 @@
"GET",
"/principals/",
headers=http_headers.Headers({
- 'Authorization': ['basic', '%s' % (
- 'wsanchez:zehcnasw'.encode('base64'),)]}))
+ "Authorization": [
+ "basic",
+ "%s" % ("wsanchez:zehcnasw".encode("base64"),)
+ ]
+ })
+ )
- resrc, segments = (yield maybeDeferred(self.root.locateChild, request, ['principals']))
+ resrc, segments = (yield maybeDeferred(
+ self.root.locateChild, request, ["principals"]
+ ))
try:
- resrc, segments = (yield maybeDeferred(resrc.locateChild, request, ['principals']))
+ resrc, segments = (yield maybeDeferred(
+ resrc.locateChild, request, ["principals"]
+ ))
except HTTPError, e:
self.assertEquals(e.response.code, 403)
@@ -218,15 +244,23 @@
"""
self.root.resource.useSacls = True
- request = SimpleRequest(self.site,
- "GET",
- "/principals/")
+ request = SimpleRequest(
+ self.site,
+ "GET",
+ "/principals/"
+ )
- resrc, segments = (yield maybeDeferred(self.root.locateChild, request, ['principals']))
+ resrc, segments = (yield maybeDeferred(
+ self.root.locateChild, request, ["principals"]
+ ))
try:
- resrc, segments = (yield maybeDeferred(resrc.locateChild, request, ['principals']))
- raise AssertionError(("RootResource.locateChild did not return an error"))
+ resrc, segments = (yield maybeDeferred(
+ resrc.locateChild, request, ["principals"]
+ ))
+ raise AssertionError(
+ "RootResource.locateChild did not return an error"
+ )
except HTTPError, e:
self.assertEquals(e.response.code, 401)
@@ -245,13 +279,17 @@
"GET",
"/principals/",
headers=http_headers.Headers({
- 'Authorization': ['basic', '%s' % (
- 'dreid:dreid'.encode('base64'),)]}))
+ "Authorization": ["basic", "%s" % (
+ "dreid:dreid".encode("base64"),)]}))
- resrc, segments = (yield maybeDeferred(self.root.locateChild, request, ['principals']))
+ resrc, segments = (yield maybeDeferred(
+ self.root.locateChild, request, ["principals"]
+ ))
try:
- resrc, segments = (yield maybeDeferred(resrc.locateChild, request, ['principals']))
+ resrc, segments = (yield maybeDeferred(
+ resrc.locateChild, request, ["principals"]
+ ))
except HTTPError, e:
self.assertEquals(e.response.code, 401)
@@ -260,7 +298,8 @@
response = IResponse(response)
if response.code != responsecode.FORBIDDEN:
- self.fail("Incorrect response for DELETE /: %s" % (response.code,))
+ self.fail("Incorrect response for DELETE /: %s"
+ % (response.code,))
request = SimpleRequest(self.site, "DELETE", "/")
return self.send(request, do_test)
@@ -270,7 +309,8 @@
response = IResponse(response)
if response.code != responsecode.FORBIDDEN:
- self.fail("Incorrect response for COPY /: %s" % (response.code,))
+ self.fail("Incorrect response for COPY /: %s"
+ % (response.code,))
request = SimpleRequest(
self.site,
@@ -285,7 +325,8 @@
response = IResponse(response)
if response.code != responsecode.FORBIDDEN:
- self.fail("Incorrect response for MOVE /: %s" % (response.code,))
+ self.fail("Incorrect response for MOVE /: %s"
+ % (response.code,))
request = SimpleRequest(
self.site,
@@ -295,79 +336,7 @@
)
return self.send(request, do_test)
-class SACLCacheTests(RootTests):
-
- class StubResponseCacheResource(object):
- def __init__(self):
- self.cache = {}
- self.responseCache = self
- self.cacheHitCount = 0
- def getResponseForRequest(self, request):
- if str(request) in self.cache:
- self.cacheHitCount += 1
- return self.cache[str(request)]
-
-
- def cacheResponseForRequest(self, request, response):
- self.cache[str(request)] = response
- return response
-
- def setUp(self):
- super(SACLCacheTests, self).setUp()
- self.root.resource.responseCache = SACLCacheTests.StubResponseCacheResource()
-
- def test_PROPFIND(self):
- self.root.resource.useSacls = True
-
- body = """<?xml version="1.0" encoding="utf-8" ?>
-<D:propfind xmlns:D="DAV:">
-<D:prop>
-<D:getetag/>
-<D:displayname/>
-</D:prop>
-</D:propfind>
-"""
-
- request = SimpleRequest(
- self.site,
- "PROPFIND",
- "/principals/users/dreid/",
- headers=http_headers.Headers({
- 'Authorization': ['basic', '%s' % ('dreid:dierd'.encode('base64'),)],
- 'Content-Type': 'application/xml; charset="utf-8"',
- 'Depth':'1',
- }),
- content=body
- )
-
- def gotResponse1(response):
- if response.code != responsecode.MULTI_STATUS:
- self.fail("Incorrect response for PROPFIND /principals/: %s" % (response.code,))
-
- request = SimpleRequest(
- self.site,
- "PROPFIND",
- "/principals/users/dreid/",
- headers=http_headers.Headers({
- 'Authorization': ['basic', '%s' % ('dreid:dierd'.encode('base64'),)],
- 'Content-Type': 'application/xml; charset="utf-8"',
- 'Depth':'1',
- }),
- content=body
- )
-
- d = self.send(request, gotResponse2)
- return d
-
- def gotResponse2(response):
- if response.code != responsecode.MULTI_STATUS:
- self.fail("Incorrect response for PROPFIND /principals/: %s" % (response.code,))
- self.assertEqual(self.root.resource.responseCache.cacheHitCount, 0)
-
- d = self.send(request, gotResponse1)
- return d
-
class WikiTests(RootTests):
@inlineCallbacks
@@ -379,6 +348,10 @@
request = SimpleRequest(self.site, "GET", "/principals/")
- resrc, segments = (yield maybeDeferred(self.root.locateChild, request, ['principals']))
- resrc, segments = (yield maybeDeferred(resrc.locateChild, request, ['principals']))
+ resrc, segments = (yield maybeDeferred(
+ self.root.locateChild, request, ["principals"]
+ ))
+ resrc, segments = (yield maybeDeferred(
+ resrc.locateChild, request, ["principals"]
+ ))
self.assertTrue(request.checkedWiki)
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/sidecar/task.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/sidecar/task.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/sidecar/task.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -175,7 +175,7 @@
def __init__(self, root):
self.root = root
self.directory = root.directory
- self.seconds = 5 # How often to check for new tasks in incomingDir
+ self.seconds = 30 # How often to check for new tasks in incomingDir
self.taskDir = os.path.join(config.DataRoot, "tasks")
# New task files are placed into "incoming"
self.incomingDir = os.path.join(self.taskDir, "incoming")
@@ -196,7 +196,7 @@
def periodic(self, first=False):
- log.debug("Checking for tasks")
+ # log.debug("Checking for tasks")
deferreds = []
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/caldav.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/caldav.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -35,10 +35,12 @@
from zope.interface import implements
-from twisted.python.log import FileLogObserver
+from twisted.python.log import FileLogObserver, ILogObserver
+from twisted.python.logfile import LogFile
from twisted.python.usage import Options, UsageError
+from twisted.python.reflect import namedClass
from twisted.plugin import IPlugin
-from twisted.internet.reactor import callLater, spawnProcess
+from twisted.internet.reactor import callLater, addSystemEventTrigger
from twisted.internet.process import ProcessExitedAlready
from twisted.internet.protocol import Protocol, Factory
from twisted.application.internet import TCPServer, UNIXServer
@@ -51,6 +53,7 @@
from twext.python.log import logLevelForNamespace, setLogLevelForNamespace
from twext.internet.ssl import ChainingOpenSSLContextFactory
from twext.internet.tcp import MaxAcceptTCPServer, MaxAcceptSSLServer
+
from twext.web2.channel.http import LimitingHTTPFactory, SSLRedirectRequest
try:
@@ -63,6 +66,8 @@
from twistedcaldav.config import ConfigurationError
from twistedcaldav.config import config
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
+from twistedcaldav.directory import calendaruserproxy
+from twistedcaldav.directory.calendaruserproxyloader import XMLCalendarUserProxyLoader
from twistedcaldav.localization import processLocalizationFiles
from twistedcaldav.mail import IMIPReplyInboxResource
from twistedcaldav.static import CalendarHomeProvisioningFile
@@ -72,6 +77,8 @@
from twistedcaldav.upgrade import upgradeData
from twistedcaldav.util import getNCPU
+from twext.web2.metafd import ConnectionLimiter, ReportingHTTPService
+
try:
from twistedcaldav.authkerb import NegotiateCredentialFactory
NegotiateCredentialFactory # pacify pyflakes
@@ -103,9 +110,30 @@
def __init__(self, logObserver):
self.logger = logObserver
-class CalDAVService (MultiService):
+
+class ErrorLoggingMultiService(MultiService):
+ """ Registers a rotating file logger for error logging, iff
+ config.ErrorLogEnabled is True. """
+
+ def setServiceParent(self, app):
+ MultiService.setServiceParent(self, app)
+
+ if config.ErrorLogEnabled:
+ errorLogFile = LogFile.fromFullPath(
+ config.ErrorLogFile,
+ rotateLength=config.ErrorLogRotateMB * 1024 * 1024,
+ maxRotatedFiles=config.ErrorLogMaxRotatedFiles
+ )
+ errorLogObserver = FileLogObserver(errorLogFile).emit
+
+ # Registering ILogObserver with the Application object
+ # gets our observer picked up within AppLogger.start( )
+ app.setComponent(ILogObserver, errorLogObserver)
+
+
+class CalDAVService (ErrorLoggingMultiService):
def __init__(self, logObserver):
- self.logObserver = logObserver
+ self.logObserver = logObserver # accesslog observer
MultiService.__init__(self)
def privilegedStartService(self):
@@ -237,14 +265,6 @@
if gid and gid != os.getgid():
gottaBeRoot()
- #
- # Ignore the logfile parameter if not daemonized and log to stdout.
- #
- if self.parent["nodaemon"]:
- self.parent["logfile"] = None
- else:
- self.parent["logfile"] = config.ErrorLogFile
-
self.parent["pidfile"] = config.PIDFile
@@ -426,20 +446,38 @@
if config.ProcessType in ('Combined', 'Single'):
+ # Memcached is not needed for the "master" process
+ if config.ProcessType in ('Combined',):
+ config.Memcached.Pools.Default.ClientEnabled = False
+
+ # Note: if the master process ever needs access to memcached
+ # we'll either have to start memcached prior to the
+ # updateProxyDB call below, or disable memcached
+ # client config only while updateProxyDB is running.
+
# Process localization string files
processLocalizationFiles(config.Localization)
# Now do any on disk upgrades we might need.
- # Memcache isn't running at this point, so temporarily change
- # the config so nobody tries to talk to it while upgrading
- memcacheSetting = config.Memcached.Pools.Default.ClientEnabled
- config.Memcached.Pools.Default.ClientEnabled = False
upgradeData(config)
- config.Memcached.Pools.Default.ClientEnabled = memcacheSetting
+ # Make sure proxies get initialized
+ if config.ProxyLoadFromFile:
+ def _doProxyUpdate():
+ proxydbClass = namedClass(config.ProxyDBService.type)
+ calendaruserproxy.ProxyDBService = proxydbClass(**config.ProxyDBService.params)
+ loader = XMLCalendarUserProxyLoader(config.ProxyLoadFromFile)
+ return loader.updateProxyDB()
+ addSystemEventTrigger("after", "startup", _doProxyUpdate)
- service = serviceMethod(options)
+
+ try:
+ service = serviceMethod(options)
+ except ConfigurationError, e:
+ sys.stderr.write("Configuration error: %s\n" % (e,))
+ sys.exit(1)
+
#
# Note: if there is a stopped process in the same session
# as the calendar server and the calendar server is the
@@ -482,6 +520,20 @@
return service
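Proxy database loading now happens via an "after startup" system event trigger instead of inside tap/util.py, so it runs once the reactor is up. A minimal sketch of that reactor hook; the trigger body here is a placeholder for the namedClass / XMLCalendarUserProxyLoader work done above (not part of this changeset):

    from twisted.internet import reactor

    def _doProxyUpdate():
        # Placeholder for: instantiate the configured ProxyDBService class
        # via namedClass() and call XMLCalendarUserProxyLoader.updateProxyDB().
        print("proxy DB update runs here, once the reactor is up")

    # Fires once, immediately after reactor.run() starts the event loop.
    reactor.addSystemEventTrigger("after", "startup", _doProxyUpdate)

    reactor.callLater(0.1, reactor.stop)   # keep the demo short
    reactor.run()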
+ def createContextFactory(self):
+ """
+ Create an SSL context factory for use with any SSL socket talking to
+ this server.
+ """
+ return ChainingOpenSSLContextFactory(
+ config.SSLPrivateKey,
+ config.SSLCertificate,
+ certificateChainFile=config.SSLAuthorityChain,
+ passwdCallback=getSSLPassphrase,
+ sslmethod=getattr(OpenSSL.SSL, config.SSLMethod),
+ )
+
+
def makeService_Slave(self, options):
#
# Change default log level to "info" as its useful to have
@@ -520,7 +572,7 @@
config.AccessLogFile,
)
- self.log_info("Configuring log observer: %s" % (logObserver,))
+ self.log_info("Configuring access log observer: %s" % (logObserver,))
service = CalDAVService(logObserver)
@@ -549,21 +601,16 @@
redirectFactory.maxRequests = configDict.MaxRequests
redirectFactory.maxAccepts = configDict.MaxAccepts
- config.addPostUpdateHook(updateFactory)
+ config.addPostUpdateHooks((updateFactory,))
if config.InheritFDs or config.InheritSSLFDs:
+ # Inherit sockets to call accept() on them individually.
for fd in config.InheritSSLFDs:
fd = int(fd)
try:
- contextFactory = ChainingOpenSSLContextFactory(
- config.SSLPrivateKey,
- config.SSLCertificate,
- certificateChainFile=config.SSLAuthorityChain,
- passwdCallback=getSSLPassphrase,
- sslmethod=getattr(OpenSSL.SSL, config.SSLMethod),
- )
+ contextFactory = self.createContextFactory()
except SSLError, e:
log.error("Unable to set up SSL context factory: %s" % (e,))
else:
@@ -589,7 +636,16 @@
inherit=True
).setServiceParent(service)
+ elif config.MetaFD:
+ # Inherit a single socket to receive accept()ed connections via
+ # recvmsg() and SCM_RIGHTS.
+ fd = int(config.MetaFD)
+
+ ReportingHTTPService(
+ site, fd, self.createContextFactory()
+ ).setServiceParent(service)
+
else: # Not inheriting, therefore we open our own:
if not config.BindAddresses:
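The MetaFD branch expects the master process to hand each accepted connection to a worker over a UNIX control socket using sendmsg() with SCM_RIGHTS ancillary data (the twext.python.sendmsg and twext.web2.metafd additions in this merge). A self-contained illustration of that descriptor-passing mechanism using the Python 3 standard library rather than the project's C extension (illustrative only, not part of this changeset):

    import array
    import os
    import socket

    # A connected pair of UNIX datagram sockets standing in for the
    # master/worker control channel.
    parent, child = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)

    # The "accepted connection" to hand off; a plain pipe keeps the demo small.
    readEnd, writeEnd = os.pipe()

    # Master side: send the descriptor as SCM_RIGHTS ancillary data,
    # alongside a one-byte status tag.
    fds = array.array("i", [readEnd])
    parent.sendmsg([b"0"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])

    # Worker side: receive the tag plus a duplicated descriptor it can use.
    msg, ancdata, flags, addr = child.recvmsg(1, socket.CMSG_LEN(fds.itemsize))
    received = array.array("i")
    received.frombytes(ancdata[0][2][:fds.itemsize])

    os.write(writeEnd, b"hello")
    print(os.read(received[0], 5))   # b'hello', read through the passed fd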
@@ -617,13 +673,7 @@
% (bindAddress, port))
try:
- contextFactory = ChainingOpenSSLContextFactory(
- config.SSLPrivateKey,
- config.SSLCertificate,
- certificateChainFile=config.SSLAuthorityChain,
- passwdCallback=getSSLPassphrase,
- sslmethod=getattr(OpenSSL.SSL, config.SSLMethod),
- )
+ contextFactory = self.createContextFactory()
except SSLError, e:
self.log_error("Unable to set up SSL context factory: %s"
% (e,))
@@ -670,7 +720,7 @@
makeService_Single = makeService_Slave
def makeService_Combined(self, options):
- s = MultiService()
+ s = ErrorLoggingMultiService()
# Make sure no old socket files are lying around.
self.deleteStaleSocketFiles()
@@ -682,7 +732,10 @@
RotatingFileAccessLoggingObserver(config.AccessLogFile)
)
if config.GroupName:
- gid = getgrnam(config.GroupName).gr_gid
+ try:
+ gid = getgrnam(config.GroupName).gr_gid
+ except KeyError, e:
+ raise ConfigurationError("Invalid group name: %s" % (config.GroupName,))
else:
gid = os.getgid()
if config.ControlSocket:
@@ -744,7 +797,13 @@
inheritFDs = []
inheritSSLFDs = []
- s._inheritedSockets = [] # keep a reference to these so they don't close
+ if config.UseMetaFD:
+ cl = ConnectionLimiter(config.MaxAccepts,
+ (config.MaxRequests *
+ config.MultiProcess.ProcessCount))
+ cl.setServiceParent(s)
+ else:
+ s._inheritedSockets = [] # keep a reference to these so they don't close
for bindAddress in config.BindAddresses:
if config.BindHTTPPorts:
@@ -763,40 +822,46 @@
elif config.SSLPort != 0:
config.BindSSLPorts = [config.SSLPort]
- def _openSocket(addr, port):
- log.info("Opening socket for inheritance at %s:%d" % (addr, port))
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.setblocking(0)
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- sock.bind((addr, port))
- sock.listen(config.ListenBacklog)
- s._inheritedSockets.append(sock)
- return sock
+ if config.UseMetaFD:
+ for ports, description in [(config.BindSSLPorts, "SSL"),
+ (config.BindHTTPPorts, "TCP")]:
+ for port in ports:
+ cl.addPortService(description, port, bindAddress, config.ListenBacklog)
+ else:
+ def _openSocket(addr, port):
+ log.info("Opening socket for inheritance at %s:%d" % (addr, port))
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setblocking(0)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind((addr, port))
+ sock.listen(config.ListenBacklog)
+ s._inheritedSockets.append(sock)
+ return sock
- for portNum in config.BindHTTPPorts:
- sock = _openSocket(bindAddress, int(portNum))
- inheritFDs.append(sock.fileno())
+ for portNum in config.BindHTTPPorts:
+ sock = _openSocket(bindAddress, int(portNum))
+ inheritFDs.append(sock.fileno())
- for portNum in config.BindSSLPorts:
- sock = _openSocket(bindAddress, int(portNum))
- inheritSSLFDs.append(sock.fileno())
+ for portNum in config.BindSSLPorts:
+ sock = _openSocket(bindAddress, int(portNum))
+ inheritSSLFDs.append(sock.fileno())
-
for p in xrange(0, config.MultiProcess.ProcessCount):
+ if config.UseMetaFD:
+ extraArgs = dict(dispatcher=cl.dispatcher)
+ else:
+ extraArgs = dict(inheritFDs=inheritFDs,
+ inheritSSLFDs=inheritSSLFDs)
process = TwistdSlaveProcess(
sys.argv[0],
self.tapname,
options["config"],
p,
config.BindAddresses,
- inheritFDs=inheritFDs,
- inheritSSLFDs=inheritSSLFDs
+ **extraArgs
)
-
monitor.addProcessObject(process, parentEnv)
-
-
for name, pool in config.Memcached.Pools.items():
if pool.ServerEnabled:
self.log_info("Adding memcached service for pool: %s" % (name,))
@@ -920,10 +985,28 @@
class TwistdSlaveProcess(object):
+ """
+ A L{TwistdSlaveProcess} is information about how to start a slave process
+ running a C{twistd} plugin, to be used by
+ L{DelayedStartupProcessMonitor.addProcessObject}.
+
+ @ivar inheritFDs: File descriptors to be inherited for calling accept() on
+ in the subprocess.
+ @type inheritFDs: C{list} of C{int}, or C{None}
+
+ @ivar inheritSSLFDs: File descriptors to be inherited for calling accept()
+ on in the subprocess, and speaking TLS on the resulting sockets.
+ @type inheritSSLFDs: C{list} of C{int}, or C{None}
+
+ @ivar dispatcher: a socket dispatcher to generate an inherited port from,
+ or C{None}.
+
+ @type dispatcher: L{InheritedSocketDispatcher} or C{NoneType}
+ """
prefix = "caldav"
def __init__(self, twistd, tapname, configFile, id, interfaces,
- inheritFDs=None, inheritSSLFDs=None):
+ inheritFDs=None, inheritSSLFDs=None, dispatcher=None):
self.twistd = twistd
@@ -932,16 +1015,52 @@
self.configFile = configFile
self.id = id
+ def emptyIfNone(x):
+ if x is None:
+ return []
+ else:
+ return x
+ self.inheritFDs = emptyIfNone(inheritFDs)
+ self.inheritSSLFDs = emptyIfNone(inheritSSLFDs)
+ self.metaSocket = None
+ self.dispatcher = dispatcher
- self.inheritFDs = inheritFDs
- self.inheritSSLFDs = inheritSSLFDs
-
self.interfaces = interfaces
def getName(self):
return '%s-%s' % (self.prefix, self.id)
+
+ def getMetaDescriptor(self):
+ """
+ Get the meta-socket file descriptor to inherit.
+ """
+ if self.metaSocket is None:
+ self.metaSocket = self.dispatcher.addSocket()
+ return self.metaSocket.fileno()
+
+
+ def getFileDescriptors(self):
+ """
+ @return: a mapping of file descriptor numbers for the new (child)
+ process to file descriptor numbers in the current (master) process.
+ """
+ fds = {}
+ maybeMetaFD = []
+ if self.dispatcher is not None:
+ maybeMetaFD.append(self.getMetaDescriptor())
+ for fd in self.inheritSSLFDs + self.inheritFDs + maybeMetaFD:
+ fds[fd] = fd
+ return fds
+
+
def getCommandLine(self):
+ """
+ @return: a list of command-line arguments, including the executable to
+ be used to start this subprocess.
+
+ @rtype: C{list} of C{str}
+ """
args = [sys.executable, self.twistd]
if config.UserName:
@@ -965,6 +1084,7 @@
"-o", "BindAddresses=%s" % (",".join(self.interfaces),),
"-o", "PIDFile=None",
"-o", "ErrorLogFile=None",
+ "-o", "ErrorLogEnabled=False",
"-o", "LogID=%s" % (self.id,),
"-o", "MultiProcess/ProcessCount=%d"
% (config.MultiProcess.ProcessCount,),
@@ -982,6 +1102,13 @@
"-o", "InheritSSLFDs=%s" % (",".join(map(str, self.inheritSSLFDs)),)
])
+ if self.dispatcher is not None:
+ # XXX this FD is never closed in the parent. should it be?
+ # (should they *all* be?) -glyph
+ args.extend([
+ "-o", "MetaFD=%s" % (self.getMetaDescriptor(),)
+ ])
+
return args
@@ -996,17 +1123,46 @@
config.ControlPort = self._port.getHost().port
class DelayedStartupProcessMonitor(procmon.ProcessMonitor):
+ """
+ A L{DelayedStartupProcessMonitor} is a L{procmon.ProcessMonitor} that
+ defers building its command lines until the service is actually ready to
+ start. It also specializes process-starting to allow for process objects
+ to determine their arguments as they are started up rather than entirely
+ ahead of time.
+ @ivar processObjects: a C{list} of L{TwistdSlaveProcess} to add using
+ C{self.addProcess} when this service starts up.
+
+ @ivar _extraFDs: a mapping from process names to extra file-descriptor
+ maps. (By default, all processes will have the standard stdio mapping,
+ so all file descriptors here should be >2.) This is updated during
+ L{DelayedStartupProcessMonitor.startService}, by inspecting the result
+ of L{TwistdSlaveProcess.getFileDescriptors}.
+
+ @ivar reactor: an L{IReactorProcess} for spawning processes, defaulting to
+ the global reactor.
+ """
+
def __init__(self, *args, **kwargs):
procmon.ProcessMonitor.__init__(self, *args, **kwargs)
-
- # processObjects stores TwistdSlaveProcesses which need to have their
- # command-lines determined just in time
self.processObjects = []
+ self._extraFDs = {}
+ from twisted.internet import reactor
+ self.reactor = reactor
+
def addProcessObject(self, process, env):
+ """
+ Add a process object to be run when this service is started.
+
+ @param env: a dictionary of environment variables.
+
+ @param process: a L{TwistdSlaveProcesses} object to be started upon
+ service startup.
+ """
self.processObjects.append((process, env))
+
def startService(self):
Service.startService(self)
@@ -1014,11 +1170,13 @@
# processes to procmon. This step must be done prior to setting
# active to 1
for processObject, env in self.processObjects:
+ name = processObject.getName()
self.addProcess(
- processObject.getName(),
+ name,
processObject.getCommandLine(),
env=env
)
+ self._extraFDs[name] = processObject.getFileDescriptors()
self.active = 1
delay = 0
@@ -1084,18 +1242,15 @@
childFDs = { 0 : "w", 1 : "r", 2 : "r" }
- # Examine args for -o InheritFDs= and -o InheritSSLFDs=
- # Add any file descriptors listed in those args to the childFDs
- # dictionary so those don't get closed across the spawn.
- for i in xrange(len(args)-1):
- if args[i] == "-o" and args[i+1].startswith("Inherit"):
- for fd in map(int, args[i+1].split("=")[1].split(",")):
- childFDs[fd] = fd
+ childFDs.update(self._extraFDs.get(name, {}))
- spawnProcess(p, args[0], args, uid=uid, gid=gid, env=env,
- childFDs=childFDs)
+ self.reactor.spawnProcess(
+ p, args[0], args, uid=uid, gid=gid, env=env,
+ childFDs=childFDs
+ )
+
class DelayedStartupLineLogger(object):
"""
A line logger that can handle very long lines.
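The childFDs dictionary built above tells spawnProcess which descriptors to wire into the slave alongside the standard stdio pipes; integer values are inherited as-is, while "r"/"w" values ask for new pipes readable or writable by the master. A minimal standalone sketch of the mechanism, with a made-up extra descriptor and child command (not part of this changeset):

    import sys

    from twisted.internet import protocol, reactor


    class ShowFDs(protocol.ProcessProtocol):
        def childDataReceived(self, childFD, data):
            print("child wrote to fd %d: %r" % (childFD, data))

        def processEnded(self, reason):
            reactor.stop()


    # The standard stdio mapping used above, plus one extra parent-readable
    # pipe exposed to the child as fd 3 (a stand-in for an inherited socket).
    childFDs = {0: "w", 1: "r", 2: "r", 3: "r"}

    reactor.spawnProcess(
        ShowFDs(),
        sys.executable,
        [sys.executable, "-c", "import os; os.write(3, b'hello from fd 3')"],
        env=None,                  # inherit the parent environment
        childFDs=childFDs,
    )
    reactor.run()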
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/test/test_caldav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/test/test_caldav.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/test/test_caldav.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -21,6 +21,8 @@
from os.path import dirname, abspath
+from zope.interface import implements
+
from twisted.trial.unittest import TestCase as BaseTestCase
from twisted.python.threadable import isInIOThread
@@ -30,7 +32,8 @@
from twisted.python import log
from twisted.internet.protocol import ServerFactory
-from twisted.internet.defer import Deferred
+from twisted.internet.defer import Deferred, inlineCallbacks
+from twisted.internet.interfaces import IProcessTransport, IReactorProcess
from twisted.application.service import IService
from twisted.application import internet
@@ -50,16 +53,76 @@
from twistedcaldav.directory.directory import UnknownRecordTypeError
from twistedcaldav.test.util import TestCase
-from calendarserver.tap.caldav import (CalDAVOptions, CalDAVServiceMaker,
- CalDAVService, GroupOwnedUNIXServer,
- DelayedStartupProcessMonitor,
- DelayedStartupLineLogger)
+from calendarserver.tap.caldav import (
+ CalDAVOptions, CalDAVServiceMaker, CalDAVService, GroupOwnedUNIXServer,
+ DelayedStartupProcessMonitor, DelayedStartupLineLogger, TwistdSlaveProcess
+)
# Points to top of source tree.
sourceRoot = dirname(dirname(dirname(dirname(abspath(__file__)))))
+class NotAProcessTransport(object):
+ """
+ Simple L{IProcessTransport} stub.
+ """
+ implements(IProcessTransport)
+
+ def __init__(self, processProtocol, executable, args, env, path,
+ uid, gid, usePTY, childFDs):
+ """
+ Hold on to all the attributes passed to spawnProcess.
+ """
+ self.processProtocol = processProtocol
+ self.executable = executable
+ self.args = args
+ self.env = env
+ self.path = path
+ self.uid = uid
+ self.gid = gid
+ self.usePTY = usePTY
+ self.childFDs = childFDs
+
+
+class InMemoryProcessSpawner(object):
+ """
+ Stub out L{IReactorProcess.spawnProcess} so that we can examine the
+ interaction of L{DelayedStartupProcessMonitor} and the reactor.
+ """
+ implements(IReactorProcess)
+
+ def __init__(self):
+ """
+ Create some storage to hold on to all the fake processes spawned.
+ """
+ self.processTransports = []
+ self.waiting = []
+
+ def waitForOneProcess(self):
+ """
+ Return a L{Deferred} which will fire when spawnProcess has been
+ invoked, with the L{IProcessTransport}.
+ """
+ d = Deferred()
+ self.waiting.append(d)
+ return d
+
+ def spawnProcess(self, processProtocol, executable, args=(), env={},
+ path=None, uid=None, gid=None, usePTY=0,
+ childFDs=None):
+
+ transport = NotAProcessTransport(
+ processProtocol, executable, args, env, path, uid, gid, usePTY,
+ childFDs
+ )
+ self.processTransports.append(transport)
+ if self.waiting:
+ self.waiting.pop(0).callback(transport)
+ return transport
+
+
+
class TestCalDAVOptions (CalDAVOptions):
"""
A fake implementation of CalDAVOptions that provides
@@ -136,13 +199,11 @@
argv = [
"-f", myConfigFile,
- "-o", "ErrorLogFile=/dev/null",
"-o", "PIDFile=/dev/null",
]
self.config.parseOptions(argv)
- self.assertEquals(self.config.parent["logfile"], "/dev/null")
self.assertEquals(self.config.parent["pidfile"], "/dev/null")
def test_specifyConfigFile(self):
@@ -233,7 +294,8 @@
self.config.SudoersFile = ""
- self.config.update(self.configOptions if self.configOptions else {})
+ if self.configOptions:
+ self.config.update(self.configOptions)
os.mkdir(self.config.ServerRoot)
os.mkdir(os.path.join(self.config.ServerRoot, self.config.DocumentRoot))
@@ -799,6 +861,8 @@
class DummyProcessObject(object):
"""
Simple stub for the Process Object API that will run a test script.
+
+ This is a stand in for L{TwistdSlaveProcess}.
"""
def __init__(self, scriptname, *args):
@@ -810,9 +874,17 @@
"""
Get the command line to invoke this script.
"""
- return [sys.executable, FilePath(__file__).sibling(self.scriptname).path] + self.args
+ return [sys.executable,
+ FilePath(__file__).sibling(self.scriptname).path] + self.args
+ def getFileDescriptors(self):
+ """
+ Return a dummy, empty mapping of file descriptors.
+ """
+ return {}
+
+
def getName(self):
"""
Get a dummy name.
@@ -874,3 +946,79 @@
return d
+ @inlineCallbacks
+ def test_acceptDescriptorInheritance(self):
+ """
+ If a L{TwistdSlaveProcess} specifies some file descriptors to be
+ inherited, they should be inherited by the subprocess.
+ """
+ dspm = DelayedStartupProcessMonitor()
+ dspm.reactor = InMemoryProcessSpawner()
+
+ # Most arguments here will be ignored, so these are bogus values.
+ slave = TwistdSlaveProcess(
+ twistd = "bleh",
+ tapname = "caldav",
+ configFile = "/does/not/exist",
+ id = 10,
+ interfaces = '127.0.0.1',
+ inheritFDs = [3, 7],
+ inheritSSLFDs = [19, 25],
+ )
+
+ dspm.addProcessObject(slave, {})
+ dspm.startService()
+ self.addCleanup(dspm.consistency.cancel)
+ # We can easily stub out spawnProcess, because caldav calls it, but a
+ # bunch of callLater calls are buried in procmon itself, so we need to
+ # use the real clock.
+ oneProcessTransport = yield dspm.reactor.waitForOneProcess()
+ self.assertEquals(oneProcessTransport.childFDs,
+ {0: 'w', 1: 'r', 2: 'r',
+ 3: 3, 7: 7,
+ 19: 19, 25: 25})
+ @inlineCallbacks
+ def test_metaDescriptorInheritance(self):
+ """
+ If a L{TwistdSlaveProcess} specifies a meta-file-descriptor to be
+        inherited, it should be inherited by the subprocess, and a
+        configuration argument should be passed that identifies the inherited
+        descriptor to the subprocess.
+ """
+ dspm = DelayedStartupProcessMonitor()
+ dspm.reactor = InMemoryProcessSpawner()
+ class FakeFD:
+ def __init__(self, n):
+ self.fd = n
+ def fileno(self):
+ return self.fd
+
+ class FakeDispatcher:
+ n = 3
+ def addSocket(self):
+ self.n += 1
+ return FakeFD(self.n)
+
+ # Most arguments here will be ignored, so these are bogus values.
+ slave = TwistdSlaveProcess(
+ twistd = "bleh",
+ tapname = "caldav",
+ configFile = "/does/not/exist",
+ id = 10,
+ interfaces = '127.0.0.1',
+ dispatcher = FakeDispatcher()
+ )
+
+ dspm.addProcessObject(slave, {})
+ dspm.startService()
+ self.addCleanup(dspm.consistency.cancel)
+ oneProcessTransport = yield dspm.reactor.waitForOneProcess()
+ self.assertIn("MetaFD=4", oneProcessTransport.args)
+ self.assertEquals(
+ oneProcessTransport.args[oneProcessTransport.args.index("MetaFD=4")-1],
+ '-o',
+ "MetaFD argument was not passed as an option"
+ )
+ self.assertEquals(oneProcessTransport.childFDs,
+ {0: 'w', 1: 'r', 2: 'r',
+ 4: 4})
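
The descriptor-inheritance assertions above work because InMemoryProcessSpawner records every argument that DelayedStartupProcessMonitor would otherwise hand to the real reactor's spawnProcess. A condensed, self-contained sketch of that stub pattern follows; FakeSpawner and FakeTransport are illustrative stand-ins, not classes from this changeset:

    class FakeTransport(object):
        # Records what a spawned child process would have received.
        def __init__(self, args, childFDs):
            self.args = args
            self.childFDs = childFDs

    class FakeSpawner(object):
        # Minimal IReactorProcess-style stub: never forks, only records.
        def __init__(self):
            self.transports = []

        def spawnProcess(self, protocol, executable, args=(), env=None,
                         path=None, uid=None, gid=None, usePTY=0,
                         childFDs=None):
            transport = FakeTransport(list(args), childFDs or {})
            self.transports.append(transport)
            return transport

    spawner = FakeSpawner()
    spawner.spawnProcess(None, "/usr/bin/true",
                         args=["true", "-o", "MetaFD=4"],
                         childFDs={0: 'w', 1: 'r', 2: 'r', 4: 4})
    assert spawner.transports[0].childFDs[4] == 4
    assert "MetaFD=4" in spawner.transports[0].args
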
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/util.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tap/util.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -38,7 +38,6 @@
from twistedcaldav import memcachepool
from twistedcaldav.directory import augment, calendaruserproxy
from twistedcaldav.directory.aggregate import AggregateDirectoryService
-from twistedcaldav.directory.calendaruserproxyloader import XMLCalendarUserProxyLoader
from twistedcaldav.directory.digest import QopDigestCredentialFactory
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
from twistedcaldav.directory.sudo import SudoDirectoryService
@@ -180,16 +179,6 @@
raise
#
- # Make sure proxies get initialized
- #
- if config.ProxyLoadFromFile:
- def _doProxyUpdate():
- loader = XMLCalendarUserProxyLoader(config.ProxyLoadFromFile)
- return loader.updateProxyDB()
-
- reactor.addSystemEventTrigger("after", "startup", _doProxyUpdate)
-
- #
# Configure Memcached Client Pool
#
memcachepool.installPools(
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/gateway.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/gateway.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/gateway.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -213,6 +213,8 @@
respondWithError(str(e))
return
+ kwargs['guid'] = record.guid
+
principal = self.dir.principalCollection.principalForRecord(record)
(yield principal.setAutoSchedule(command.get('AutoSchedule', True)))
@@ -289,6 +291,8 @@
respondWithError(str(e))
return
+ kwargs['guid'] = record.guid
+
principal = self.dir.principalCollection.principalForRecord(record)
(yield principal.setAutoSchedule(command.get('AutoSchedule', True)))
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/principals.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/principals.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/principals.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -51,7 +51,7 @@
name = os.path.basename(sys.argv[0])
print "usage: %s [options] action_flags principal [principal ...]" % (name,)
print " %s [options] --list-principal-types" % (name,)
- #print " %s [options] --list-principals type" % (name,)
+ print " %s [options] --list-principals type" % (name,)
print ""
print " Performs the given actions against the giving principals."
print ""
@@ -65,9 +65,9 @@
print " -f --config <path>: Specify caldavd.plist configuration path"
print ""
print "actions:"
- #print " --search <search-string>: search for matching resources"
+ print " --search <search-string>: search for matching principals"
print " --list-principal-types: list all of the known principal types"
- #print " --list-principals=type: list all principals of the given type"
+ print " --list-principals type: list all principals of the given type"
print " --read-property=property: read DAV property (eg.: {DAV:}group-member-set)"
print " --list-read-proxies: list proxies with read-only access"
print " --list-write-proxies: list proxies with read-write access"
@@ -89,7 +89,7 @@
sys.argv[1:], "hf:P:", [
"help",
"config=",
- #"search=",
+ "search=",
"list-principal-types",
"list-principals=",
"read-property=",
@@ -112,6 +112,7 @@
configFileName = None
listPrincipalTypes = False
listPrincipals = None
+ searchPrincipals = None
principalActions = []
for opt, arg in optargs:
@@ -127,6 +128,9 @@
elif opt in ("", "--list-principals"):
listPrincipals = arg
+ elif opt in ("", "--search"):
+ searchPrincipals = arg
+
elif opt in ("", "--read-property"):
try:
qname = sname2qname(arg)
@@ -223,39 +227,49 @@
return
- if listPrincipals:
+ elif listPrincipals:
if args:
usage("Too many arguments")
try:
- for record in config.directory.listRecords(listPrincipals):
- print record
+ results = [(record.fullName, record.guid) for record in config.directory.listRecords(listPrincipals)]
+ results.sort()
+ for name, guid in results:
+ print '%s %s' % (name, guid)
except UnknownRecordTypeError, e:
usage(e)
return
- #
- # Do a quick sanity check that arguments look like principal
- # identifiers.
- #
- if not args:
- usage("No principals specified.")
+ elif searchPrincipals:
+ params = (runSearch, searchPrincipals)
- for arg in args:
- try:
- principalForPrincipalID(arg, checkOnly=True)
- except ValueError, e:
- abort(e)
+ else:
+ #
+ # Do a quick sanity check that arguments look like principal
+ # identifiers.
+ #
+ if not args:
+ usage("No principals specified.")
+ for arg in args:
+ try:
+ principalForPrincipalID(arg, checkOnly=True)
+ except ValueError, e:
+ abort(e)
+
+ params = (runPrincipalActions, args, principalActions)
+
#
# Start the reactor
#
- reactor.callLater(0, run, args, principalActions)
+ reactor.callLater(0, *params)
reactor.run()
+
+
@inlineCallbacks
-def run(principalIDs, actions):
+def runPrincipalActions(principalIDs, actions):
try:
for principalID in principalIDs:
# Resolve the given principal IDs to principals
@@ -279,7 +293,44 @@
#
reactor.stop()
+@inlineCallbacks
+def runSearch(searchTerm):
+ try:
+ fields = []
+ for fieldName in ("fullName", "firstName", "lastName", "emailAddresses"):
+ fields.append((fieldName, searchTerm, True, "contains"))
+
+ records = list((yield config.directory.recordsMatchingFields(fields)))
+ if records:
+ records.sort(key=operator.attrgetter('fullName'))
+ print "%d matches found:" % (len(records),)
+ for record in records:
+ print "\n%s (%s)" % (record.fullName,
+ { "users" : "User",
+ "groups" : "Group",
+ "locations" : "Place",
+ "resources" : "Resource",
+ }.get(record.recordType),
+ )
+ print " GUID: %s" % (record.guid,)
+ print " Record name(s): %s" % (", ".join(record.shortNames),)
+ if record.authIDs:
+ print " Auth ID(s): %s" % (", ".join(record.authIDs),)
+ if record.emailAddresses:
+ print " Email(s): %s" % (", ".join(record.emailAddresses),)
+ else:
+ print "No matches found"
+
+ print ""
+
+ finally:
+ #
+ # Stop the reactor
+ #
+ reactor.stop()
+
+
def principalForPrincipalID(principalID, checkOnly=False, directory=None):
# Allow a directory parameter to be passed in, but default to config.directory
@@ -486,46 +537,26 @@
)
@inlineCallbacks
-def _run(directory, root, optargs, principalIDs):
+def action_searchPrincipals(principal, *proxyTypes):
+ for proxyType in proxyTypes:
+ subPrincipal = proxySubprincipal(principal, proxyType)
+ if subPrincipal is None:
+ print "No %s proxies for %s" % (proxyType, principal)
+ continue
- print sys.path
+ membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
- print ""
+ if membersProperty.children:
+ print "%s proxies for %s:" % (
+ {"read": "Read-only", "write": "Read/write"}[proxyType],
+ principal,
+ )
+ for member in membersProperty.children:
+ print " *", member
+ else:
+ print "No %s proxies for %s" % (proxyType, principal)
- for opt, arg in optargs:
- if opt in ("-s", "--search",):
- fields = []
- for fieldName in ("fullName", "firstName", "lastName",
- "emailAddresses"):
- fields.append((fieldName, arg, True, "contains"))
-
- records = list((yield config.directory.recordsMatchingFields(fields)))
- if records:
- records.sort(key=operator.attrgetter('fullName'))
- print "%d matches found:" % (len(records),)
- for record in records:
- print "\n%s (%s)" % (record.fullName,
- { "users" : "User",
- "groups" : "Group",
- "locations" : "Place",
- "resources" : "Resource",
- }.get(record.recordType),
- )
- print record.guid
- print " Record names: %s" % (", ".join(record.shortNames),)
- if record.authIDs:
- print " Auth IDs: %s" % (", ".join(record.authIDs),)
- if record.emailAddresses:
- print " Emails: %s" % (", ".join(record.emailAddresses),)
- else:
- print "No matches found"
-
- print ""
-
- # reactor.callLater(0, reactor.stop)
- reactor.stop()
-
def abort(msg, status=1):
sys.stdout.write("%s\n" % (msg,))
try:
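
The control flow in principals.py now selects a callable plus its arguments up front (runSearch for --search, runPrincipalActions otherwise) and starts it with a single reactor.callLater(0, *params). A minimal sketch of that dispatch pattern, using hypothetical names rather than the module's real helpers:

    def run_search(term):
        print("searching for %s" % (term,))

    def run_actions(principals, actions):
        print("applying %d action(s) to %r" % (len(actions), principals))

    search_term = "smith"      # stand-in for the --search argument
    if search_term:
        params = (run_search, search_term)
    else:
        params = (run_actions, ["users:admin"], [])

    # Equivalent of reactor.callLater(0, *params): the first element is the
    # callable, the remaining elements are its positional arguments.
    func, rest = params[0], params[1:]
    func(*rest)
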
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/purge.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/purge.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/purge.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -16,21 +16,23 @@
# limitations under the License.
##
-from pwd import getpwnam
-from twisted.python.util import switchUID
-from twistedcaldav.directory.directory import DirectoryError
-from grp import getgrnam
from calendarserver.tap.util import FakeRequest
from calendarserver.tap.util import getRootResource
+from calendarserver.tools.principals import removeProxy
from calendarserver.tools.util import loadConfig, setupMemcached, setupNotifications
-from datetime import date, timedelta
+from datetime import date, timedelta, datetime
from getopt import getopt, GetoptError
+from grp import getgrnam
+from pwd import getpwnam
from twext.python.log import Logger
+from twext.web2.dav import davxml
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.util import switchUID
from twistedcaldav import caldavxml
from twistedcaldav.caldavxml import TimeRange
from twistedcaldav.config import config, ConfigurationError
+from twistedcaldav.directory.directory import DirectoryError, DirectoryRecord
from twistedcaldav.method.delete_common import DeleteResource
from twistedcaldav.query import queryfilter
import os
@@ -38,7 +40,7 @@
log = Logger()
-def usage(e=None):
+def usage_purge_events(e=None):
name = os.path.basename(sys.argv[0])
print "usage: %s [options]" % (name,)
@@ -58,10 +60,64 @@
else:
sys.exit(0)
+def usage_purge_principal(e=None):
-def main():
+ name = os.path.basename(sys.argv[0])
+ print "usage: %s [options]" % (name,)
+ print ""
+ print " Remove a principal's events from the calendar server"
+ print ""
+ print "options:"
+ print " -f --config <path>: Specify caldavd.plist configuration path"
+ print " -h --help: print this help and exit"
+ print " -n --dry-run: only calculate how many events to purge"
+ print " -v --verbose: print progress information"
+ print ""
+ if e:
+ sys.exit(64)
+ else:
+ sys.exit(0)
+
+
+
+def shared_main(configFileName, method, *args, **kwds):
+
try:
+ loadConfig(configFileName)
+
+ # Shed privileges
+ if config.UserName and config.GroupName and os.getuid() == 0:
+ uid = getpwnam(config.UserName).pw_uid
+ gid = getgrnam(config.GroupName).gr_gid
+ switchUID(uid, uid, gid)
+
+ os.umask(config.umask)
+
+ try:
+ rootResource = getRootResource(config)
+ directory = rootResource.getDirectory()
+ except DirectoryError, e:
+ print "Error: %s" % (e,)
+ return
+ setupMemcached(config)
+ setupNotifications(config)
+ except ConfigurationError, e:
+ print "Error: %s" % (e,)
+ return
+
+
+ #
+ # Start the reactor
+ #
+ reactor.callLater(0.1, callThenStop, method, directory,
+ rootResource, *args, **kwds)
+
+ reactor.run()
+
+def main_purge_events():
+
+ try:
(optargs, args) = getopt(
sys.argv[1:], "d:f:hnv", [
"days=",
@@ -72,7 +128,7 @@
],
)
except GetoptError, e:
- usage(e)
+ usage_purge_events(e)
#
# Get configuration
@@ -84,14 +140,14 @@
for opt, arg in optargs:
if opt in ("-h", "--help"):
- usage()
+ usage_purge_events()
elif opt in ("-d", "--days"):
try:
days = int(arg)
except ValueError, e:
print "Invalid value for --days: %s" % (arg,)
- usage(e)
+ usage_purge_events(e)
elif opt in ("-v", "--verbose"):
verbose = True
@@ -105,46 +161,59 @@
else:
raise NotImplementedError(opt)
- try:
- loadConfig(configFileName)
+ cutoff = (date.today()-timedelta(days=days)).strftime("%Y%m%dT000000Z")
- # Shed privileges
- if config.UserName and config.GroupName and os.getuid() == 0:
- uid = getpwnam(config.UserName).pw_uid
- gid = getgrnam(config.GroupName).gr_gid
- switchUID(uid, uid, gid)
+ shared_main(configFileName, purgeOldEvents, cutoff, verbose=verbose,
+ dryrun=dryrun)
- os.umask(config.umask)
- try:
- rootResource = getRootResource(config)
- directory = rootResource.getDirectory()
- except DirectoryError, e:
- print "Error: %s" % (e,)
- return
- setupMemcached(config)
- setupNotifications(config)
- except ConfigurationError, e:
- print "Error: %s" % (e,)
- return
+def main_purge_principals():
- cutoff = (date.today() - timedelta(days=days)).strftime("%Y%m%dT000000Z")
+ try:
+ (optargs, args) = getopt(
+ sys.argv[1:], "f:hnv", [
+ "dry-run",
+ "config=",
+ "help",
+ "verbose",
+ ],
+ )
+ except GetoptError, e:
+ usage_purge_principal(e)
#
- # Start the reactor
+ # Get configuration
#
- reactor.callLater(0.1, purgeThenStop, directory, rootResource, cutoff,
- verbose=verbose, dryrun=dryrun)
+ configFileName = None
+ dryrun = False
+ verbose = False
- reactor.run()
+ for opt, arg in optargs:
+ if opt in ("-h", "--help"):
+ usage_purge_principal()
+ elif opt in ("-v", "--verbose"):
+ verbose = True
+
+ elif opt in ("-n", "--dry-run"):
+ dryrun = True
+
+ elif opt in ("-f", "--config"):
+ configFileName = arg
+
+ else:
+ raise NotImplementedError(opt)
+
+ # args is a list of guids
+
+ shared_main(configFileName, purgeGUIDs, args, verbose=verbose, dryrun=dryrun)
+
+
@inlineCallbacks
-def purgeThenStop(directory, rootResource, cutoff, verbose=False, dryrun=False):
- exitCode = 0
+def callThenStop(method, *args, **kwds):
try:
- count = (yield purgeOldEvents(directory, rootResource, cutoff,
- verbose=verbose, dryrun=dryrun))
- if dryrun:
+ count = (yield method(*args, **kwds))
+ if kwds.get("dryrun", False):
print "Would have purged %d events" % (count,)
else:
print "Purged %d events" % (count,)
@@ -167,6 +236,9 @@
print "Scanning calendar homes ...",
records = []
+ calendars = root.getChild("calendars")
+ uidsFPath = calendars.fp.child("__uids__")
+
if uidsFPath.exists():
for firstFPath in uidsFPath.children():
if len(firstFPath.basename()) == 2:
@@ -177,6 +249,7 @@
record = directory.recordWithUID(uid)
if record is not None:
records.append(record)
+
if verbose:
print "%d calendar homes found" % (len(records),)
@@ -215,6 +288,8 @@
# ...and ignore those that appear *after* the given cutoff
for name, uid, type in collection.index().indexedSearch(filter):
+ if isinstance(name, unicode):
+ name = name.encode("utf-8")
if name in resources:
resources.remove(name)
@@ -227,7 +302,8 @@
)
try:
if not dryrun:
- (yield deleteResource(root, collection, resource, uri))
+ (yield deleteResource(root, collection, resource,
+ uri, record.guid))
eventCount += 1
homeEventCount += 1
except Exception, e:
@@ -240,12 +316,116 @@
returnValue(eventCount)
-def deleteResource(root, collection, resource, uri):
+def deleteResource(root, collection, resource, uri, guid, implicit=False):
request = FakeRequest(root, "DELETE", uri)
+ request.authnUser = request.authzUser = davxml.Principal(
+ davxml.HRef.fromString("/principals/__uids__/%s/" % (guid,))
+ )
# TODO: this seems hacky, even for a stub request:
request._rememberResource(resource, uri)
deleter = DeleteResource(request, resource, uri,
- collection, "infinity", allowImplicitSchedule=False)
+ collection, "infinity", allowImplicitSchedule=implicit)
return deleter.run()
+
+
+@inlineCallbacks
+def purgeGUIDs(directory, root, guids, verbose=False, dryrun=False):
+    allAssignments = {}
+    total = 0
+
+ for guid in guids:
+ count, allAssignments[guid] = (yield purgeGUID(guid, directory, root,
+ verbose=verbose, dryrun=dryrun))
+ total += count
+
+
+ # TODO: figure out what to do with the purged proxy assignments...
+ # ...print to stdout?
+ # ...save in a file?
+
+ returnValue(total)
+
+
+@inlineCallbacks
+def purgeGUID(guid, directory, root, verbose=False, dryrun=False):
+
+ # Does the record exist?
+ record = directory.recordWithGUID(guid)
+ if record is None:
+ # The user has already been removed from the directory service. We
+        # need to fashion a temporary, fake record.
+
+        # FIXME: probably want a more elegant way to accomplish this,
+ # since it requires the aggregate directory to examine these first:
+ record = DirectoryRecord(directory, "users", guid, shortNames=(guid,),
+ enabledForCalendaring=True)
+ record.enabled = True
+ directory._tmpRecords["shortNames"][guid] = record
+ directory._tmpRecords["guids"][guid] = record
+
+ principalCollection = directory.principalCollection
+ principal = principalCollection.principalForRecord(record)
+ calendarHome = principal.calendarHome()
+
+ # Anything in the past is left alone
+ now = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
+ filter = caldavxml.Filter(
+ caldavxml.ComponentFilter(
+ caldavxml.ComponentFilter(
+ TimeRange(start=now,),
+ name=("VEVENT", "VFREEBUSY", "VAVAILABILITY"),
+ ),
+ name="VCALENDAR",
+ )
+ )
+ filter = queryfilter.Filter(filter)
+
+ count = 0
+
+ for collName in calendarHome.listChildren():
+ collection = calendarHome.getChild(collName)
+ if collection.isCalendarCollection():
+
+ for name, uid, type in collection.index().indexedSearch(filter):
+ if isinstance(name, unicode):
+ name = name.encode("utf-8")
+ resource = collection.getChild(name)
+ uri = "/calendars/__uids__/%s/%s/%s" % (
+ record.uid,
+ collName,
+ name
+ )
+ if not dryrun:
+ (yield deleteResource(root, collection, resource,
+ uri, guid, implicit=True))
+ count += 1
+
+    assignments = []
+    if not dryrun:
+        assignments = (yield purgeProxyAssignments(principal))
+
+    returnValue((count, assignments))
+
+
+@inlineCallbacks
+def purgeProxyAssignments(principal):
+
+ assignments = []
+
+ for proxyType in ("read", "write"):
+
+ proxyFor = (yield principal.proxyFor(proxyType == "write"))
+ for other in proxyFor:
+ assignments.append((principal.record.guid, proxyType, other.record.guid))
+ (yield removeProxy(other, principal))
+
+ subPrincipal = principal.getChild("calendar-proxy-" + proxyType)
+ proxies = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
+ for other in proxies.children:
+ assignments.append((str(other).split("/")[3], proxyType, principal.record.guid))
+
+ (yield subPrincipal.writeProperty(davxml.GroupMemberSet(), None))
+
+ returnValue(assignments)
+
+
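
The purge.py changes split the old monolithic main() into shared_main(), which performs the common setup (config load, privilege drop, root resource and directory lookup), and callThenStop(), which runs whichever purge method was supplied and reports the count. A rough, reactor-free sketch of that composition, with placeholder objects standing in for the real root resource and directory:

    def call_then_stop(method, *args, **kwds):
        try:
            count = method(*args, **kwds)
            if kwds.get("dryrun", False):
                print("Would have purged %d events" % (count,))
            else:
                print("Purged %d events" % (count,))
        finally:
            pass  # the real code stops the reactor here

    def purge_old_events(directory, root, cutoff, verbose=False, dryrun=False):
        return 0  # placeholder for the real purge logic

    def shared_main(method, *args, **kwds):
        directory, root = object(), object()  # stand-ins for the directory and root resource
        call_then_stop(method, directory, root, *args, **kwds)

    shared_main(purge_old_events, "20100101T000000Z", verbose=True, dryrun=True)
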
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/augments.xml
===================================================================
--- CalendarServer/trunk/calendarserver/tools/test/deprovision/augments.xml 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/augments.xml 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,32 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-
-<!--
-Copyright (c) 2009-2010 Apple Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- -->
-
-<!DOCTYPE accounts SYSTEM "../../../conf/auth/augments.dtd">
-
-<augments>
- <record>
- <uid>E9E78C86-4829-4520-A35D-70DDADAB2092</uid>
- <enable>true</enable>
- <enable-calendar>true</enable-calendar>
- </record>
- <record>
- <uid>291C2C29-B663-4342-8EA1-A055E6A04D65</uid>
- <enable>true</enable>
- <enable-calendar>true</enable-calendar>
- </record>
-</augments>
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/augments.xml (from rev 5438, CalendarServer/trunk/calendarserver/tools/test/deprovision/augments.xml)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/augments.xml (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/augments.xml 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Copyright (c) 2009-2010 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ -->
+
+<!DOCTYPE accounts SYSTEM "../../../conf/auth/augments.dtd">
+
+<augments>
+ <record>
+ <uid>E9E78C86-4829-4520-A35D-70DDADAB2092</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ </record>
+ <record>
+ <uid>291C2C29-B663-4342-8EA1-A055E6A04D65</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ </record>
+</augments>
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/caldavd.plist
===================================================================
--- CalendarServer/trunk/calendarserver/tools/test/deprovision/caldavd.plist 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/caldavd.plist 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,767 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
- Copyright (c) 2006-2010 Apple Inc. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
- <dict>
-
- <!--
- Public network address information
-
- This is the server's public network address, which is provided to
- clients in URLs and the like. It may or may not be the network
- address that the server is listening to directly, though it is by
- default. For example, it may be the address of a load balancer or
- proxy which forwards connections to the server.
- -->
-
- <!-- Network host name [empty = system host name] -->
- <key>ServerHostName</key>
- <string></string> <!-- The hostname clients use when connecting -->
-
- <!-- HTTP port [0 = disable HTTP] -->
- <key>HTTPPort</key>
- <integer>8008</integer>
-
- <!-- SSL port [0 = disable HTTPS] -->
- <!-- (Must also configure SSLCertificate and SSLPrivateKey below) -->
- <key>SSLPort</key>
- <integer>8443</integer>
-
- <!-- Redirect non-SSL ports to an SSL port -->
- <key>RedirectHTTPToHTTPS</key>
- <false/>
-
- <!--
- Network address configuration information
-
- This configures the actual network address that the server binds to.
- -->
-
- <!-- List of IP addresses to bind to [empty = all] -->
- <key>BindAddresses</key>
- <array>
- </array>
-
- <!-- List of port numbers to bind to for HTTP [empty = same as "Port"] -->
- <key>BindHTTPPorts</key>
- <array>
- </array>
-
- <!-- List of port numbers to bind to for SSL [empty = same as "SSLPort"] -->
- <key>BindSSLPorts</key>
- <array>
- </array>
-
-
- <!--
- Data Store
- -->
-
- <!-- Server root -->
- <key>ServerRoot</key>
- <string>%(ServerRoot)s</string>
-
- <!-- Data root -->
- <key>DataRoot</key>
- <string>Data</string>
-
- <!-- Document root -->
- <key>DocumentRoot</key>
- <string>Documents</string>
-
- <!-- Configuration root -->
- <key>ConfigRoot</key>
- <string>/etc/caldavd</string>
-
- <!-- Log root -->
- <key>LogRoot</key>
- <string>/var/log/caldavd</string>
-
- <!-- Run root -->
- <key>RunRoot</key>
- <string>/var/run</string>
-
- <!-- Child aliases -->
- <key>Aliases</key>
- <dict>
- <!--
- <key>foo</key>
- <dict>
- <key>path</key>
- <string>/path/to/foo</string>
- </dict>
- -->
- </dict>
-
- <!-- User quota (in bytes) -->
- <key>UserQuota</key>
- <integer>104857600</integer><!-- 100Mb -->
-
- <!-- Attachment size limit (in bytes) -->
- <key>MaximumAttachmentSize</key>
- <integer>1048576</integer><!-- 1Mb -->
-
- <!-- Maximum number of unique attendees per entire event -->
- <!-- 0 for no limit -->
- <key>MaxAttendeesPerInstance</key>
- <integer>100</integer>
-
- <!-- Maximum number of instances allowed for a single RRULE -->
- <!-- 0 for no limit -->
- <key>MaxInstancesForRRULE</key>
- <integer>400</integer>
-
-
- <!--
- Directory service
-
- A directory service provides information about principals (eg.
- users, groups, locations and resources) to the server.
-
- A variety of directory services are available for use.
- -->
-
- <!-- XML File Directory Service -->
- <key>DirectoryService</key>
- <dict>
- <key>type</key>
- <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
-
- <key>params</key>
- <dict>
- <key>xmlFile</key>
- <string>accounts.xml</string>
- <key>recordTypes</key>
- <array>
- <string>users</string>
- <string>groups</string>
- </array>
- </dict>
- </dict>
-
- <!-- XML File Resource Service -->
- <key>ResourceService</key>
- <dict>
- <key>Enabled</key>
- <true/>
- <key>type</key>
- <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
-
- <key>params</key>
- <dict>
- <key>xmlFile</key>
- <string>resources.xml</string>
- <key>recordTypes</key>
- <array>
- <string>resources</string>
- <string>locations</string>
- </array>
- <key>cacheTimeout</key>
- <integer>30</integer>
- </dict>
- </dict>
-
- <!-- Open Directory Service (Mac OS X) -->
- <!--
- <key>DirectoryService</key>
- <dict>
- <key>type</key>
- <string>twistedcaldav.directory.appleopendirectory.OpenDirectoryService</string>
-
- <key>params</key>
- <dict>
- <key>node</key>
- <string>/Search</string>
- <key>cacheTimeout</key>
- <integer>30</integer>
- </dict>
- </dict>
- -->
-
- <!--
- Augment service
-
- Augments for the directory service records to add calendar specific attributes.
-
- A variety of augment services are available for use.
- When using a partitioned server, a service that can be accessed from each host will be needed.
- -->
-
- <!-- XML File Augment Service -->
- <key>AugmentService</key>
- <dict>
- <key>type</key>
- <string>twistedcaldav.directory.augment.AugmentXMLDB</string>
-
- <key>params</key>
- <dict>
- <key>xmlFiles</key>
- <array>
- <string>augments.xml</string>
- </array>
- </dict>
- </dict>
-
- <!-- Sqlite Augment Service -->
- <!--
- <key>AugmentService</key>
- <dict>
- <key>type</key>
- <string>twistedcaldav.directory.augment.AugmentSqliteDB</string>
-
- <key>params</key>
- <dict>
- <key>dbpath</key>
- <string>/etc/caldavd/augments.sqlite</string>
- </dict>
- </dict>
- -->
-
- <!-- PostgreSQL Augment Service -->
- <!--
- <key>AugmentService</key>
- <dict>
- <key>type</key>
- <string>twistedcaldav.directory.augment.AugmentPostgreSQLDB</string>
-
- <key>params</key>
- <dict>
- <key>host</key>
- <string>localhost</string>
- <key>database</key>
- <string>augments</string>
- </dict>
- </dict>
- -->
-
- <!-- Sqlite ProxyDB Service -->
- <key>ProxyDBService</key>
- <dict>
- <key>type</key>
- <string>twistedcaldav.directory.calendaruserproxy.ProxySqliteDB</string>
-
- <key>params</key>
- <dict>
- <key>dbpath</key>
- <string>proxies.sqlite</string>
- </dict>
- </dict>
-
- <!-- PostgreSQL ProxyDB Service -->
- <!--
- <key>ProxyDBService</key>
- <dict>
- <key>type</key>
- <string>twistedcaldav.directory.calendaruserproxy.ProxyPostgreSQLDB</string>
-
- <key>params</key>
- <dict>
- <key>host</key>
- <string>localhost</string>
- <key>database</key>
- <string>proxies</string>
- </dict>
- </dict>
- -->
-
- <key>ProxyLoadFromFile</key>
- <string>conf/auth/proxies-test.xml</string>
-
- <!--
- Special principals
-
- These principals are granted special access and/or perform
- special roles on the server.
- -->
-
- <!-- Principals with "DAV:all" access (relative URLs) -->
- <key>AdminPrincipals</key>
- <array>
- <string>/principals/__uids__/admin/</string>
- </array>
-
- <!-- Principals with "DAV:read" access (relative URLs) -->
- <key>ReadPrincipals</key>
- <array>
- <!-- <string>/principals/__uids__/983C8238-FB6B-4D92-9242-89C0A39E5F81/</string> -->
- </array>
-
- <!-- Principals that can pose as other principals -->
- <key>SudoersFile</key>
- <string>conf/sudoers.plist</string>
-
- <!-- Create "proxy access" principals -->
- <key>EnableProxyPrincipals</key>
- <true/>
-
-
- <!--
- Permissions
- -->
-
- <!-- Anonymous read access for root resource -->
- <key>EnableAnonymousReadRoot</key>
- <true/>
-
- <!-- Anonymous read access for resource hierarchy -->
- <key>EnableAnonymousReadNav</key>
- <false/>
-
- <!-- Enables directory listings for principals -->
- <key>EnablePrincipalListings</key>
- <true/>
-
- <!-- Render calendar collections as a monolithic iCalendar object -->
- <key>EnableMonolithicCalendars</key>
- <true/>
-
-
- <!--
- Authentication
- -->
-
- <key>Authentication</key>
- <dict>
-
- <!-- Clear text; best avoided -->
- <key>Basic</key>
- <dict>
- <key>Enabled</key>
- <true/>
- </dict>
-
- <!-- Digest challenge/response -->
- <key>Digest</key>
- <dict>
- <key>Enabled</key>
- <true/>
- <key>Algorithm</key>
- <string>md5</string>
- <key>Qop</key>
- <string></string>
- </dict>
-
- <!-- Kerberos/SPNEGO -->
- <key>Kerberos</key>
- <dict>
- <key>Enabled</key>
- <false/>
- <key>ServicePrincipal</key>
- <string></string>
- </dict>
-
- <!-- Wikiserver authentication (Mac OS X) -->
- <key>Wiki</key>
- <dict>
- <key>Enabled</key>
- <true/>
- <key>Cookie</key>
- <string>sessionID</string>
- <key>URL</key>
- <string>http://127.0.0.1/RPC2</string>
- <key>UserMethod</key>
- <string>userForSession</string>
- <key>WikiMethod</key>
- <string>accessLevelForUserWikiCalendar</string>
- </dict>
-
- </dict>
-
-
- <!--
- Logging
- -->
-
- <!-- Apache-style access log -->
- <key>AccessLogFile</key>
- <string>logs/access.log</string>
- <key>RotateAccessLog</key>
- <false/>
-
- <!-- Server activity log -->
- <key>ErrorLogFile</key>
- <string>logs/error.log</string>
-
- <!-- Log levels -->
- <key>DefaultLogLevel</key>
- <string>warn</string> <!-- debug, info, warn, error -->
-
- <!-- Log level overrides for specific functionality -->
- <key>LogLevels</key>
- <dict>
- <!--
- <key>twistedcaldav.directory.appleopendirectory.OpenDirectoryService</key>
- <string>debug</string>
- -->
- </dict>
-
- <!-- Global server stats -->
- <key>GlobalStatsSocket</key>
- <string>logs/caldavd-stats.sock</string>
-
- <!-- Global server stats logging period -->
- <key>GlobalStatsLoggingPeriod</key>
- <integer>60</integer>
-
- <!-- Global server stats logging frequency [0 = disable stats] -->
- <key>GlobalStatsLoggingFrequency</key>
- <integer>12</integer>
-
- <!-- Server process ID file -->
- <key>PIDFile</key>
- <string>logs/caldavd.pid</string>
-
-
- <!--
- Accounting
- -->
-
- <!-- Enable accounting for certain operations -->
- <key>AccountingCategories</key>
- <dict>
- <key>iTIP</key>
- <false/>
- <key>HTTP</key>
- <false/>
- </dict>
- <!-- Enable accounting for specific principals -->
- <key>AccountingPrincipals</key>
- <array>
- <!-- <string>/principals/__uids__/454D85C0-09F0-4DC6-A3C6-97DFEB4622CD/</string> -->
- </array>
-
-
- <!--
- SSL/TLS
- -->
-
- <!-- Public key -->
- <key>SSLCertificate</key>
- <string>twistedcaldav/test/data/server.pem</string>
-
- <!-- SSL authority chain (for intermediate certs) -->
- <key>SSLAuthorityChain</key>
- <string></string>
-
- <!-- Private key -->
- <key>SSLPrivateKey</key>
- <string>twistedcaldav/test/data/server.pem</string>
-
-
- <!--
- Process management
- -->
-
- <key>UserName</key>
- <string></string>
-
- <key>GroupName</key>
- <string></string>
-
- <key>ProcessType</key>
- <string>Combined</string>
-
- <key>MultiProcess</key>
- <dict>
- <key>ProcessCount</key>
- <integer>2</integer> <!-- 0 = larger of: 4 or (2 * CPU count) -->
- </dict>
-
-
- <!--
- Notifications
- -->
-
- <key>Notifications</key>
- <dict>
- <!-- Time spent coalescing notifications before delivery -->
- <key>CoalesceSeconds</key>
- <integer>3</integer>
-
- <key>InternalNotificationHost</key>
- <string>localhost</string>
-
- <key>InternalNotificationPort</key>
- <integer>62309</integer>
-
- <key>Services</key>
- <dict>
- <key>SimpleLineNotifier</key>
- <dict>
- <!-- Simple line notification service (for testing) -->
- <key>Service</key>
- <string>twistedcaldav.notify.SimpleLineNotifierService</string>
- <key>Enabled</key>
- <false/>
- <key>Port</key>
- <integer>62308</integer>
- </dict>
-
- <key>XMPPNotifier</key>
- <dict>
- <!-- XMPP notification service -->
- <key>Service</key>
- <string>twistedcaldav.notify.XMPPNotifierService</string>
- <key>Enabled</key>
- <false/>
-
- <!-- XMPP host and port to contact -->
- <key>Host</key>
- <string>xmpp.host.name</string>
- <key>Port</key>
- <integer>5222</integer>
-
- <!-- Jabber ID and password for the server -->
- <key>JID</key>
- <string>jid@xmpp.host.name/resource</string>
- <key>Password</key>
- <string>password_goes_here</string>
-
- <!-- PubSub service address -->
- <key>ServiceAddress</key>
- <string>pubsub.xmpp.host.name</string>
-
- <key>NodeConfiguration</key>
- <dict>
- <key>pubsub#deliver_payloads</key>
- <string>1</string>
- <key>pubsub#persist_items</key>
- <string>1</string>
- </dict>
-
- <!-- Sends a presence notification to XMPP server at this interval (prevents disconnect) -->
- <key>KeepAliveSeconds</key>
- <integer>120</integer>
-
- <!-- Sends a pubsub publish to a particular heartbeat node at this interval -->
- <key>HeartbeatMinutes</key>
- <integer>30</integer>
-
- <!-- List of glob-like expressions defining which XMPP JIDs can converse with the server (for debugging) -->
- <key>AllowedJIDs</key>
- <array>
- <!--
- <string>*.example.com</string>
- -->
- </array>
- </dict>
- </dict>
- </dict>
-
-
- <!--
- Server-to-server protocol
- -->
-
- <key>Scheduling</key>
- <dict>
-
- <!-- CalDAV protocol options -->
- <key>CalDAV</key>
- <dict>
- <key>EmailDomain</key>
- <string></string>
- <key>HTTPDomain</key>
- <string></string>
- <key>AddressPatterns</key>
- <array>
- </array>
- <key>OldDraftCompatibility</key>
- <true/>
- <key>ScheduleTagCompatibility</key>
- <true/>
- <key>EnablePrivateComments</key>
- <true/>
- </dict>
-
- <!-- iSchedule protocol options -->
- <key>iSchedule</key>
- <dict>
- <key>Enabled</key>
- <false/>
- <key>AddressPatterns</key>
- <array>
- </array>
- <key>Servers</key>
- <string>conf/servertoserver-test.xml</string>
- </dict>
-
- <!-- iMIP protocol options -->
- <key>iMIP</key>
- <dict>
- <key>Enabled</key>
- <false/>
- <key>MailGatewayServer</key>
- <string>localhost</string>
- <key>MailGatewayPort</key>
- <integer>62310</integer>
- <key>Sending</key>
- <dict>
- <key>Server</key>
- <string></string>
- <key>Port</key>
- <integer>587</integer>
- <key>UseSSL</key>
- <true/>
- <key>Username</key>
- <string></string>
- <key>Password</key>
- <string></string>
- <key>Address</key>
- <string></string> <!-- Address email will be sent from -->
- </dict>
- <key>Receiving</key>
- <dict>
- <key>Server</key>
- <string></string>
- <key>Port</key>
- <integer>995</integer>
- <key>Type</key>
- <string></string> <!-- Either "pop" or "imap" -->
- <key>UseSSL</key>
- <true/>
- <key>Username</key>
- <string></string>
- <key>Password</key>
- <string></string>
- <key>PollingSeconds</key>
- <integer>30</integer>
- </dict>
- <key>AddressPatterns</key>
- <array>
- <string>mailto:.*</string>
- </array>
- </dict>
-
- <!-- General options for scheduling -->
- <key>Options</key>
- <dict>
- <key>AllowGroupAsOrganizer</key>
- <false/>
- <key>AllowLocationAsOrganizer</key>
- <false/>
- <key>AllowResourceAsOrganizer</key>
- <false/>
- </dict>
-
- </dict>
-
-
- <!--
- Free-busy URL protocol
- -->
-
- <key>FreeBusyURL</key>
- <dict>
- <key>Enabled</key>
- <true/>
- <key>TimePeriod</key>
- <integer>14</integer>
- <key>AnonymousAccess</key>
- <false/>
- </dict>
-
-
- <!--
- Non-standard CalDAV extensions
- -->
-
- <!-- Calendar Drop Box -->
- <key>EnableDropBox</key>
- <true/>
-
- <!-- Private Events -->
- <key>EnablePrivateEvents</key>
- <true/>
-
- <!-- Timezone Service -->
- <key>EnableTimezoneService</key>
- <true/>
-
-
- <!--
- Miscellaneous items
- -->
-
- <!-- Service ACLs (Mac OS X) -->
- <key>EnableSACLs</key>
- <false/>
-
- <!-- Web-based administration -->
- <key>EnableWebAdmin</key>
- <true/>
-
- <!-- Support for Content-Encoding compression options as specified in RFC2616 Section 3.5 -->
- <key>ResponseCompression</key>
- <false/>
-
- <!-- The retry-after value (in seconds) to return with a 503 error. -->
- <key>HTTPRetryAfter</key>
- <integer>180</integer>
-
- <!-- A unix socket used for communication between the child and master processes.
- An empty value tells the server to use a tcp socket instead. -->
- <key>ControlSocket</key>
- <string>logs/caldavd.sock</string>
-
- <!-- Support for Memcached -->
- <key>Memcached</key>
- <dict>
- <key>MaxClients</key>
- <integer>5</integer>
- <key>memcached</key>
- <string>memcached</string> <!-- Find in PATH -->
- <key>Options</key>
- <array>
- <!--<string>-vv</string>-->
- </array>
- <key>Pools</key>
- <dict>
- <key>Default</key>
- <dict>
- <key>ClientEnabled</key>
- <false/>
- <key>ServerEnabled</key>
- <false/>
- </dict>
- </dict>
- </dict>
-
- <!--
- Twisted
- -->
-
- <key>Twisted</key>
- <dict>
- <key>twistd</key>
- <string>../Twisted/bin/twistd</string>
- </dict>
-
-
- <key>Localization</key>
- <dict>
- <key>LocalesDirectory</key>
- <string>locales</string>
- <key>Language</key>
- <string>English</string>
- </dict>
-
-
- </dict>
-</plist>
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/caldavd.plist (from rev 5438, CalendarServer/trunk/calendarserver/tools/test/deprovision/caldavd.plist)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/caldavd.plist (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/caldavd.plist 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,767 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+ Copyright (c) 2006-2010 Apple Inc. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+
+ <!--
+ Public network address information
+
+ This is the server's public network address, which is provided to
+ clients in URLs and the like. It may or may not be the network
+ address that the server is listening to directly, though it is by
+ default. For example, it may be the address of a load balancer or
+ proxy which forwards connections to the server.
+ -->
+
+ <!-- Network host name [empty = system host name] -->
+ <key>ServerHostName</key>
+ <string></string> <!-- The hostname clients use when connecting -->
+
+ <!-- HTTP port [0 = disable HTTP] -->
+ <key>HTTPPort</key>
+ <integer>8008</integer>
+
+ <!-- SSL port [0 = disable HTTPS] -->
+ <!-- (Must also configure SSLCertificate and SSLPrivateKey below) -->
+ <key>SSLPort</key>
+ <integer>8443</integer>
+
+ <!-- Redirect non-SSL ports to an SSL port -->
+ <key>RedirectHTTPToHTTPS</key>
+ <false/>
+
+ <!--
+ Network address configuration information
+
+ This configures the actual network address that the server binds to.
+ -->
+
+ <!-- List of IP addresses to bind to [empty = all] -->
+ <key>BindAddresses</key>
+ <array>
+ </array>
+
+ <!-- List of port numbers to bind to for HTTP [empty = same as "Port"] -->
+ <key>BindHTTPPorts</key>
+ <array>
+ </array>
+
+ <!-- List of port numbers to bind to for SSL [empty = same as "SSLPort"] -->
+ <key>BindSSLPorts</key>
+ <array>
+ </array>
+
+
+ <!--
+ Data Store
+ -->
+
+ <!-- Server root -->
+ <key>ServerRoot</key>
+ <string>%(ServerRoot)s</string>
+
+ <!-- Data root -->
+ <key>DataRoot</key>
+ <string>Data</string>
+
+ <!-- Document root -->
+ <key>DocumentRoot</key>
+ <string>Documents</string>
+
+ <!-- Configuration root -->
+ <key>ConfigRoot</key>
+ <string>/etc/caldavd</string>
+
+ <!-- Log root -->
+ <key>LogRoot</key>
+ <string>/var/log/caldavd</string>
+
+ <!-- Run root -->
+ <key>RunRoot</key>
+ <string>/var/run</string>
+
+ <!-- Child aliases -->
+ <key>Aliases</key>
+ <dict>
+ <!--
+ <key>foo</key>
+ <dict>
+ <key>path</key>
+ <string>/path/to/foo</string>
+ </dict>
+ -->
+ </dict>
+
+ <!-- User quota (in bytes) -->
+ <key>UserQuota</key>
+ <integer>104857600</integer><!-- 100Mb -->
+
+ <!-- Attachment size limit (in bytes) -->
+ <key>MaximumAttachmentSize</key>
+ <integer>1048576</integer><!-- 1Mb -->
+
+ <!-- Maximum number of unique attendees per entire event -->
+ <!-- 0 for no limit -->
+ <key>MaxAttendeesPerInstance</key>
+ <integer>100</integer>
+
+ <!-- Maximum number of instances allowed for a single RRULE -->
+ <!-- 0 for no limit -->
+ <key>MaxInstancesForRRULE</key>
+ <integer>400</integer>
+
+
+ <!--
+ Directory service
+
+ A directory service provides information about principals (eg.
+ users, groups, locations and resources) to the server.
+
+ A variety of directory services are available for use.
+ -->
+
+ <!-- XML File Directory Service -->
+ <key>DirectoryService</key>
+ <dict>
+ <key>type</key>
+ <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
+
+ <key>params</key>
+ <dict>
+ <key>xmlFile</key>
+ <string>accounts.xml</string>
+ <key>recordTypes</key>
+ <array>
+ <string>users</string>
+ <string>groups</string>
+ </array>
+ </dict>
+ </dict>
+
+ <!-- XML File Resource Service -->
+ <key>ResourceService</key>
+ <dict>
+ <key>Enabled</key>
+ <true/>
+ <key>type</key>
+ <string>twistedcaldav.directory.xmlfile.XMLDirectoryService</string>
+
+ <key>params</key>
+ <dict>
+ <key>xmlFile</key>
+ <string>resources.xml</string>
+ <key>recordTypes</key>
+ <array>
+ <string>resources</string>
+ <string>locations</string>
+ </array>
+ <key>cacheTimeout</key>
+ <integer>30</integer>
+ </dict>
+ </dict>
+
+ <!-- Open Directory Service (Mac OS X) -->
+ <!--
+ <key>DirectoryService</key>
+ <dict>
+ <key>type</key>
+ <string>twistedcaldav.directory.appleopendirectory.OpenDirectoryService</string>
+
+ <key>params</key>
+ <dict>
+ <key>node</key>
+ <string>/Search</string>
+ <key>cacheTimeout</key>
+ <integer>30</integer>
+ </dict>
+ </dict>
+ -->
+
+ <!--
+ Augment service
+
+ Augments for the directory service records to add calendar specific attributes.
+
+ A variety of augment services are available for use.
+ When using a partitioned server, a service that can be accessed from each host will be needed.
+ -->
+
+ <!-- XML File Augment Service -->
+ <key>AugmentService</key>
+ <dict>
+ <key>type</key>
+ <string>twistedcaldav.directory.augment.AugmentXMLDB</string>
+
+ <key>params</key>
+ <dict>
+ <key>xmlFiles</key>
+ <array>
+ <string>augments.xml</string>
+ </array>
+ </dict>
+ </dict>
+
+ <!-- Sqlite Augment Service -->
+ <!--
+ <key>AugmentService</key>
+ <dict>
+ <key>type</key>
+ <string>twistedcaldav.directory.augment.AugmentSqliteDB</string>
+
+ <key>params</key>
+ <dict>
+ <key>dbpath</key>
+ <string>/etc/caldavd/augments.sqlite</string>
+ </dict>
+ </dict>
+ -->
+
+ <!-- PostgreSQL Augment Service -->
+ <!--
+ <key>AugmentService</key>
+ <dict>
+ <key>type</key>
+ <string>twistedcaldav.directory.augment.AugmentPostgreSQLDB</string>
+
+ <key>params</key>
+ <dict>
+ <key>host</key>
+ <string>localhost</string>
+ <key>database</key>
+ <string>augments</string>
+ </dict>
+ </dict>
+ -->
+
+ <!-- Sqlite ProxyDB Service -->
+ <key>ProxyDBService</key>
+ <dict>
+ <key>type</key>
+ <string>twistedcaldav.directory.calendaruserproxy.ProxySqliteDB</string>
+
+ <key>params</key>
+ <dict>
+ <key>dbpath</key>
+ <string>proxies.sqlite</string>
+ </dict>
+ </dict>
+
+ <!-- PostgreSQL ProxyDB Service -->
+ <!--
+ <key>ProxyDBService</key>
+ <dict>
+ <key>type</key>
+ <string>twistedcaldav.directory.calendaruserproxy.ProxyPostgreSQLDB</string>
+
+ <key>params</key>
+ <dict>
+ <key>host</key>
+ <string>localhost</string>
+ <key>database</key>
+ <string>proxies</string>
+ </dict>
+ </dict>
+ -->
+
+ <key>ProxyLoadFromFile</key>
+ <string>conf/auth/proxies-test.xml</string>
+
+ <!--
+ Special principals
+
+ These principals are granted special access and/or perform
+ special roles on the server.
+ -->
+
+ <!-- Principals with "DAV:all" access (relative URLs) -->
+ <key>AdminPrincipals</key>
+ <array>
+ <string>/principals/__uids__/admin/</string>
+ </array>
+
+ <!-- Principals with "DAV:read" access (relative URLs) -->
+ <key>ReadPrincipals</key>
+ <array>
+ <!-- <string>/principals/__uids__/983C8238-FB6B-4D92-9242-89C0A39E5F81/</string> -->
+ </array>
+
+ <!-- Principals that can pose as other principals -->
+ <key>SudoersFile</key>
+ <string>conf/sudoers.plist</string>
+
+ <!-- Create "proxy access" principals -->
+ <key>EnableProxyPrincipals</key>
+ <true/>
+
+
+ <!--
+ Permissions
+ -->
+
+ <!-- Anonymous read access for root resource -->
+ <key>EnableAnonymousReadRoot</key>
+ <true/>
+
+ <!-- Anonymous read access for resource hierarchy -->
+ <key>EnableAnonymousReadNav</key>
+ <false/>
+
+ <!-- Enables directory listings for principals -->
+ <key>EnablePrincipalListings</key>
+ <true/>
+
+ <!-- Render calendar collections as a monolithic iCalendar object -->
+ <key>EnableMonolithicCalendars</key>
+ <true/>
+
+
+ <!--
+ Authentication
+ -->
+
+ <key>Authentication</key>
+ <dict>
+
+ <!-- Clear text; best avoided -->
+ <key>Basic</key>
+ <dict>
+ <key>Enabled</key>
+ <true/>
+ </dict>
+
+ <!-- Digest challenge/response -->
+ <key>Digest</key>
+ <dict>
+ <key>Enabled</key>
+ <true/>
+ <key>Algorithm</key>
+ <string>md5</string>
+ <key>Qop</key>
+ <string></string>
+ </dict>
+
+ <!-- Kerberos/SPNEGO -->
+ <key>Kerberos</key>
+ <dict>
+ <key>Enabled</key>
+ <false/>
+ <key>ServicePrincipal</key>
+ <string></string>
+ </dict>
+
+ <!-- Wikiserver authentication (Mac OS X) -->
+ <key>Wiki</key>
+ <dict>
+ <key>Enabled</key>
+ <true/>
+ <key>Cookie</key>
+ <string>sessionID</string>
+ <key>URL</key>
+ <string>http://127.0.0.1/RPC2</string>
+ <key>UserMethod</key>
+ <string>userForSession</string>
+ <key>WikiMethod</key>
+ <string>accessLevelForUserWikiCalendar</string>
+ </dict>
+
+ </dict>
+
+
+ <!--
+ Logging
+ -->
+
+ <!-- Apache-style access log -->
+ <key>AccessLogFile</key>
+ <string>logs/access.log</string>
+ <key>RotateAccessLog</key>
+ <false/>
+
+ <!-- Server activity log -->
+ <key>ErrorLogFile</key>
+ <string>logs/error.log</string>
+
+ <!-- Log levels -->
+ <key>DefaultLogLevel</key>
+ <string>warn</string> <!-- debug, info, warn, error -->
+
+ <!-- Log level overrides for specific functionality -->
+ <key>LogLevels</key>
+ <dict>
+ <!--
+ <key>twistedcaldav.directory.appleopendirectory.OpenDirectoryService</key>
+ <string>debug</string>
+ -->
+ </dict>
+
+ <!-- Global server stats -->
+ <key>GlobalStatsSocket</key>
+ <string>logs/caldavd-stats.sock</string>
+
+ <!-- Global server stats logging period -->
+ <key>GlobalStatsLoggingPeriod</key>
+ <integer>60</integer>
+
+ <!-- Global server stats logging frequency [0 = disable stats] -->
+ <key>GlobalStatsLoggingFrequency</key>
+ <integer>12</integer>
+
+ <!-- Server process ID file -->
+ <key>PIDFile</key>
+ <string>logs/caldavd.pid</string>
+
+
+ <!--
+ Accounting
+ -->
+
+ <!-- Enable accounting for certain operations -->
+ <key>AccountingCategories</key>
+ <dict>
+ <key>iTIP</key>
+ <false/>
+ <key>HTTP</key>
+ <false/>
+ </dict>
+ <!-- Enable accounting for specific principals -->
+ <key>AccountingPrincipals</key>
+ <array>
+ <!-- <string>/principals/__uids__/454D85C0-09F0-4DC6-A3C6-97DFEB4622CD/</string> -->
+ </array>
+
+
+ <!--
+ SSL/TLS
+ -->
+
+ <!-- Public key -->
+ <key>SSLCertificate</key>
+ <string>twistedcaldav/test/data/server.pem</string>
+
+ <!-- SSL authority chain (for intermediate certs) -->
+ <key>SSLAuthorityChain</key>
+ <string></string>
+
+ <!-- Private key -->
+ <key>SSLPrivateKey</key>
+ <string>twistedcaldav/test/data/server.pem</string>
+
+
+ <!--
+ Process management
+ -->
+
+ <key>UserName</key>
+ <string></string>
+
+ <key>GroupName</key>
+ <string></string>
+
+ <key>ProcessType</key>
+ <string>Combined</string>
+
+ <key>MultiProcess</key>
+ <dict>
+ <key>ProcessCount</key>
+ <integer>2</integer> <!-- 0 = larger of: 4 or (2 * CPU count) -->
+ </dict>
+
+
+ <!--
+ Notifications
+ -->
+
+ <key>Notifications</key>
+ <dict>
+ <!-- Time spent coalescing notifications before delivery -->
+ <key>CoalesceSeconds</key>
+ <integer>3</integer>
+
+ <key>InternalNotificationHost</key>
+ <string>localhost</string>
+
+ <key>InternalNotificationPort</key>
+ <integer>62309</integer>
+
+ <key>Services</key>
+ <dict>
+ <key>SimpleLineNotifier</key>
+ <dict>
+ <!-- Simple line notification service (for testing) -->
+ <key>Service</key>
+ <string>twistedcaldav.notify.SimpleLineNotifierService</string>
+ <key>Enabled</key>
+ <false/>
+ <key>Port</key>
+ <integer>62308</integer>
+ </dict>
+
+ <key>XMPPNotifier</key>
+ <dict>
+ <!-- XMPP notification service -->
+ <key>Service</key>
+ <string>twistedcaldav.notify.XMPPNotifierService</string>
+ <key>Enabled</key>
+ <false/>
+
+ <!-- XMPP host and port to contact -->
+ <key>Host</key>
+ <string>xmpp.host.name</string>
+ <key>Port</key>
+ <integer>5222</integer>
+
+ <!-- Jabber ID and password for the server -->
+ <key>JID</key>
+ <string>jid@xmpp.host.name/resource</string>
+ <key>Password</key>
+ <string>password_goes_here</string>
+
+ <!-- PubSub service address -->
+ <key>ServiceAddress</key>
+ <string>pubsub.xmpp.host.name</string>
+
+ <key>NodeConfiguration</key>
+ <dict>
+ <key>pubsub#deliver_payloads</key>
+ <string>1</string>
+ <key>pubsub#persist_items</key>
+ <string>1</string>
+ </dict>
+
+ <!-- Sends a presence notification to XMPP server at this interval (prevents disconnect) -->
+ <key>KeepAliveSeconds</key>
+ <integer>120</integer>
+
+ <!-- Sends a pubsub publish to a particular heartbeat node at this interval -->
+ <key>HeartbeatMinutes</key>
+ <integer>30</integer>
+
+ <!-- List of glob-like expressions defining which XMPP JIDs can converse with the server (for debugging) -->
+ <key>AllowedJIDs</key>
+ <array>
+ <!--
+ <string>*.example.com</string>
+ -->
+ </array>
+ </dict>
+ </dict>
+ </dict>
+
+
+ <!--
+ Server-to-server protocol
+ -->
+
+ <key>Scheduling</key>
+ <dict>
+
+ <!-- CalDAV protocol options -->
+ <key>CalDAV</key>
+ <dict>
+ <key>EmailDomain</key>
+ <string></string>
+ <key>HTTPDomain</key>
+ <string></string>
+ <key>AddressPatterns</key>
+ <array>
+ </array>
+ <key>OldDraftCompatibility</key>
+ <true/>
+ <key>ScheduleTagCompatibility</key>
+ <true/>
+ <key>EnablePrivateComments</key>
+ <true/>
+ </dict>
+
+ <!-- iSchedule protocol options -->
+ <key>iSchedule</key>
+ <dict>
+ <key>Enabled</key>
+ <false/>
+ <key>AddressPatterns</key>
+ <array>
+ </array>
+ <key>Servers</key>
+ <string>conf/servertoserver-test.xml</string>
+ </dict>
+
+ <!-- iMIP protocol options -->
+ <key>iMIP</key>
+ <dict>
+ <key>Enabled</key>
+ <false/>
+ <key>MailGatewayServer</key>
+ <string>localhost</string>
+ <key>MailGatewayPort</key>
+ <integer>62310</integer>
+ <key>Sending</key>
+ <dict>
+ <key>Server</key>
+ <string></string>
+ <key>Port</key>
+ <integer>587</integer>
+ <key>UseSSL</key>
+ <true/>
+ <key>Username</key>
+ <string></string>
+ <key>Password</key>
+ <string></string>
+ <key>Address</key>
+ <string></string> <!-- Address email will be sent from -->
+ </dict>
+ <key>Receiving</key>
+ <dict>
+ <key>Server</key>
+ <string></string>
+ <key>Port</key>
+ <integer>995</integer>
+ <key>Type</key>
+ <string></string> <!-- Either "pop" or "imap" -->
+ <key>UseSSL</key>
+ <true/>
+ <key>Username</key>
+ <string></string>
+ <key>Password</key>
+ <string></string>
+ <key>PollingSeconds</key>
+ <integer>30</integer>
+ </dict>
+ <key>AddressPatterns</key>
+ <array>
+ <string>mailto:.*</string>
+ </array>
+ </dict>
+
+ <!-- General options for scheduling -->
+ <key>Options</key>
+ <dict>
+ <key>AllowGroupAsOrganizer</key>
+ <false/>
+ <key>AllowLocationAsOrganizer</key>
+ <false/>
+ <key>AllowResourceAsOrganizer</key>
+ <false/>
+ </dict>
+
+ </dict>
+
+
+ <!--
+ Free-busy URL protocol
+ -->
+
+ <key>FreeBusyURL</key>
+ <dict>
+ <key>Enabled</key>
+ <true/>
+ <key>TimePeriod</key>
+ <integer>14</integer>
+ <key>AnonymousAccess</key>
+ <false/>
+ </dict>
+
+
+ <!--
+ Non-standard CalDAV extensions
+ -->
+
+ <!-- Calendar Drop Box -->
+ <key>EnableDropBox</key>
+ <true/>
+
+ <!-- Private Events -->
+ <key>EnablePrivateEvents</key>
+ <true/>
+
+ <!-- Timezone Service -->
+ <key>EnableTimezoneService</key>
+ <true/>
+
+
+ <!--
+ Miscellaneous items
+ -->
+
+ <!-- Service ACLs (Mac OS X) -->
+ <key>EnableSACLs</key>
+ <false/>
+
+ <!-- Web-based administration -->
+ <key>EnableWebAdmin</key>
+ <true/>
+
+ <!-- Support for Content-Encoding compression options as specified in RFC2616 Section 3.5 -->
+ <key>ResponseCompression</key>
+ <false/>
+
+ <!-- The retry-after value (in seconds) to return with a 503 error. -->
+ <key>HTTPRetryAfter</key>
+ <integer>180</integer>
+
+ <!-- A unix socket used for communication between the child and master processes.
+ An empty value tells the server to use a tcp socket instead. -->
+ <key>ControlSocket</key>
+ <string>logs/caldavd.sock</string>
+
+ <!-- Support for Memcached -->
+ <key>Memcached</key>
+ <dict>
+ <key>MaxClients</key>
+ <integer>5</integer>
+ <key>memcached</key>
+ <string>memcached</string> <!-- Find in PATH -->
+ <key>Options</key>
+ <array>
+ <!--<string>-vv</string>-->
+ </array>
+ <key>Pools</key>
+ <dict>
+ <key>Default</key>
+ <dict>
+ <key>ClientEnabled</key>
+ <false/>
+ <key>ServerEnabled</key>
+ <false/>
+ </dict>
+ </dict>
+ </dict>
+
+ <!--
+ Twisted
+ -->
+
+ <key>Twisted</key>
+ <dict>
+ <key>twistd</key>
+ <string>../Twisted/bin/twistd</string>
+ </dict>
+
+
+ <key>Localization</key>
+ <dict>
+ <key>LocalesDirectory</key>
+ <string>locales</string>
+ <key>Language</key>
+ <string>English</string>
+ </dict>
+
+
+ </dict>
+</plist>
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/resources-locations.xml
===================================================================
--- CalendarServer/trunk/calendarserver/tools/test/deprovision/resources-locations.xml 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/resources-locations.xml 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-
-<!--
-Copyright (c) 2006-2010 Apple Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- -->
-
-<!DOCTYPE accounts SYSTEM "accounts.dtd">
-
-<accounts realm="Test Realm">
- <location repeat="10">
- <uid>location%02d</uid>
- <guid>location%02d</guid>
- <password>location%02d</password>
- <name>Room %02d</name>
- </location>
- <resource repeat="10">
- <uid>resource%02d</uid>
- <guid>resource%02d</guid>
- <password>resource%02d</password>
- <name>Resource %02d</name>
- </resource>
-</accounts>
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/resources-locations.xml (from rev 5438, CalendarServer/trunk/calendarserver/tools/test/deprovision/resources-locations.xml)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/resources-locations.xml (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/resources-locations.xml 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Copyright (c) 2006-2010 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ -->
+
+<!DOCTYPE accounts SYSTEM "accounts.dtd">
+
+<accounts realm="Test Realm">
+ <location repeat="10">
+ <uid>location%02d</uid>
+ <guid>location%02d</guid>
+ <password>location%02d</password>
+ <name>Room %02d</name>
+ </location>
+ <resource repeat="10">
+ <uid>resource%02d</uid>
+ <guid>resource%02d</guid>
+ <password>resource%02d</password>
+ <name>Resource %02d</name>
+ </resource>
+</accounts>
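A note on this fixture format: the repeat="10" attribute combined with %02d placeholders is presumably what lets the gateway and principals tests later in this changeset expect exactly ten locations, ten resources, and users named user01 through user10. An illustrative sketch of that expansion (the directory service is assumed to do something equivalent when it loads the file):

    # Illustrative only: expand a repeat="10" element with %02d placeholders
    # into the concrete record names the tests assert on.
    def expand(template, count):
        return [template % (i,) for i in xrange(1, count + 1)]

    print expand("location%02d", 10)   # location01 ... location10
    print expand("resource%02d", 10)   # resource01 ... resource10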
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/users-groups.xml
===================================================================
--- CalendarServer/trunk/calendarserver/tools/test/deprovision/users-groups.xml 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/users-groups.xml 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,38 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-
-<!--
-Copyright (c) 2006-2010 Apple Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- -->
-
-<!DOCTYPE accounts SYSTEM "accounts.dtd">
-
-<accounts realm="Test Realm">
- <user>
- <uid>deprovisioned</uid>
- <guid>E9E78C86-4829-4520-A35D-70DDADAB2092</guid>
- <password>test</password>
- <name>Deprovisioned User</name>
- <first-name>Deprovisioned</first-name>
- <last-name>User</last-name>
- </user>
- <user>
- <uid>keeper</uid>
- <guid>291C2C29-B663-4342-8EA1-A055E6A04D65</guid>
- <password>test</password>
- <name>Keeper User</name>
- <first-name>Keeper</first-name>
- <last-name>User</last-name>
- </user>
-</accounts>
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/users-groups.xml (from rev 5438, CalendarServer/trunk/calendarserver/tools/test/deprovision/users-groups.xml)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/users-groups.xml (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/deprovision/users-groups.xml 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Copyright (c) 2006-2010 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ -->
+
+<!DOCTYPE accounts SYSTEM "accounts.dtd">
+
+<accounts realm="Test Realm">
+ <user>
+ <uid>deprovisioned</uid>
+ <guid>E9E78C86-4829-4520-A35D-70DDADAB2092</guid>
+ <password>test</password>
+ <name>Deprovisioned User</name>
+ <first-name>Deprovisioned</first-name>
+ <last-name>User</last-name>
+ </user>
+ <user>
+ <uid>keeper</uid>
+ <guid>291C2C29-B663-4342-8EA1-A055E6A04D65</guid>
+ <password>test</password>
+ <name>Keeper User</name>
+ <first-name>Keeper</first-name>
+ <last-name>User</last-name>
+ </user>
+</accounts>
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/gateway/caldavd.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/gateway/caldavd.plist 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/gateway/caldavd.plist 2010-04-07 19:27:31 UTC (rev 5439)
@@ -400,7 +400,7 @@
<!-- Log levels -->
<key>DefaultLogLevel</key>
- <string>info</string> <!-- debug, info, warn, error -->
+ <string>warn</string> <!-- debug, info, warn, error -->
<!-- Log level overrides for specific functionality -->
<key>LogLevels</key>
@@ -743,11 +743,7 @@
</dict>
</dict>
- <!-- Response Caching -->
- <key>ResponseCacheTimeout</key>
- <integer>30</integer> <!-- in minutes -->
-
<!--
Twisted
-->
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/principals/caldavd.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/principals/caldavd.plist 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/principals/caldavd.plist 2010-04-07 19:27:31 UTC (rev 5439)
@@ -400,7 +400,7 @@
<!-- Log levels -->
<key>DefaultLogLevel</key>
- <string>info</string> <!-- debug, info, warn, error -->
+ <string>warn</string> <!-- debug, info, warn, error -->
<!-- Log level overrides for specific functionality -->
<key>LogLevels</key>
@@ -743,11 +743,6 @@
</dict>
</dict>
- <!-- Response Caching -->
- <key>ResponseCacheTimeout</key>
- <integer>30</integer> <!-- in minutes -->
-
-
<!--
Twisted
-->
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_gateway.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_gateway.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_gateway.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -41,7 +41,7 @@
templateFile.close()
newConfig = template % {
- 'ServerRoot' : os.path.abspath(config.ServerRoot),
+ "ServerRoot" : os.path.abspath(config.ServerRoot),
}
configFilePath = FilePath(os.path.join(config.ConfigRoot, "caldavd.plist"))
configFilePath.setContent(newConfig)
@@ -49,6 +49,8 @@
self.configFileName = configFilePath.path
config.load(self.configFileName)
+ os.makedirs(config.DataRoot)
+
origUsersFile = FilePath(os.path.join(os.path.dirname(__file__),
"gateway", "users-groups.xml"))
copyUsersFile = FilePath(os.path.join(config.DataRoot, "accounts.xml"))
@@ -100,35 +102,35 @@
@inlineCallbacks
def test_getLocationList(self):
results = yield self.runCommand(command_getLocationList)
- self.assertEquals(len(results['result']), 10)
+ self.assertEquals(len(results["result"]), 10)
@inlineCallbacks
def test_getLocationAttributes(self):
results = yield self.runCommand(command_createLocation)
results = yield self.runCommand(command_getLocationAttributes)
- self.assertEquals(results['result']['Building'], "Test Building")
- self.assertEquals(results['result']['City'], "Cupertino")
- self.assertEquals(results['result']['Capacity'], "40")
- self.assertEquals(results['result']['Description'], "Test Description")
- self.assertEquals(results['result']['ZIP'], "95014")
- self.assertEquals(results['result']['Floor'], "First")
- self.assertEquals(results['result']['RecordName'], ['createdlocation01'])
- self.assertEquals(results['result']['State'], "CA")
- self.assertEquals(results['result']['Street'], "1 Infinite Loop")
- self.assertEquals(results['result']['RealName'], "Created Location 01")
- self.assertEquals(results['result']['Comment'], "Test Comment")
- self.assertEquals(results['result']['AutoSchedule'], True)
+ self.assertEquals(results["result"]["Building"], "Test Building")
+ self.assertEquals(results["result"]["City"], "Cupertino")
+ self.assertEquals(results["result"]["Capacity"], "40")
+ self.assertEquals(results["result"]["Description"], "Test Description")
+ self.assertEquals(results["result"]["ZIP"], "95014")
+ self.assertEquals(results["result"]["Floor"], "First")
+ self.assertEquals(results["result"]["RecordName"], ["createdlocation01"])
+ self.assertEquals(results["result"]["State"], "CA")
+ self.assertEquals(results["result"]["Street"], "1 Infinite Loop")
+ self.assertEquals(results["result"]["RealName"], "Created Location 01")
+ self.assertEquals(results["result"]["Comment"], "Test Comment")
+ self.assertEquals(results["result"]["AutoSchedule"], True)
@inlineCallbacks
def test_getResourceList(self):
results = yield self.runCommand(command_getResourceList)
- self.assertEquals(len(results['result']), 10)
+ self.assertEquals(len(results["result"]), 10)
@inlineCallbacks
def test_getResourceAttributes(self):
results = yield self.runCommand(command_createResource)
results = yield self.runCommand(command_getResourceAttributes)
- self.assertEquals(results['result']['Comment'], "Test Comment")
+ self.assertEquals(results["result"]["Comment"], "Test Comment")
@inlineCallbacks
def test_createLocation(self):
@@ -150,16 +152,16 @@
self.assertNotEquals(record, None)
self.assertEquals(record.autoSchedule, True)
- self.assertEquals(record.extras['comment'], "Test Comment")
- self.assertEquals(record.extras['building'], "Test Building")
- self.assertEquals(record.extras['floor'], "First")
- self.assertEquals(record.extras['capacity'], "40")
- self.assertEquals(record.extras['street'], "1 Infinite Loop")
- self.assertEquals(record.extras['city'], "Cupertino")
- self.assertEquals(record.extras['state'], "CA")
- self.assertEquals(record.extras['zip'], "95014")
- self.assertEquals(record.extras['country'], "USA")
- self.assertEquals(record.extras['phone'], "(408) 555-1212")
+ self.assertEquals(record.extras["comment"], "Test Comment")
+ self.assertEquals(record.extras["building"], "Test Building")
+ self.assertEquals(record.extras["floor"], "First")
+ self.assertEquals(record.extras["capacity"], "40")
+ self.assertEquals(record.extras["street"], "1 Infinite Loop")
+ self.assertEquals(record.extras["city"], "Cupertino")
+ self.assertEquals(record.extras["state"], "CA")
+ self.assertEquals(record.extras["zip"], "95014")
+ self.assertEquals(record.extras["country"], "USA")
+ self.assertEquals(record.extras["phone"], "(408) 555-1212")
@inlineCallbacks
def test_setLocationAttributes(self):
@@ -176,20 +178,20 @@
record = directory.recordWithUID("836B1B66-2E9A-4F46-8B1C-3DD6772C20B2")
- self.assertEquals(record.extras['comment'], "Updated Test Comment")
- self.assertEquals(record.extras['building'], "Updated Test Building")
- self.assertEquals(record.extras['floor'], "Second")
- self.assertEquals(record.extras['capacity'], "41")
- self.assertEquals(record.extras['street'], "2 Infinite Loop")
- self.assertEquals(record.extras['city'], "Updated Cupertino")
- self.assertEquals(record.extras['state'], "Updated CA")
- self.assertEquals(record.extras['zip'], "95015")
- self.assertEquals(record.extras['country'], "Updated USA")
- self.assertEquals(record.extras['phone'], "(408) 555-1213")
+ self.assertEquals(record.extras["comment"], "Updated Test Comment")
+ self.assertEquals(record.extras["building"], "Updated Test Building")
+ self.assertEquals(record.extras["floor"], "Second")
+ self.assertEquals(record.extras["capacity"], "41")
+ self.assertEquals(record.extras["street"], "2 Infinite Loop")
+ self.assertEquals(record.extras["city"], "Updated Cupertino")
+ self.assertEquals(record.extras["state"], "Updated CA")
+ self.assertEquals(record.extras["zip"], "95015")
+ self.assertEquals(record.extras["country"], "Updated USA")
+ self.assertEquals(record.extras["phone"], "(408) 555-1213")
self.assertEquals(record.autoSchedule, True)
results = yield self.runCommand(command_getLocationAttributes)
- self.assertEquals(results['result']['AutoSchedule'], True)
+ self.assertEquals(results["result"]["AutoSchedule"], True)
@inlineCallbacks
@@ -248,13 +250,13 @@
@inlineCallbacks
def test_addWriteProxy(self):
results = yield self.runCommand(command_addWriteProxy)
- self.assertEquals(len(results['result']['Proxies']), 1)
+ self.assertEquals(len(results["result"]["Proxies"]), 1)
@inlineCallbacks
def test_removeWriteProxy(self):
results = yield self.runCommand(command_addWriteProxy)
results = yield self.runCommand(command_removeWriteProxy)
- self.assertEquals(len(results['result']['Proxies']), 0)
+ self.assertEquals(len(results["result"]["Proxies"]), 0)
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_principals.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_principals.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_principals.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -36,7 +36,7 @@
templateFile.close()
newConfig = template % {
- 'ServerRoot' : os.path.abspath(config.ServerRoot),
+ "ServerRoot" : os.path.abspath(config.ServerRoot),
}
configFilePath = FilePath(os.path.join(config.ConfigRoot, "caldavd.plist"))
configFilePath.setContent(newConfig)
@@ -44,6 +44,8 @@
self.configFileName = configFilePath.path
config.load(self.configFileName)
+ os.makedirs(config.DataRoot)
+
origUsersFile = FilePath(os.path.join(os.path.dirname(__file__),
"principals", "users-groups.xml"))
copyUsersFile = FilePath(os.path.join(config.DataRoot, "accounts.xml"))
@@ -98,6 +100,19 @@
self.assertTrue("resources" in results)
@inlineCallbacks
+ def test_listPrincipals(self):
+ results = yield self.runCommand("--list-principals=users")
+ for i in xrange(1, 10):
+ self.assertTrue("user%02d" % (i,) in results)
+
+ @inlineCallbacks
+ def test_search(self):
+ results = yield self.runCommand("--search=user")
+ self.assertTrue("10 matches found" in results)
+ for i in xrange(1, 10):
+ self.assertTrue("user%02d" % (i,) in results)
+
+ @inlineCallbacks
def test_modifyWriteProxies(self):
results = yield self.runCommand("--add-write-proxy=users:user01",
"locations:location01")
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_purge.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_purge.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/calendarserver/tools/test/test_purge.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -14,13 +14,21 @@
# limitations under the License.
##
+from calendarserver.tap.util import getRootResource
+from calendarserver.tools.principals import addProxy
+from calendarserver.tools.purge import purgeOldEvents, purgeGUID, purgeProxyAssignments
+from datetime import datetime, timedelta
+from twext.python.filepath import CachingFilePath as FilePath
+from twext.python.plistlib import readPlistFromString
+from twext.web2.dav import davxml
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
+from twistedcaldav.config import config
+from twistedcaldav.directory.directory import DirectoryRecord
+from twistedcaldav.test.util import TestCase, CapturingProcessProtocol
import os
+import xml
import zlib
-from twistedcaldav.config import config
-from twistedcaldav.test.util import TestCase
-from twisted.internet.defer import inlineCallbacks
-from calendarserver.tap.util import getRootResource
-from calendarserver.tools.purge import purgeOldEvents
resourceAttr = "WebDAV:{DAV:}resourcetype"
collectionType = zlib.compress("""<?xml version='1.0' encoding='UTF-8'?>
@@ -41,7 +49,7 @@
self.directory = self.rootResource.getDirectory()
@inlineCallbacks
- def test_purge(self):
+ def test_purgeOldEvents(self):
before = {
"calendars" : {
"__uids__" : {
@@ -83,32 +91,33 @@
self.assertEquals(count, 2)
after = {
- "calendars" : {
- "__uids__" : {
- "64" : {
- "23" : {
- "6423F94A-6B76-4A3A-815B-D52CFD77935D" : {
- "calendar": {
- ".db.sqlite": {
- "@contents" : None, # ignore contents
- },
- "endless.ics": {
- "@contents" : ENDLESS_ICS,
- },
- "straddling.ics": {
- "@contents" : STRADDLING_ICS,
- },
- "recent.ics": {
- "@contents" : RECENT_ICS,
- },
+ "__uids__" : {
+ "64" : {
+ "23" : {
+ "6423F94A-6B76-4A3A-815B-D52CFD77935D" : {
+ "calendar": {
+ ".db.sqlite": {
+ "@contents" : None, # ignore contents
},
+ "endless.ics": {
+ "@contents" : ENDLESS_ICS,
+ },
+ "straddling.ics": {
+ "@contents" : STRADDLING_ICS,
+ },
+ "recent.ics": {
+ "@contents" : RECENT_ICS,
+ },
},
},
},
},
},
}
- self.assertTrue(self.verifyHierarchy(config.DocumentRoot, after))
+ self.assertTrue(self.verifyHierarchy(
+ os.path.join(config.DocumentRoot, "calendars"),
+ after)
+ )
@@ -328,3 +337,751 @@
END:VCALENDAR
""".replace("\n", "\r\n")
+
+
+
+class DeprovisionTestCase(TestCase):
+
+ def setUp(self):
+ super(DeprovisionTestCase, self).setUp()
+
+ testRoot = os.path.join(os.path.dirname(__file__), "deprovision")
+ templateName = os.path.join(testRoot, "caldavd.plist")
+ templateFile = open(templateName)
+ template = templateFile.read()
+ templateFile.close()
+
+ newConfig = template % {
+ "ServerRoot" : os.path.abspath(config.ServerRoot),
+ }
+ configFilePath = FilePath(os.path.join(config.ConfigRoot, "caldavd.plist"))
+ configFilePath.setContent(newConfig)
+
+ self.configFileName = configFilePath.path
+ config.load(self.configFileName)
+
+ os.makedirs(config.DataRoot)
+ os.makedirs(config.DocumentRoot)
+
+ origUsersFile = FilePath(os.path.join(os.path.dirname(__file__),
+ "deprovision", "users-groups.xml"))
+ copyUsersFile = FilePath(os.path.join(config.DataRoot, "accounts.xml"))
+ origUsersFile.copyTo(copyUsersFile)
+
+ origResourcesFile = FilePath(os.path.join(os.path.dirname(__file__),
+ "deprovision", "resources-locations.xml"))
+ copyResourcesFile = FilePath(os.path.join(config.DataRoot, "resources.xml"))
+ origResourcesFile.copyTo(copyResourcesFile)
+
+ origAugmentFile = FilePath(os.path.join(os.path.dirname(__file__),
+ "deprovision", "augments.xml"))
+ copyAugmentFile = FilePath(os.path.join(config.DataRoot, "augments.xml"))
+ origAugmentFile.copyTo(copyAugmentFile)
+
+ self.rootResource = getRootResource(config)
+ self.directory = self.rootResource.getDirectory()
+
+ # Make sure trial puts the reactor in the right state, by letting it
+ # run one reactor iteration. (Ignore me, please.)
+ d = Deferred()
+ reactor.callLater(0, d.callback, True)
+ return d
+
+ @inlineCallbacks
+ def runCommand(self, command, error=False):
+ """
+ Run the given command by feeding it as standard input to
+ calendarserver_deprovision in a subprocess.
+ """
+ sourceRoot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+ python = os.path.join(sourceRoot, "python")
+ script = os.path.join(sourceRoot, "bin", "calendarserver_purge_guid")
+
+ args = [python, script, "-f", self.configFileName]
+ if error:
+ args.append("--error")
+
+ cwd = sourceRoot
+
+ deferred = Deferred()
+ reactor.spawnProcess(CapturingProcessProtocol(deferred, command), python, args, env=os.environ, path=cwd)
+ output = yield deferred
+ try:
+ plist = readPlistFromString(output)
+ except xml.parsers.expat.ExpatError, e:
+ print "Error (%s) parsing (%s)" % (e, output)
+ raise
+
+ returnValue(plist)
+
+
+ @inlineCallbacks
+ def test_purgeProxies(self):
+
+ # Set up fake user
+ purging = "5D6ABA3C-3446-4340-8083-7E37C5BC0B26"
+ record = DirectoryRecord(self.directory, "users", purging,
+ shortNames=(purging,), enabledForCalendaring=True)
+ record.enabled = True # Enabling might not be required here
+ self.directory._tmpRecords["shortNames"][purging] = record
+ self.directory._tmpRecords["guids"][purging] = record
+ pc = self.directory.principalCollection
+ purgingPrincipal = pc.principalForRecord(record)
+
+ keeping = "291C2C29-B663-4342-8EA1-A055E6A04D65"
+ keepingPrincipal = pc.principalForUID(keeping)
+
+ def getProxies(principal, proxyType):
+ subPrincipal = principal.getChild("calendar-proxy-" + proxyType)
+ return subPrincipal.readProperty(davxml.GroupMemberSet, None)
+
+ # Add purgingPrincipal as a proxy for keepingPrincipal
+ (yield addProxy(keepingPrincipal, "write", purgingPrincipal))
+
+ # Add keepingPrincipal as a proxy for purgingPrincipal
+ (yield addProxy(purgingPrincipal, "write", keepingPrincipal))
+
+ # Verify the proxy assignments
+ membersProperty = (yield getProxies(keepingPrincipal, "write"))
+ self.assertEquals(len(membersProperty.children), 1)
+ self.assertEquals(membersProperty.children[0],
+ "/principals/__uids__/5D6ABA3C-3446-4340-8083-7E37C5BC0B26/")
+ membersProperty = (yield getProxies(keepingPrincipal, "read"))
+ self.assertEquals(len(membersProperty.children), 0)
+
+ membersProperty = (yield getProxies(purgingPrincipal, "write"))
+ self.assertEquals(len(membersProperty.children), 1)
+ self.assertEquals(membersProperty.children[0],
+ "/principals/__uids__/291C2C29-B663-4342-8EA1-A055E6A04D65/")
+ membersProperty = (yield getProxies(purgingPrincipal, "read"))
+ self.assertEquals(len(membersProperty.children), 0)
+
+ # Purging the guid should clear out proxy assignments
+
+ assignments = (yield purgeProxyAssignments(purgingPrincipal))
+ self.assertTrue(("5D6ABA3C-3446-4340-8083-7E37C5BC0B26", "write", "291C2C29-B663-4342-8EA1-A055E6A04D65") in assignments)
+ self.assertTrue(("291C2C29-B663-4342-8EA1-A055E6A04D65", "write", "5D6ABA3C-3446-4340-8083-7E37C5BC0B26") in assignments)
+
+ membersProperty = (yield getProxies(keepingPrincipal, "write"))
+ self.assertEquals(len(membersProperty.children), 0)
+ membersProperty = (yield getProxies(purgingPrincipal, "write"))
+ self.assertEquals(len(membersProperty.children), 0)
+
+ @inlineCallbacks
+ def test_purgeExistingGUID(self):
+
+ # Deprovisioned user is E9E78C86-4829-4520-A35D-70DDADAB2092
+ # Keeper user is 291C2C29-B663-4342-8EA1-A055E6A04D65
+
+ before = {
+ "calendars" : {
+ "__uids__" : {
+ "E9" : {
+ "E7" : {
+ "E9E78C86-4829-4520-A35D-70DDADAB2092" : {
+ "calendar": {
+ "@xattrs" :
+ {
+ resourceAttr : collectionType,
+ },
+ "noninvite.ics": {
+ "@contents" : NON_INVITE_ICS,
+ },
+ "organizer.ics": {
+ "@contents" : ORGANIZER_ICS,
+ },
+ "attendee.ics": {
+ "@contents" : ATTENDEE_ICS,
+ },
+ },
+ },
+ },
+ },
+ "29" : {
+ "1C" : {
+ "291C2C29-B663-4342-8EA1-A055E6A04D65" : {
+ "calendar": {
+ "@xattrs" :
+ {
+ resourceAttr : collectionType,
+ },
+ "organizer.ics": {
+ "@contents" : ORGANIZER_ICS,
+ },
+ "attendee.ics": {
+ "@contents" : ATTENDEE_ICS,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ self.createHierarchy(before, config.DocumentRoot)
+ count, assignments = (yield purgeGUID(
+ "E9E78C86-4829-4520-A35D-70DDADAB2092",
+ self.directory, self.rootResource))
+
+ self.assertEquals(count, 2)
+
+ after = {
+ "__uids__" : {
+ "E9" : {
+ "E7" : {
+ "E9E78C86-4829-4520-A35D-70DDADAB2092" : {
+ "calendar": {
+ ".db.sqlite": {
+ "@contents" : None, # ignore contents
+ },
+ "noninvite.ics": {
+ "@contents" : NON_INVITE_ICS,
+ },
+ },
+ },
+ },
+ },
+ "29" : {
+ "1C" : {
+ "291C2C29-B663-4342-8EA1-A055E6A04D65" : {
+ "inbox": {
+ ".db.sqlite": {
+ "@contents" : None, # ignore contents
+ },
+ "*.ics/UID:7ED97931-9A19-4596-9D4D-52B36D6AB803": {
+ "@contents" : (
+ "METHOD:CANCEL",
+ ),
+ },
+ "*.ics/UID:1974603C-B2C0-4623-92A0-2436DEAB07EF": {
+ "@contents" : (
+ "METHOD:REPLY",
+ "ATTENDEE;CN=Deprovisioned User;CUTYPE=INDIVIDUAL;PARTSTAT=DECLINED:urn:uui\r\n d:E9E78C86-4829-4520-A35D-70DDADAB2092",
+ ),
+ },
+ },
+ "calendar": {
+ ".db.sqlite": {
+ "@contents" : None, # ignore contents
+ },
+ "organizer.ics": {
+ "@contents" : (
+ "STATUS:CANCELLED",
+ ),
+ },
+ "attendee.ics": {
+ "@contents" : (
+ "ATTENDEE;CN=Deprovisioned User;CUTYPE=INDIVIDUAL;PARTSTAT=DECLINED;SCHEDUL\r\n E-STATUS=2.0:urn:uuid:E9E78C86-4829-4520-A35D-70DDADAB2092",
+ ),
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ self.assertTrue(self.verifyHierarchy(
+ os.path.join(config.DocumentRoot, "calendars"),
+ after)
+ )
+
+
+ @inlineCallbacks
+ def test_purgeNonExistentGUID(self):
+
+ before = {
+ "calendars" : {
+ "__uids__" : {
+ "1C" : {
+ "B4" : {
+ "1CB4378B-DD76-462D-B4D4-BD131FE89243" : {
+ "calendar": {
+ "@xattrs" :
+ {
+ resourceAttr : collectionType,
+ },
+ # non-repeating, non-invite, in the past
+ # = untouched
+ "noninvite_past.ics": {
+ "@contents" : NON_INVITE_PAST_ICS,
+ },
+ # non-repeating, non-invite, in the future
+ # = removed
+ "noninvite_future.ics": {
+ "@contents" : NON_INVITE_FUTURE_ICS,
+ },
+ "organizer.ics": {
+ "@contents" : ORGANIZER_ICS_2,
+ },
+ "attendee.ics": {
+ "@contents" : ATTENDEE_ICS_2,
+ },
+ "repeating_organizer.ics": {
+ "@contents" : REPEATING_ORGANIZER_ICS,
+ },
+ },
+ },
+ },
+ },
+ "29" : {
+ "1C" : {
+ "291C2C29-B663-4342-8EA1-A055E6A04D65" : {
+ "calendar": {
+ "@xattrs" :
+ {
+ resourceAttr : collectionType,
+ },
+ "organizer.ics": {
+ "@contents" : ORGANIZER_ICS_2,
+ },
+ "attendee.ics": {
+ "@contents" : ATTENDEE_ICS_2,
+ },
+ "repeating_organizer.ics": {
+ "@contents" : REPEATING_ORGANIZER_ICS,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ self.createHierarchy(before, config.DocumentRoot)
+ count, assignments = (yield purgeGUID(
+ "1CB4378B-DD76-462D-B4D4-BD131FE89243",
+ self.directory, self.rootResource))
+
+ self.assertEquals(count, 4)
+
+ after = {
+ "__uids__" : {
+ "1C" : {
+ "B4" : {
+ "1CB4378B-DD76-462D-B4D4-BD131FE89243" : {
+ "calendar": {
+ ".db.sqlite": {
+ "@contents" : None, # ignore contents
+ },
+ "noninvite_past.ics": {
+ "@contents" : NON_INVITE_PAST_ICS,
+ },
+ },
+ },
+ },
+ },
+ "29" : {
+ "1C" : {
+ "291C2C29-B663-4342-8EA1-A055E6A04D65" : {
+ "inbox": {
+ ".db.sqlite": {
+ "@contents" : None, # ignore contents
+ },
+ "*.ics/UID:7ED97931-9A19-4596-9D4D-52B36D6AB803": {
+ "@contents" : (
+ "METHOD:CANCEL",
+ ),
+ },
+ "*.ics/UID:1974603C-B2C0-4623-92A0-2436DEAB07EF": {
+ "@contents" : (
+ "METHOD:REPLY",
+ "ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=DECLINED:urn:uuid:1CB4378B-DD76-462D-B\r\n 4D4-BD131FE89243",
+ ),
+ },
+ "*.ics/UID:8ED97931-9A19-4596-9D4D-52B36D6AB803": {
+ "@contents" : (
+ "METHOD:CANCEL",
+ ),
+ },
+ },
+ "calendar": {
+ ".db.sqlite": {
+ "@contents" : None, # ignore contents
+ },
+ "organizer.ics": {
+ "@contents" : (
+ "STATUS:CANCELLED",
+ ),
+ },
+ "attendee.ics": {
+ "@contents" : (
+ "ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=DECLINED;SCHEDULE-STATUS=2.0:urn:uuid:\r\n 1CB4378B-DD76-462D-B4D4-BD131FE89243",
+ ),
+ },
+ "repeating_organizer.ics": {
+ "@contents" : (
+ "STATUS:CANCELLED",
+ ),
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ self.assertTrue(self.verifyHierarchy(
+ os.path.join(config.DocumentRoot, "calendars"),
+ after)
+ )
+
+
+
+ @inlineCallbacks
+ def test_purgeMultipleNonExistentGUIDs(self):
+
+ before = {
+ "calendars" : {
+ "__uids__" : {
+ "76" : { # Non-existent
+ "7F" : {
+ "767F9EB0-8A58-4F61-8163-4BE0BB72B873" : {
+ "calendar": {
+ "@xattrs" :
+ {
+ resourceAttr : collectionType,
+ },
+ "noninvite.ics": {
+ "@contents" : NON_INVITE_ICS_3,
+ },
+ "organizer.ics": {
+ "@contents" : ORGANIZER_ICS_3,
+ },
+ "attendee.ics": {
+ "@contents" : ATTENDEE_ICS_3,
+ },
+ "attendee2.ics": {
+ "@contents" : ATTENDEE_ICS_4,
+ },
+ },
+ },
+ },
+ },
+ "42" : { # Non-existent
+ "EB" : {
+ "42EB074A-F859-4E8F-A4D0-7F0ADCB73D87" : {
+ "calendar": {
+ "@xattrs" :
+ {
+ resourceAttr : collectionType,
+ },
+ "organizer.ics": {
+ "@contents" : ORGANIZER_ICS_3,
+ },
+ "attendee.ics": {
+ "@contents" : ATTENDEE_ICS_3,
+ },
+ "attendee2.ics": {
+ "@contents" : ATTENDEE_ICS_4,
+ },
+ },
+ },
+ },
+ },
+ "29" : { # Existing
+ "1C" : {
+ "291C2C29-B663-4342-8EA1-A055E6A04D65" : {
+ "calendar": {
+ "@xattrs" :
+ {
+ resourceAttr : collectionType,
+ },
+ "organizer.ics": {
+ "@contents" : ORGANIZER_ICS_3,
+ },
+ "attendee.ics": {
+ "@contents" : ATTENDEE_ICS_3,
+ },
+ "attendee2.ics": {
+ "@contents" : ATTENDEE_ICS_4,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ self.createHierarchy(before, config.DocumentRoot)
+ count, assignments = (yield purgeGUID(
+ "767F9EB0-8A58-4F61-8163-4BE0BB72B873",
+ self.directory, self.rootResource))
+
+ self.assertEquals(count, 3)
+
+ after = {
+ "__uids__" : {
+ "76" : { # Non-existent
+ "7F" : {
+ "767F9EB0-8A58-4F61-8163-4BE0BB72B873" : {
+ "calendar": {
+ ".db.sqlite": {
+ "@contents" : None, # ignore contents
+ },
+ "noninvite.ics": { # event in the past
+ "@contents" : NON_INVITE_ICS_3,
+ },
+ },
+ },
+ },
+ },
+ "42" : { # Non-existent -- untouched
+ "EB" : {
+ "42EB074A-F859-4E8F-A4D0-7F0ADCB73D87" : {
+ "calendar": {
+ "@xattrs" :
+ {
+ resourceAttr : collectionType,
+ },
+ "organizer.ics": {
+ "@contents" : ORGANIZER_ICS_3,
+ },
+ "attendee.ics": {
+ "@contents" : ATTENDEE_ICS_3,
+ },
+ "attendee2.ics": {
+ "@contents" : ATTENDEE_ICS_4,
+ },
+ },
+ },
+ },
+ },
+ "29" : {
+ "1C" : {
+ "291C2C29-B663-4342-8EA1-A055E6A04D65" : {
+ "inbox": {
+ ".db.sqlite": {
+ "@contents" : None, # ignore contents
+ },
+ "*.ics/UID:7ED97931-9A19-4596-9D4D-52B36D6AB803": {
+ "@contents" : (
+ "METHOD:CANCEL",
+ ),
+ },
+ "*.ics/UID:79F26B10-6ECE-465E-9478-53F2A9FCAFEE": {
+ "@contents" : (
+ "METHOD:REPLY",
+ "ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=DECLINED:urn:uuid:767F9EB0-8A58-4F61-8\r\n 163-4BE0BB72B873",
+ ),
+ },
+ },
+ "calendar": {
+ ".db.sqlite": {
+ "@contents" : None, # ignore contents
+ },
+ "organizer.ics": {
+ # Purging non-existent organizer; has non-existent
+ # and existent attendees
+ "@contents" : (
+ "STATUS:CANCELLED",
+ ),
+ },
+ "attendee.ics": {
+ # (Note: implicit scheduler doesn't update this)
+ # Purging non-existent attendee; has non-existent
+ # organizer and existent attendee
+ "@contents" : ATTENDEE_ICS_3,
+ },
+ "attendee2.ics": {
+ # Purging non-existent attendee; has non-existent
+ # attendee and existent organizer
+ "@contents" : (
+ "ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=DECLINED;SCHEDULE-STATUS=2.0:urn:uuid:\r\n 767F9EB0-8A58-4F61-8163-4BE0BB72B873",
+ )
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ self.assertTrue(self.verifyHierarchy(
+ os.path.join(config.DocumentRoot, "calendars"),
+ after)
+ )
+
+
+future = (datetime.utcnow() + timedelta(days=1)).strftime("%Y%m%dT%H%M%SZ")
+past = (datetime.utcnow() - timedelta(days=1)).strftime("%Y%m%dT%H%M%SZ")
+
+# For test_purgeExistingGUID
+
+# No organizer/attendee
+NON_INVITE_ICS = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:151AFC76-6036-40EF-952B-97D1840760BF
+SUMMARY:Non Invitation
+DTSTART:%s
+DURATION:PT1H
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (past,)
+
+# Purging existing organizer; has existing attendee
+ORGANIZER_ICS = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:7ED97931-9A19-4596-9D4D-52B36D6AB803
+SUMMARY:Organizer
+DTSTART:%s
+DURATION:PT1H
+ORGANIZER:urn:uuid:E9E78C86-4829-4520-A35D-70DDADAB2092
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:E9E78C86-4829-4520-A35D-70DDADAB2092
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (future,)
+
+# Purging existing attendee; has existing organizer
+ATTENDEE_ICS = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:1974603C-B2C0-4623-92A0-2436DEAB07EF
+SUMMARY:Attendee
+DTSTART:%s
+DURATION:PT1H
+ORGANIZER:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:E9E78C86-4829-4520-A35D-70DDADAB2092
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (future,)
+
+
+# For test_purgeNonExistentGUID
+
+# No organizer/attendee, in the past
+NON_INVITE_PAST_ICS = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:151AFC76-6036-40EF-952B-97D1840760BF
+SUMMARY:Non Invitation
+DTSTART:%s
+DURATION:PT1H
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (past,)
+
+# No organizer/attendee, in the future
+NON_INVITE_FUTURE_ICS = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:251AFC76-6036-40EF-952B-97D1840760BF
+SUMMARY:Non Invitation
+DTSTART:%s
+DURATION:PT1H
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (future,)
+
+
+# Purging non-existent organizer; has existing attendee
+ORGANIZER_ICS_2 = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:7ED97931-9A19-4596-9D4D-52B36D6AB803
+SUMMARY:Organizer
+DTSTART:%s
+DURATION:PT1H
+ORGANIZER:urn:uuid:1CB4378B-DD76-462D-B4D4-BD131FE89243
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:1CB4378B-DD76-462D-B4D4-BD131FE89243
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (future,)
+
+# Purging non-existent attendee; has existing organizer
+ATTENDEE_ICS_2 = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:1974603C-B2C0-4623-92A0-2436DEAB07EF
+SUMMARY:Attendee
+DTSTART:%s
+DURATION:PT1H
+ORGANIZER:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:1CB4378B-DD76-462D-B4D4-BD131FE89243
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (future,)
+
+# Purging non-existent organizer; has existing attendee; repeating
+REPEATING_ORGANIZER_ICS = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:8ED97931-9A19-4596-9D4D-52B36D6AB803
+SUMMARY:Repeating Organizer
+DTSTART:%s
+DURATION:PT1H
+RRULE:FREQ=DAILY;COUNT=400
+ORGANIZER:urn:uuid:1CB4378B-DD76-462D-B4D4-BD131FE89243
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:1CB4378B-DD76-462D-B4D4-BD131FE89243
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (past,)
+
+
+# For test_purgeMultipleNonExistentGUIDs
+
+# No organizer/attendee
+NON_INVITE_ICS_3 = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:151AFC76-6036-40EF-952B-97D1840760BF
+SUMMARY:Non Invitation
+DTSTART:%s
+DURATION:PT1H
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (past,)
+
+# Purging non-existent organizer; has non-existent and existent attendees
+ORGANIZER_ICS_3 = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:7ED97931-9A19-4596-9D4D-52B36D6AB803
+SUMMARY:Organizer
+DTSTART:%s
+DURATION:PT1H
+ORGANIZER:urn:uuid:767F9EB0-8A58-4F61-8163-4BE0BB72B873
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:767F9EB0-8A58-4F61-8163-4BE0BB72B873
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:42EB074A-F859-4E8F-A4D0-7F0ADCB73D87
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (future,)
+
+# Purging non-existent attendee; has non-existent organizer and existent attendee
+# (Note: Implicit scheduling doesn't update this at all for the existing attendee)
+ATTENDEE_ICS_3 = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:1974603C-B2C0-4623-92A0-2436DEAB07EF
+SUMMARY:Attendee
+DTSTART:%s
+DURATION:PT1H
+ORGANIZER:urn:uuid:42EB074A-F859-4E8F-A4D0-7F0ADCB73D87
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:767F9EB0-8A58-4F61-8163-4BE0BB72B873
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:42EB074A-F859-4E8F-A4D0-7F0ADCB73D87
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (future,)
+
+# Purging non-existent attendee; has non-existent attendee and existent organizer
+ATTENDEE_ICS_4 = """BEGIN:VCALENDAR
+VERSION:2.0
+BEGIN:VEVENT
+UID:79F26B10-6ECE-465E-9478-53F2A9FCAFEE
+SUMMARY:2 non-existent attendees
+DTSTART:%s
+DURATION:PT1H
+ORGANIZER:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:767F9EB0-8A58-4F61-8163-4BE0BB72B873
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:42EB074A-F859-4E8F-A4D0-7F0ADCB73D87
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n") % (future,)
+
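The ICS fixtures above all follow one pattern: each body is a %-template whose DTSTART is filled in from the module-level future/past timestamps, so every run of the suite sees one event safely in the past (retained by the purge) and others in the future (removed or cancelled, per the expectations in the tests). A condensed sketch of that pattern with a made-up UID:

    from datetime import datetime, timedelta

    future = (datetime.utcnow() + timedelta(days=1)).strftime("%Y%m%dT%H%M%SZ")
    past = (datetime.utcnow() - timedelta(days=1)).strftime("%Y%m%dT%H%M%SZ")

    # Hypothetical UID; the real fixtures each carry their own.
    TEMPLATE = "\r\n".join([
        "BEGIN:VCALENDAR",
        "VERSION:2.0",
        "BEGIN:VEVENT",
        "UID:00000000-0000-0000-0000-000000000000",
        "DTSTART:%s",
        "DURATION:PT1H",
        "END:VEVENT",
        "END:VCALENDAR",
        "",
    ])

    past_event = TEMPLATE % (past,)      # left untouched by the purge
    future_event = TEMPLATE % (future,)  # candidate for removal or cancellation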
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/caldavd-test.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/caldavd-test.plist 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/caldavd-test.plist 2010-04-07 19:27:31 UTC (rev 5439)
@@ -727,11 +727,7 @@
</array>
</dict>
- <!-- Response Caching -->
- <key>ResponseCacheTimeout</key>
- <integer>30</integer> <!-- in minutes -->
-
<!--
Twisted
-->
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/carddavd-test.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/carddavd-test.plist 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/carddavd-test.plist 2010-04-07 19:27:31 UTC (rev 5439)
@@ -721,11 +721,7 @@
</array>
</dict>
- <!-- Response Caching -->
- <key>ResponseCacheTimeout</key>
- <integer>30</integer> <!-- in minutes -->
-
<!--
Twisted
-->
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/resources/caldavd-resources.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/resources/caldavd-resources.plist 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/conf/resources/caldavd-resources.plist 2010-04-07 19:27:31 UTC (rev 5439)
@@ -718,11 +718,7 @@
</array>
</dict>
- <!-- Response Caching -->
- <key>ResponseCacheTimeout</key>
- <integer>30</integer> <!-- in minutes -->
-
<!--
Twisted
-->
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/launchd/addressbookserver.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/launchd/addressbookserver.plist 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/launchd/addressbookserver.plist 2010-04-07 19:27:31 UTC (rev 5439)
@@ -42,13 +42,7 @@
<key>ThrottleInterval</key>
<integer>60</integer>
-
- <key>StandardOutPath</key>
- <string>/var/log/carddavd/error.log</string>
- <key>StandardErrorPath</key>
- <string>/var/log/carddavd/error.log</string>
-
<key>HardResourceLimits</key>
<dict>
<key>NumberOfFiles</key>
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/launchd/calendarserver.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/launchd/calendarserver.plist 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/launchd/calendarserver.plist 2010-04-07 19:27:31 UTC (rev 5439)
@@ -42,13 +42,7 @@
<key>ThrottleInterval</key>
<integer>60</integer>
-
- <key>StandardOutPath</key>
- <string>/var/log/caldavd/error.log</string>
- <key>StandardErrorPath</key>
- <string>/var/log/caldavd/error.log</string>
-
<key>HardResourceLimits</key>
<dict>
<key>NumberOfFiles</key>
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/migration/59_calendarmigrator.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/migration/59_calendarmigrator.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/contrib/migration/59_calendarmigrator.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -26,7 +26,7 @@
import shutil
import sys
-from twext.python.plistlib import readPlist, writePlist
+from plistlib import readPlist, writePlist
LAUNCHD_KEY = "org.calendarserver.calendarserver"
LOG = "/Library/Logs/Migration/calendarmigrator.log"
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/caldavd.8
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/caldavd.8 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/caldavd.8 2010-04-07 19:27:31 UTC (rev 5439)
@@ -23,6 +23,7 @@
.Sh SYNOPSIS
.Nm
.Op Fl hX
+.Op Fl hL
.Op Fl u Ar username
.Op Fl g Ar groupname
.Op Fl T Ar twistd
@@ -41,6 +42,8 @@
Displays usage information
.It Fl X
Starts the server but does not daemonize it.
+.It Fl L
+Sends error logging output to stdout rather than the file specified in caldavd.plist.
.It Fl u Ar username
Drops privileges to the given username.
.It Fl g Ar groupname
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/calendarserver_manage_principals.8
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/calendarserver_manage_principals.8 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/calendarserver_manage_principals.8 2010-04-07 19:27:31 UTC (rev 5439)
@@ -24,6 +24,9 @@
.Nm
.Op Fl -config Ar file
.Op Fl -read-property Ar property
+.Op Fl -list-principal-types
+.Op Fl -list-principals Ar type
+.Op Fl -search Ar search-string
.Op Fl -list-read-proxies
.Op Fl -list-write-proxies
.Op Fl -list-proxies
@@ -83,6 +86,12 @@
.Ar {namespace}name ,
for example:
.Ar {DAV:}displayname .
+.It Fl -list-principal-types
+List all of the known principal types.
+.It Fl -list-principals Ar type
+List all of the principals of the given type.
+.It Fl -search Ar search-string
+Search for principals whose name or email address contains the search string.
.It Fl -list-read-proxies
List the read proxies.
.It Fl -list-write-proxies
@@ -119,6 +128,10 @@
.Pp
.Dl "calendarserver_manage_principals --set-auto-schedule true --get-auto-schedule resources:projector"
.Pp
+Search for all principals named Joe:
+.Pp
+.Dl "calendarserver_manage_principals --search joe"
+.Pp
.Sh FILES
.Bl -tag -width flag
.It /etc/caldavd/caldavd.plist
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/calendarserver_purge_principals.8 (from rev 5438, CalendarServer/trunk/doc/calendarserver_purge_principals.8)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/calendarserver_purge_principals.8 (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/doc/calendarserver_purge_principals.8 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,56 @@
+.\"
+.\" Copyright (c) 2006-2010 Apple Inc. All rights reserved.
+.\"
+.\" Licensed under the Apache License, Version 2.0 (the "License");
+.\" you may not use this file except in compliance with the License.
+.\" You may obtain a copy of the License at
+.\"
+.\" http://www.apache.org/licenses/LICENSE-2.0
+.\"
+.\" Unless required by applicable law or agreed to in writing, software
+.\" distributed under the License is distributed on an "AS IS" BASIS,
+.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+.\" See the License for the specific language governing permissions and
+.\" limitations under the License.
+.\"
+.\" The following requests are required for all man pages.
+.Dd June 17, 2009
+.Dt CALENDARSERVER_PURGE_PRINCIPALS 8
+.Os
+.Sh NAME
+.Nm calendarserver_purge_principals
+.Nd Darwin Calendar Server deprovisioned user clean-up utility
+.Sh SYNOPSIS
+.Nm
+.Op Fl -config Ar file
+.Op Fl -dry-run
+.Op Fl -verbose
+.Op Fl -help
+.Ar guid
+.Op guid ...
+.Sh DESCRIPTION
+.Nm
+is a tool for removing one or more principals' future events and proxy assignments from the calendar server. Events in the past are retained, but any ongoing events are canceled.
+.Pp
+.Nm
+should be run as a user with the same privileges as the Calendar
+Server itself, as it needs to read and write data that belongs to the
+server.
+.Sh OPTIONS
+.Bl -tag -width flag
+.It Fl h, -help
+Display usage information
+.It Fl f, -config Ar FILE
+Use the Calendar Server configuration specified in the given file. Defaults to /etc/caldavd/caldavd.plist.
+.It Fl n, -dry-run
+Calculate and display how many events would be removed, but don't actually remove them.
+.It Fl v, -verbose
+Print progress information.
+.El
+.Sh FILES
+.Bl -tag -width flag
+.It /etc/caldavd/caldavd.plist
+The Calendar Server configuration file.
+.El
+.Sh SEE ALSO
+.Xr caldavd 8
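Tying this man page to the deprovision fixtures earlier in the changeset, a dry run against the test realm's deprovisioned account would look something like the following. This is an illustrative invocation using only the options documented above; the GUID is the "deprovisioned" user from the test fixtures, and the plist path is the documented default:

    calendarserver_purge_principals --config /etc/caldavd/caldavd.plist \
        --dry-run --verbose E9E78C86-4829-4520-A35D-70DDADAB2092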
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/kqreactor.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/kqreactor.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/kqreactor.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,228 +0,0 @@
-# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-"""
-A kqueue()/kevent() based implementation of the Twisted main loop.
-
-To install the event loop (and you should do this before any connections,
-listeners or connectors are added)::
-
- | from twisted.internet import kqreactor
- | kqreactor.install()
-
-
-Maintainer: U{Itamar Shtull-Trauring<mailto:twisted at itamarst.org>}
-"""
-
-
-import errno, sys
-
-try:
- from select import KQ_FILTER_READ, KQ_FILTER_WRITE, KQ_EV_DELETE, KQ_EV_ADD
- from select import kqueue, kevent, KQ_EV_ENABLE, KQ_EV_DISABLE, KQ_EV_EOF
-except ImportError:
- from select26 import KQ_FILTER_READ, KQ_FILTER_WRITE, KQ_EV_DELETE, KQ_EV_ADD
- from select26 import kqueue, kevent, KQ_EV_ENABLE, KQ_EV_DISABLE, KQ_EV_EOF
-
-from zope.interface import implements
-
-from twisted.python import log, failure
-from twisted.internet import main, posixbase
-from twisted.internet.interfaces import IReactorFDSet
-
-
-
-class KQueueReactor(posixbase.PosixReactorBase):
- """
- A reactor that uses kqueue(2)/kevent(2).
-
- @ivar _kq: A L{kqueue} which will be used to check for I/O readiness.
-
- @ivar _selectables: A dictionary mapping integer file descriptors to
- instances of L{FileDescriptor} which have been registered with the
- reactor. All L{FileDescriptors} which are currently receiving read or
- write readiness notifications will be present as values in this
- dictionary.
-
- @ivar _reads: A set storing integer file descriptors. These values will be
- registered with C{_kq} for read readiness notifications which will be
- dispatched to the corresponding L{FileDescriptor} instances in
- C{_selectables}.
-
- @ivar _writes: A set storing integer file descriptors. These values will
- be registered with C{_kq} for write readiness notifications which will
- be dispatched to the corresponding L{FileDescriptor} instances in
- C{_selectables}.
- """
- implements(IReactorFDSet)
-
- def __init__(self):
- """
- Initialize kqueue object, file descriptor tracking sets, and the base
- class.
- """
- self._kq = kqueue()
- self._reads = set()
- self._writes = set()
- self._selectables = {}
- posixbase.PosixReactorBase.__init__(self)
-
-
- def _updateRegistration(self, fd, filter, flags):
- ev = kevent(fd, filter, flags)
- self._kq.control([ev], 0, 0)
-
-
- def addReader(self, reader):
- """
- Add a FileDescriptor for notification of data available to read.
- """
- fd = reader.fileno()
- if fd not in self._reads:
- if fd not in self._selectables:
- self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD|KQ_EV_ENABLE)
- self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD|KQ_EV_DISABLE)
- self._selectables[fd] = reader
- else:
- self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ENABLE)
- self._reads.add(fd)
-
-
- def addWriter(self, writer):
- """
- Add a FileDescriptor for notification of data available to write.
- """
- fd = writer.fileno()
- if fd not in self._writes:
- if fd not in self._selectables:
- self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD|KQ_EV_ENABLE)
- self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD|KQ_EV_DISABLE)
- self._selectables[fd] = writer
- else:
- self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ENABLE)
- self._writes.add(fd)
-
-
- def removeReader(self, reader):
- """
- Remove a Selectable for notification of data available to read.
- """
- fd = reader.fileno()
- if fd == -1:
- for fd, fdes in self._selectables.iteritems():
- if reader is fdes:
- break
- else:
- return
- if fd in self._reads:
- self._reads.discard(fd)
- if fd not in self._writes:
- del self._selectables[fd]
- self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_DISABLE)
-
-
- def removeWriter(self, writer):
- """
- Remove a Selectable for notification of data available to write.
- """
- fd = writer.fileno()
- if fd == -1:
- for fd, fdes in self._selectables.iteritems():
- if writer is fdes:
- break
- else:
- return
- if fd in self._writes:
- self._writes.discard(fd)
- if fd not in self._reads:
- del self._selectables[fd]
- self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_DISABLE)
-
-
- def removeAll(self):
- """
- Remove all selectables, and return a list of them.
- """
- if self.waker is not None:
- self.removeReader(self.waker)
- result = self._selectables.values()
- for fd in self._reads:
- self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_DELETE)
- for fd in self._writes:
- self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_DELETE)
- self._reads.clear()
- self._writes.clear()
- self._selectables.clear()
- if self.waker is not None:
- self.addReader(self.waker)
- return result
-
-
- def getReaders(self):
- return [self._selectables[fd] for fd in self._reads]
-
-
- def getWriters(self):
- return [self._selectables[fd] for fd in self._writes]
-
-
- def doKEvent(self, timeout):
- """
- Poll the kqueue for new events.
- """
- if timeout is None:
- timeout = 1
-
- try:
- l = self._kq.control([], len(self._selectables), timeout)
- except OSError, e:
- if e[0] == errno.EINTR:
- return
- else:
- raise
- _drdw = self._doWriteOrRead
- for event in l:
- fd = event.ident
- try:
- selectable = self._selectables[fd]
- except KeyError:
- # Handles the infrequent case where one selectable's
- # handler disconnects another.
- continue
- log.callWithLogger(selectable, _drdw, selectable, fd, event)
-
-
- def _doWriteOrRead(self, selectable, fd, event):
- why = None
- inRead = False
- filter, flags, data, fflags = event.filter, event.flags, event.data, event.fflags
- if flags & KQ_EV_EOF and data and fflags:
- why = main.CONNECTION_LOST
- else:
- try:
- if filter == KQ_FILTER_READ:
- inRead = True
- why = selectable.doRead()
- if filter == KQ_FILTER_WRITE:
- inRead = False
- why = selectable.doWrite()
- if not selectable.fileno() == fd:
- inRead = False
- why = main.CONNECTION_LOST
- except:
- log.err()
- why = sys.exc_info()[1]
-
- if why:
- self._disconnectSelectable(selectable, why, inRead)
-
- doIteration = doKEvent
-
-
-def install():
- k = KQueueReactor()
- main.installReactor(k)
-
-
-__all__ = ["KQueueReactor", "install"]
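The docstring of the module deleted above spells out the one ordering constraint that matters for any kqueue reactor: install() must run before anything imports twisted.internet.reactor. A standalone sketch of that documented pattern, using the import path the docstring gives (whether twisted.internet.kqreactor is present in a given Twisted install is an assumption here):

    # Install the kqueue reactor first, before twisted.internet.reactor is
    # imported anywhere; after that the usual reactor API applies.
    from twisted.internet import kqreactor
    kqreactor.install()

    from twisted.internet import reactor
    reactor.callLater(0, reactor.stop)   # trivial demonstration
    reactor.run()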
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/memcacheclient.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/memcacheclient.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/memcacheclient.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,1457 +0,0 @@
-#!/usr/bin/env python
-
-"""
-client module for memcached (memory cache daemon)
-
-Overview
-========
-
-See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
-
-Usage summary
-=============
-
-This should give you a feel for how this module operates::
-
- import memcacheclient
- mc = memcacheclient.Client(['127.0.0.1:11211'], debug=0)
-
- mc.set("some_key", "Some value")
- value = mc.get("some_key")
-
- mc.set("another_key", 3)
- mc.delete("another_key")
-
- mc.set("key", "1") # note that the key used for incr/decr must be a string.
- mc.incr("key")
- mc.decr("key")
-
-The standard way to use memcache with a database is like this::
-
- key = derive_key(obj)
- obj = mc.get(key)
- if not obj:
- obj = backend_api.get(...)
- mc.set(obj)
-
- # we now have obj, and future passes through this code
- # will use the object from the cache.
-
-Detailed Documentation
-======================
-
-More detailed documentation is available in the L{Client} class.
-"""
-
-import sys
-import socket
-import time
-import os
-import re
-import types
-
-from twext.python.log import Logger
-
-from twistedcaldav.config import config
-
-log = Logger()
-
-
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
-
-try:
- from zlib import compress, decompress
- _supports_compress = True
-except ImportError:
- _supports_compress = False
- # quickly define a decompress just in case we recv compressed data.
- def decompress(val):
- raise _Error("received compressed data but I don't support compession (import error)")
-
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-from binascii import crc32 # zlib version is not cross-platform
-serverHashFunction = crc32
-
-__author__ = "Evan Martin <martine at danga.com>"
-__version__ = "1.44"
-__copyright__ = "Copyright (C) 2003 Danga Interactive"
-__license__ = "Python"
-
-SERVER_MAX_KEY_LENGTH = 250
-# Storing values larger than 1MB requires recompiling memcached. If you do,
-# this value can be changed by doing "memcacheclient.SERVER_MAX_VALUE_LENGTH = N"
-# after importing this module.
-SERVER_MAX_VALUE_LENGTH = 1024*1024
-
-class _Error(Exception):
- pass
-
-class MemcacheError(_Error):
- """
- Memcache connection error
- """
-
-class NotFoundError(MemcacheError):
- """
- NOT_FOUND error
- """
-
-class TokenMismatchError(MemcacheError):
- """
- Check-and-set token mismatch
- """
-
-try:
- # Only exists in Python 2.4+
- from threading import local
-except ImportError:
- # TODO: add the pure-python local implementation
- class local(object):
- pass
-
-class ClientFactory(object):
-
- # unit tests should set this to True to enable the fake test cache
- allowTestCache = False
-
- @classmethod
- def getClient(cls, servers, debug=0, pickleProtocol=0,
- pickler=pickle.Pickler, unpickler=pickle.Unpickler,
- pload=None, pid=None):
-
- if config.Memcached.Pools.Default.ClientEnabled:
- return Client(servers, debug=debug, pickleProtocol=pickleProtocol,
- pickler=pickler, unpickler=unpickler, pload=pload, pid=pid)
- elif cls.allowTestCache:
- return TestClient(servers, debug=debug,
- pickleProtocol=pickleProtocol, pickler=pickler,
- unpickler=unpickler, pload=pload, pid=pid)
- else:
- return None
-
-
-class Client(local):
- """
- Object representing a pool of memcache servers.
-
- See L{memcache} for an overview.
-
- In all cases where a key is used, the key can be either:
- 1. A simple hashable type (string, integer, etc.).
- 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
- making this module calculate a hash value. You may prefer, for
- example, to keep all of a given user's objects on the same memcache
- server, so you could use the user's unique id as the hash value.
-
- @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
- @group Insertion: set, add, replace, set_multi
- @group Retrieval: get, get_multi
- @group Integers: incr, decr
- @group Removal: delete, delete_multi
- @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
- set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi
- """
- _FLAG_PICKLE = 1<<0
- _FLAG_INTEGER = 1<<1
- _FLAG_LONG = 1<<2
- _FLAG_COMPRESSED = 1<<3
-
- _SERVER_RETRIES = 10 # how many times to try finding a free server.
-
- # exceptions for Client
- class MemcachedKeyError(Exception):
- pass
- class MemcachedKeyLengthError(MemcachedKeyError):
- pass
- class MemcachedKeyCharacterError(MemcachedKeyError):
- pass
- class MemcachedKeyNoneError(MemcachedKeyError):
- pass
- class MemcachedKeyTypeError(MemcachedKeyError):
- pass
- class MemcachedStringEncodingError(Exception):
- pass
-
- def __init__(self, servers, debug=0, pickleProtocol=0,
- pickler=pickle.Pickler, unpickler=pickle.Unpickler,
- pload=None, pid=None):
- """
- Create a new Client object with the given list of servers.
-
- @param servers: C{servers} is passed to L{set_servers}.
- @param debug: whether to display error messages when a server can't be
- contacted.
- @param pickleProtocol: number to mandate protocol used by (c)Pickle.
- @param pickler: optional override of default Pickler to allow subclassing.
- @param unpickler: optional override of default Unpickler to allow subclassing.
- @param pload: optional persistent_load function to call on pickle loading.
- Useful for cPickle since subclassing isn't allowed.
- @param pid: optional persistent_id function to call on pickle storing.
- Useful for cPickle since subclassing isn't allowed.
- """
- local.__init__(self)
- self.set_servers(servers)
- self.debug = debug
- self.stats = {}
-
- # Allow users to modify pickling/unpickling behavior
- self.pickleProtocol = pickleProtocol
- self.pickler = pickler
- self.unpickler = unpickler
- self.persistent_load = pload
- self.persistent_id = pid
-
- # figure out the pickler style
- file = StringIO()
- try:
- pickler = self.pickler(file, protocol = self.pickleProtocol)
- self.picklerIsKeyword = True
- except TypeError:
- self.picklerIsKeyword = False
-
- def set_servers(self, servers):
- """
- Set the pool of servers used by this client.
-
- @param servers: an array of servers.
- Servers can be passed in two forms:
- 1. Strings of the form C{"host:port"}, which implies a default weight of 1.
- 2. Tuples of the form C{("host:port", weight)}, where C{weight} is
- an integer weight value.
- """
- self.servers = [_Host(s, self.debuglog) for s in servers]
- self._init_buckets()
-
- def get_stats(self):
- '''Get statistics from each of the servers.
-
- @return: A list of tuples ( server_identifier, stats_dictionary ).
- The dictionary contains a number of name/value pairs specifying
- the name of the status field and the string value associated with
- it. The values are not converted from strings.
- '''
- data = []
- for s in self.servers:
- if not s.connect(): continue
- if s.family == socket.AF_INET:
- name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
- else:
- name = 'unix:%s (%s)' % ( s.address, s.weight )
- s.send_cmd('stats')
- serverData = {}
- data.append(( name, serverData ))
- readline = s.readline
- while 1:
- line = readline()
- if not line or line.strip() == 'END': break
- stats = line.split(' ', 2)
- serverData[stats[1]] = stats[2]
-
- return(data)
-
- def get_slabs(self):
- data = []
- for s in self.servers:
- if not s.connect(): continue
- if s.family == socket.AF_INET:
- name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
- else:
- name = 'unix:%s (%s)' % ( s.address, s.weight )
- serverData = {}
- data.append(( name, serverData ))
- s.send_cmd('stats items')
- readline = s.readline
- while 1:
- line = readline()
- if not line or line.strip() == 'END': break
- item = line.split(' ', 2)
- #0 = STAT, 1 = ITEM, 2 = Value
- slab = item[1].split(':', 2)
- #0 = items, 1 = Slab #, 2 = Name
- if not serverData.has_key(slab[1]):
- serverData[slab[1]] = {}
- serverData[slab[1]][slab[2]] = item[2]
- return data
-
- def flush_all(self):
- 'Expire all data currently in the memcache servers.'
- for s in self.servers:
- if not s.connect(): continue
- s.send_cmd('flush_all')
- s.expect("OK")
-
- def debuglog(self, str):
- if self.debug:
- sys.stderr.write("MemCached: %s\n" % str)
-
- def _statlog(self, func):
- if not self.stats.has_key(func):
- self.stats[func] = 1
- else:
- self.stats[func] += 1
-
- def forget_dead_hosts(self):
- """
- Reset every host in the pool to an "alive" state.
- """
- for s in self.servers:
- s.deaduntil = 0
-
- def _init_buckets(self):
- self.buckets = []
- for server in self.servers:
- for i in range(server.weight):
- self.buckets.append(server)
-
- def _get_server(self, key):
- if type(key) == types.TupleType:
- serverhash, key = key
- else:
- serverhash = serverHashFunction(key)
-
- for i in range(Client._SERVER_RETRIES):
- server = self.buckets[serverhash % len(self.buckets)]
- if server.connect():
- #print "(using server %s)" % server,
- return server, key
- serverhash = serverHashFunction(str(serverhash) + str(i))
- log.error("Memcacheclient _get_server( ) failed to connect")
- return None, None
-
- def disconnect_all(self):
- for s in self.servers:
- s.close_socket()
-
- def delete_multi(self, keys, time=0, key_prefix=''):
- '''
- Delete multiple keys in the memcache doing just one query.
-
- >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
- >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
- 1
- >>> mc.delete_multi(['key1', 'key2'])
- 1
- >>> mc.get_multi(['key1', 'key2']) == {}
- 1
-
-
- This method is recommended over iterated regular L{delete}s as it reduces total latency, since
- your app doesn't have to wait for each round-trip of L{delete} before sending
- the next one.
-
- @param keys: An iterable of keys to clear
- @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
- @param key_prefix: Optional string to prepend to each key when sending to memcache.
- See docs for L{get_multi} and L{set_multi}.
-
- @return: 1 if no failure in communication with any memcacheds.
- @rtype: int
-
- '''
-
- self._statlog('delete_multi')
-
- server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
-
- # send out all requests on each server before reading anything
- dead_servers = []
-
- rc = 1
- for server in server_keys.iterkeys():
- bigcmd = []
- write = bigcmd.append
- if time != None:
- for key in server_keys[server]: # These are mangled keys
- write("delete %s %d\r\n" % (key, time))
- else:
- for key in server_keys[server]: # These are mangled keys
- write("delete %s\r\n" % key)
- try:
- server.send_cmds(''.join(bigcmd))
- except socket.error, msg:
- rc = 0
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- dead_servers.append(server)
-
- # if any servers died on the way, don't expect them to respond.
- for server in dead_servers:
- del server_keys[server]
-
- notstored = [] # original keys.
- for server, keys in server_keys.iteritems():
- try:
- for key in keys:
- server.expect("DELETED")
- except socket.error, msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- rc = 0
- return rc
-
- def delete(self, key, time=0):
- '''Deletes a key from the memcache.
-
- @return: Nonzero on success.
- @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
- @rtype: int
- '''
- check_key(key)
- server, key = self._get_server(key)
- if not server:
- return 0
- self._statlog('delete')
- if time != None:
- cmd = "delete %s %d" % (key, time)
- else:
- cmd = "delete %s" % key
-
- try:
- server.send_cmd(cmd)
- server.expect("DELETED")
- except socket.error, msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- return 0
- return 1
-
- def incr(self, key, delta=1):
- """
- Sends a command to the server to atomically increment the value for C{key} by
- C{delta}, or by 1 if C{delta} is unspecified. Returns None if C{key} doesn't
- exist on server, otherwise it returns the new value after incrementing.
-
- Note that the value for C{key} must already exist in the memcache, and it
- must be the string representation of an integer.
-
- >>> mc.set("counter", "20") # returns 1, indicating success
- 1
- >>> mc.incr("counter")
- 21
- >>> mc.incr("counter")
- 22
-
- Overflow on server is not checked. Be aware of values approaching
- 2**32. See L{decr}.
-
- @param delta: Integer amount to increment by (should be zero or greater).
- @return: New value after incrementing.
- @rtype: int
- """
- return self._incrdecr("incr", key, delta)
-
- def decr(self, key, delta=1):
- """
- Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
- new values are capped at 0. If server value is 1, a decrement of 2
- returns 0, not -1.
-
- @param delta: Integer amount to decrement by (should be zero or greater).
- @return: New value after decrementing.
- @rtype: int
- """
- return self._incrdecr("decr", key, delta)
-
- def _incrdecr(self, cmd, key, delta):
- check_key(key)
- server, key = self._get_server(key)
- if not server:
- return 0
- self._statlog(cmd)
- cmd = "%s %s %d" % (cmd, key, delta)
- try:
- server.send_cmd(cmd)
- line = server.readline()
- return int(line)
- except socket.error, msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- return None
-
- def add(self, key, val, time = 0, min_compress_len = 0):
- '''
- Add new key with value.
-
- Like L{set}, but only stores in memcache if the key doesn't already exist.
-
- @return: Nonzero on success.
- @rtype: int
- '''
- return self._set("add", key, val, time, min_compress_len)
-
- def append(self, key, val, time=0, min_compress_len=0):
- '''Append the value to the end of the existing key's value.
-
- Only stores in memcache if key already exists.
- Also see L{prepend}.
-
- @return: Nonzero on success.
- @rtype: int
- '''
- return self._set("append", key, val, time, min_compress_len)
-
- def prepend(self, key, val, time=0, min_compress_len=0):
- '''Prepend the value to the beginning of the existing key's value.
-
- Only stores in memcache if key already exists.
- Also see L{append}.
-
- @return: Nonzero on success.
- @rtype: int
- '''
- return self._set("prepend", key, val, time, min_compress_len)
-
- def replace(self, key, val, time=0, min_compress_len=0):
- '''Replace existing key with value.
-
- Like L{set}, but only stores in memcache if the key already exists.
- The opposite of L{add}.
-
- @return: Nonzero on success.
- @rtype: int
- '''
- return self._set("replace", key, val, time, min_compress_len)
-
- def set(self, key, val, time=0, min_compress_len=0, token=None):
- '''Unconditionally sets a key to a given value in the memcache.
-
- The C{key} can optionally be a tuple, with the first element
- being the server hash value and the second being the key. This is
- useful if you want to avoid making this module calculate a hash
- value; you may prefer, for example, to keep all of a given user's
- objects on the same memcache server, so you could use the user's
- unique id as the hash value.
-
- @return: Nonzero on success.
- @rtype: int
- @param time: Tells memcached the time which this value should expire, either
- as a delta number of seconds, or an absolute unix time-since-the-epoch
- value. See the memcached protocol docs section "Storage Commands"
- for more info on <exptime>. We default to 0 == cache forever.
- @param min_compress_len: The threshold length to kick in auto-compression
- of the value using the zlib.compress() routine. If the value being cached is
- a string, then the length of the string is measured, else if the value is an
- object, then the length of the pickle result is measured. If the resulting
- attempt at compression yields a larger string than the input, then it is
- discarded. For backwards compatibility, this parameter defaults to 0,
- indicating don't ever try to compress.
- '''
- return self._set("set", key, val, time, min_compress_len, token=token)
-
-
- def _map_and_prefix_keys(self, key_iterable, key_prefix):
- """Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of
- prefixed key -> original key.
-
-
- """
- # Check it just once ...
- key_extra_len=len(key_prefix)
- if key_prefix:
- check_key(key_prefix)
-
- # server (_Host) -> list of unprefixed server keys in mapping
- server_keys = {}
-
- prefixed_to_orig_key = {}
- # build up a list for each server of all the keys we want.
- for orig_key in key_iterable:
- if type(orig_key) is types.TupleType:
- # Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on.
- # Ensure call to _get_server gets a Tuple as well.
- str_orig_key = str(orig_key[1])
- server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key.
- else:
- str_orig_key = str(orig_key) # set_multi supports int / long keys.
- server, key = self._get_server(key_prefix + str_orig_key)
-
- # Now check to make sure key length is proper ...
- check_key(str_orig_key, key_extra_len=key_extra_len)
-
- if not server:
- continue
-
- if not server_keys.has_key(server):
- server_keys[server] = []
- server_keys[server].append(key)
- prefixed_to_orig_key[key] = orig_key
-
- return (server_keys, prefixed_to_orig_key)
-
- def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
- '''
- Sets multiple keys in the memcache doing just one query.
-
- >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
- >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
- 1
-
-
- This method is recommended over regular L{set} as it lowers the number of
- total packets flying around your network, reducing total latency, since
- your app doesn't have to wait for each round-trip of L{set} before sending
- the next one.
-
- @param mapping: A dict of key/value pairs to set.
- @param time: Tells memcached the time which this value should expire, either
- as a delta number of seconds, or an absolute unix time-since-the-epoch
- value. See the memcached protocol docs section "Storage Commands"
- for more info on <exptime>. We default to 0 == cache forever.
- @param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache:
- >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
- >>> len(notset_keys) == 0
- True
- >>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
- True
-
- Causes keys 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache.
- In this case, the return result would be the list of notset original keys, prefix not applied.
-
- @param min_compress_len: The threshold length to kick in auto-compression
- of the value using the zlib.compress() routine. If the value being cached is
- a string, then the length of the string is measured, else if the value is an
- object, then the length of the pickle result is measured. If the resulting
- attempt at compression yields a larger string than the input, then it is
- discarded. For backwards compatibility, this parameter defaults to 0,
- indicating don't ever try to compress.
- @return: List of keys which failed to be stored [ memcache out of memory, etc. ].
- @rtype: list
-
- '''
-
- self._statlog('set_multi')
-
-
-
- server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(mapping.iterkeys(), key_prefix)
-
- # send out all requests on each server before reading anything
- dead_servers = []
-
- for server in server_keys.iterkeys():
- bigcmd = []
- write = bigcmd.append
- try:
- for key in server_keys[server]: # These are mangled keys
- store_info = self._val_to_store_info(mapping[prefixed_to_orig_key[key]], min_compress_len)
- write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0], time, store_info[1], store_info[2]))
- server.send_cmds(''.join(bigcmd))
- except socket.error, msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- dead_servers.append(server)
-
- # if any servers died on the way, don't expect them to respond.
- for server in dead_servers:
- del server_keys[server]
-
- # short-circuit if there are no servers, just return all keys
- if not server_keys: return(mapping.keys())
-
- notstored = [] # original keys.
- for server, keys in server_keys.iteritems():
- try:
- for key in keys:
- line = server.readline()
- if line == 'STORED':
- continue
- else:
- notstored.append(prefixed_to_orig_key[key]) #un-mangle.
- except (_Error, socket.error), msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- return notstored
-
- def _val_to_store_info(self, val, min_compress_len):
- """
- Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself.
- """
- flags = 0
- if isinstance(val, str):
- pass
- elif isinstance(val, int):
- flags |= Client._FLAG_INTEGER
- val = "%d" % val
- # force no attempt to compress this silly string.
- min_compress_len = 0
- elif isinstance(val, long):
- flags |= Client._FLAG_LONG
- val = "%d" % val
- # force no attempt to compress this silly string.
- min_compress_len = 0
- else:
- flags |= Client._FLAG_PICKLE
- file = StringIO()
- if self.picklerIsKeyword:
- pickler = self.pickler(file, protocol = self.pickleProtocol)
- else:
- pickler = self.pickler(file, self.pickleProtocol)
- if self.persistent_id:
- pickler.persistent_id = self.persistent_id
- pickler.dump(val)
- val = file.getvalue()
-
- lv = len(val)
- # We should try to compress if min_compress_len > 0 and we could
- # import zlib and this string is longer than our min threshold.
- if min_compress_len and _supports_compress and lv > min_compress_len:
- comp_val = compress(val)
- # Only retain the result if the compression result is smaller
- # than the original.
- if len(comp_val) < lv:
- flags |= Client._FLAG_COMPRESSED
- val = comp_val
-
- # silently do not store if value length exceeds maximum
- if len(val) >= SERVER_MAX_VALUE_LENGTH: return(0)
-
- return (flags, len(val), val)
-
- def _set(self, cmd, key, val, time, min_compress_len = 0, token=None):
- check_key(key)
- server, key = self._get_server(key)
- if not server:
- return 0
-
- self._statlog(cmd)
-
- store_info = self._val_to_store_info(val, min_compress_len)
- if not store_info: return(0)
-
- if token is not None:
- cmd = "cas"
- fullcmd = "cas %s %d %d %d %s\r\n%s" % (key, store_info[0], time, store_info[1], token, store_info[2])
- else:
- fullcmd = "%s %s %d %d %d\r\n%s" % (cmd, key, store_info[0], time, store_info[1], store_info[2])
- try:
- server.send_cmd(fullcmd)
- result = server.expect("STORED")
-
- if (result == "STORED"):
- return True
-
- if (result == "NOT_FOUND"):
- raise NotFoundError(key)
-
- if token and result == "EXISTS":
- log.debug("Memcacheclient check-and-set failed")
- raise TokenMismatchError(key)
-
- log.error("Memcacheclient %s command failed with result (%s)" %
- (cmd, result))
-
- return False
-
- except socket.error, msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- return 0
-
- def get(self, key):
- '''Retrieves a key from the memcache.
-
- @return: The value or None.
- '''
- check_key(key)
- server, key = self._get_server(key)
- if not server:
- raise MemcacheError("Memcache connection error")
-
- self._statlog('get')
-
- try:
- server.send_cmd("get %s" % key)
- rkey, flags, rlen, = self._expectvalue(server)
- if not rkey:
- return None
- value = self._recv_value(server, flags, rlen)
- server.expect("END")
- except (_Error, socket.error), msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- raise MemcacheError("Memcache connection error")
- return value
-
- def gets(self, key):
- '''Retrieves a key from the memcache.
-
- @return: The value or None.
- '''
- check_key(key)
- server, key = self._get_server(key)
- if not server:
- raise MemcacheError("Memcache connection error")
-
- self._statlog('get')
-
- try:
- server.send_cmd("gets %s" % key)
- rkey, flags, rlen, cas_token = self._expectvalue_cas(server)
- if not rkey:
- return (None, None)
- value = self._recv_value(server, flags, rlen)
- server.expect("END")
- except (_Error, socket.error), msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- raise MemcacheError("Memcache connection error")
- return (value, cas_token)
-
- def get_multi(self, keys, key_prefix=''):
- '''
- Retrieves multiple keys from the memcache doing just one query.
-
- >>> success = mc.set("foo", "bar")
- >>> success = mc.set("baz", 42)
- >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
- 1
- >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
- 1
-
- This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.
- >>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
- 1
-
- get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too, such as your db primary key fields.
- They're passed through str() before being sent off to memcache, with or without the use of a key_prefix.
- In this mode, the key_prefix could be a table name, and the key itself a db primary key number.
-
- >>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
- 1
- >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
- 1
-
- This method is recommended over regular L{get} as it lowers the number of
- total packets flying around your network, reducing total latency, since
- your app doesn't have to wait for each round-trip of L{get} before sending
- the next one.
-
- See also L{set_multi}.
-
- @param keys: An array of keys.
- @param key_prefix: A string to prefix each key when we communicate with memcache.
- Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
- @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present.
-
- '''
-
- self._statlog('get_multi')
-
- server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
-
- # send out all requests on each server before reading anything
- dead_servers = []
- for server in server_keys.iterkeys():
- try:
- server.send_cmd("get %s" % " ".join(server_keys[server]))
- except socket.error, msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- dead_servers.append(server)
-
- # if any servers died on the way, don't expect them to respond.
- for server in dead_servers:
- del server_keys[server]
-
- retvals = {}
- for server in server_keys.iterkeys():
- try:
- line = server.readline()
- while line and line != 'END':
- rkey, flags, rlen = self._expectvalue(server, line)
- # Bo Yang reports that this can sometimes be None
- if rkey is not None:
- val = self._recv_value(server, flags, rlen)
- try:
- retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key.
- except KeyError:
- pass
- line = server.readline()
- except (_Error, socket.error), msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- return retvals
-
- def gets_multi(self, keys, key_prefix=''):
- '''
- Retrieves multiple keys from the memcache doing just one query.
- See also L{gets} and L{get_multi}.
- '''
-
- self._statlog('gets_multi')
-
- server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
-
- # send out all requests on each server before reading anything
- dead_servers = []
- for server in server_keys.iterkeys():
- try:
- server.send_cmd("gets %s" % " ".join(server_keys[server]))
- except socket.error, msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- dead_servers.append(server)
-
- # if any servers died on the way, don't expect them to respond.
- for server in dead_servers:
- del server_keys[server]
-
- retvals = {}
- for server in server_keys.iterkeys():
- try:
- line = server.readline()
- while line and line != 'END':
- rkey, flags, rlen, cas_token = self._expectvalue_cas(server, line)
- # Bo Yang reports that this can sometimes be None
- if rkey is not None:
- val = self._recv_value(server, flags, rlen)
- try:
- retvals[prefixed_to_orig_key[rkey]] = (val, cas_token) # un-prefix returned key.
- except KeyError:
- pass
- line = server.readline()
- except (_Error, socket.error), msg:
- if type(msg) is types.TupleType: msg = msg[1]
- server.mark_dead(msg)
- return retvals
-
- def _expectvalue(self, server, line=None):
- if not line:
- line = server.readline()
-
- if line[:5] == 'VALUE':
- resp, rkey, flags, len = line.split()
- flags = int(flags)
- rlen = int(len)
- return (rkey, flags, rlen)
- else:
- return (None, None, None)
-
- def _expectvalue_cas(self, server, line=None):
- if not line:
- line = server.readline()
-
- if line[:5] == 'VALUE':
- resp, rkey, flags, len, rtoken = line.split()
- flags = int(flags)
- rlen = int(len)
- return (rkey, flags, rlen, rtoken)
- else:
- return (None, None, None, None)
-
- def _recv_value(self, server, flags, rlen):
- rlen += 2 # include \r\n
- buf = server.recv(rlen)
- if len(buf) != rlen:
- raise _Error("received %d bytes when expecting %d" % (len(buf), rlen))
-
- if len(buf) == rlen:
- buf = buf[:-2] # strip \r\n
-
- if flags & Client._FLAG_COMPRESSED:
- buf = decompress(buf)
-
-
- if flags == 0 or flags == Client._FLAG_COMPRESSED:
- # Either a bare string or a compressed string now decompressed...
- val = buf
- elif flags & Client._FLAG_INTEGER:
- val = int(buf)
- elif flags & Client._FLAG_LONG:
- val = long(buf)
- elif flags & Client._FLAG_PICKLE:
- try:
- file = StringIO(buf)
- unpickler = self.unpickler(file)
- if self.persistent_load:
- unpickler.persistent_load = self.persistent_load
- val = unpickler.load()
- except Exception, e:
- self.debuglog('Pickle error: %s\n' % e)
- val = None
- else:
- self.debuglog("unknown flags on get: %x\n" % flags)
-
- return val
-
-
-
-class TestClient(Client):
- """
- Fake memcache client for unit tests
-
- """
-
- def __init__(self, servers, debug=0, pickleProtocol=0,
- pickler=pickle.Pickler, unpickler=pickle.Unpickler,
- pload=None, pid=None):
-
- local.__init__(self)
-
- super(TestClient, self).__init__(servers, debug=debug,
- pickleProtocol=pickleProtocol, pickler=pickler, unpickler=unpickler,
- pload=pload, pid=pid)
-
- self.data = {}
- self.token = 0
-
-
-
- def get_stats(self):
- raise NotImplementedError()
-
- def get_slabs(self):
- raise NotImplementedError()
-
- def flush_all(self):
- raise NotImplementedError()
-
- def forget_dead_hosts(self):
- raise NotImplementedError()
-
- def delete_multi(self, keys, time=0, key_prefix=''):
- '''
- Delete multiple keys in the memcache doing just one query.
-
- >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
- >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
- 1
- >>> mc.delete_multi(['key1', 'key2'])
- 1
- >>> mc.get_multi(['key1', 'key2']) == {}
- 1
- '''
-
- self._statlog('delete_multi')
- for key in keys:
- key = key_prefix + key
- del self.data[key]
- return 1
-
- def delete(self, key, time=0):
- '''Deletes a key from the memcache.
-
- @return: Nonzero on success.
- @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
- @rtype: int
- '''
- check_key(key)
- del self.data[key]
- return 1
-
-
- def incr(self, key, delta=1):
- raise NotImplementedError()
-
- def decr(self, key, delta=1):
- raise NotImplementedError()
-
- def add(self, key, val, time = 0, min_compress_len = 0):
- raise NotImplementedError()
-
- def append(self, key, val, time=0, min_compress_len=0):
- raise NotImplementedError()
-
- def prepend(self, key, val, time=0, min_compress_len=0):
- raise NotImplementedError()
-
- def replace(self, key, val, time=0, min_compress_len=0):
- raise NotImplementedError()
-
- def set(self, key, val, time=0, min_compress_len=0, token=None):
- self._statlog('set')
- return self._set("set", key, val, time, min_compress_len, token=token)
-
- def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
- self._statlog('set_multi')
- for key, val in mapping.iteritems():
- key = key_prefix + key
- self._set("set", key, val, time, min_compress_len)
- return []
-
- def _set(self, cmd, key, val, time, min_compress_len = 0, token=None):
- check_key(key)
- self._statlog(cmd)
-
- serialized = pickle.dumps(val, pickle.HIGHEST_PROTOCOL)
-
- if token is not None:
- if self.data.has_key(key):
- stored_val, stored_token = self.data[key]
- if token != stored_token:
- raise TokenMismatchError(key)
-
- self.data[key] = (serialized, str(self.token))
- self.token += 1
-
- return True
-
- def get(self, key):
- check_key(key)
-
- self._statlog('get')
- if self.data.has_key(key):
- stored_val, stored_token = self.data[key]
- val = pickle.loads(stored_val)
- return val
- return None
-
-
- def gets(self, key):
- check_key(key)
- if self.data.has_key(key):
- stored_val, stored_token = self.data[key]
- val = pickle.loads(stored_val)
- return (val, stored_token)
- return (None, None)
-
- def get_multi(self, keys, key_prefix=''):
- self._statlog('get_multi')
-
- results = {}
- for key in keys:
- key = key_prefix + key
- val = self.get(key)
- results[key] = val
- return results
-
- def gets_multi(self, keys, key_prefix=''):
- self._statlog('gets_multi')
- results = {}
- for key in keys:
- key = key_prefix + key
- result = self.gets(key)
- if result[1] is not None:
- results[key] = result
- return results
-
-
-class _Host:
- _DEAD_RETRY = 1 # number of seconds before retrying a dead server.
- _SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout.
-
- def __init__(self, host, debugfunc=None):
- if isinstance(host, types.TupleType):
- host, self.weight = host
- else:
- self.weight = 1
-
- # parse the connection string
- m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
- if not m:
- m = re.match(r'^(?P<proto>inet):'
- r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
- if not m: m = re.match(r'^(?P<host>[^:]+):(?P<port>[0-9]+)$', host)
- if not m:
- raise ValueError('Unable to parse connection string: "%s"' % host)
-
- hostData = m.groupdict()
- if hostData.get('proto') == 'unix':
- self.family = socket.AF_UNIX
- self.address = hostData['path']
- else:
- self.family = socket.AF_INET
- self.ip = hostData['host']
- self.port = int(hostData.get('port', 11211))
- self.address = ( self.ip, self.port )
-
- if not debugfunc:
- debugfunc = lambda x: x
- self.debuglog = debugfunc
-
- self.deaduntil = 0
- self.socket = None
-
- self.buffer = ''
-
- def _check_dead(self):
- if self.deaduntil and self.deaduntil > time.time():
- return 1
- self.deaduntil = 0
- return 0
-
- def connect(self):
- if self._get_socket():
- return 1
- return 0
-
- def mark_dead(self, reason):
- log.error("Memcacheclient socket marked dead (%s)" % (reason,))
- self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
- self.deaduntil = time.time() + _Host._DEAD_RETRY
- self.close_socket()
-
- def _get_socket(self):
- if self._check_dead():
- log.error("Memcacheclient _get_socket() found dead socket")
- return None
- if self.socket:
- return self.socket
- s = socket.socket(self.family, socket.SOCK_STREAM)
- if hasattr(s, 'settimeout'): s.settimeout(self._SOCKET_TIMEOUT)
- try:
- s.connect(self.address)
- except socket.timeout, msg:
- log.error("Memcacheclient _get_socket() connection timed out (%s)" %
- (msg,))
- self.mark_dead("connect: %s" % msg)
- return None
- except socket.error, msg:
- if type(msg) is types.TupleType: msg = msg[1]
- log.error("Memcacheclient _get_socket() connection error (%s)" %
- (msg,))
- self.mark_dead("connect: %s" % msg)
- return None
- self.socket = s
- self.buffer = ''
- return s
-
- def close_socket(self):
- if self.socket:
- self.socket.close()
- self.socket = None
-
- def send_cmd(self, cmd):
- self.socket.sendall(cmd + '\r\n')
-
- def send_cmds(self, cmds):
- """ cmds already has trailing \r\n's applied """
- self.socket.sendall(cmds)
-
- def readline(self):
- buf = self.buffer
- recv = self.socket.recv
- while True:
- index = buf.find('\r\n')
- if index >= 0:
- break
- data = recv(4096)
- if not data:
- self.mark_dead('Connection closed while reading from %s'
- % repr(self))
- break
- buf += data
- if index >= 0:
- self.buffer = buf[index+2:]
- buf = buf[:index]
- else:
- self.buffer = ''
- return buf
-
- def expect(self, text):
- line = self.readline()
- if line != text:
- self.debuglog("while expecting '%s', got unexpected response '%s'" % (text, line))
- return line
-
- def recv(self, rlen):
- self_socket_recv = self.socket.recv
- buf = self.buffer
- while len(buf) < rlen:
- foo = self_socket_recv(4096)
- buf += foo
- if len(foo) == 0:
- raise _Error, ( 'Read %d bytes, expecting %d, '
- 'read returned 0 length bytes' % ( len(buf), rlen ))
- self.buffer = buf[rlen:]
- return buf[:rlen]
-
- def __str__(self):
- d = ''
- if self.deaduntil:
- d = " (dead until %d)" % self.deaduntil
-
- if self.family == socket.AF_INET:
- return "inet:%s:%d%s" % (self.address[0], self.address[1], d)
- else:
- return "unix:%s%s" % (self.address, d)
-
-def check_key(key, key_extra_len=0):
- """Checks sanity of key. Fails if:
- Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLengthError).
- Contains control characters (Raises MemcachedKeyCharacterError).
- Is not a string (Raises MemcachedKeyTypeError)
- Is a unicode string (Raises MemcachedStringEncodingError)
- Is None (Raises MemcachedKeyNoneError)
- """
-
- return # Short-circuit this expensive method
-
- if type(key) == types.TupleType: key = key[1]
- if not key:
- raise Client.MemcachedKeyNoneError, ("Key is None")
- if isinstance(key, unicode):
- raise Client.MemcachedStringEncodingError, ("Keys must be str()'s, not "
- "unicode. Convert your unicode strings using "
- "mystring.encode(charset)!")
- if not isinstance(key, str):
- raise Client.MemcachedKeyTypeError, ("Key must be str()'s")
-
- if isinstance(key, basestring):
- if len(key) + key_extra_len > SERVER_MAX_KEY_LENGTH:
- raise Client.MemcachedKeyLengthError, ("Key length is > %s"
- % SERVER_MAX_KEY_LENGTH)
- for char in key:
- if ord(char) < 32 or ord(char) == 127:
- raise Client.MemcachedKeyCharacterError, "Control characters not allowed"
-
-def _doctest():
- import doctest, memcacheclient
- servers = ["127.0.0.1:11211"]
- mc = Client(servers, debug=1)
- globs = {"mc": mc}
- return doctest.testmod(memcacheclient, globs=globs)
-
-if __name__ == "__main__":
- print "Testing docstrings..."
- _doctest()
- print "Running tests:"
- print
- serverList = [["127.0.0.1:11211"]]
- if '--do-unix' in sys.argv:
- serverList.append([os.path.join(os.getcwd(), 'memcached.socket')])
-
- for servers in serverList:
- mc = Client(servers, debug=1)
-
- def to_s(val):
- if not isinstance(val, types.StringTypes):
- return "%s (%s)" % (val, type(val))
- return "%s" % val
- def test_setget(key, val):
- print "Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)),
- mc.set(key, val)
- newval = mc.get(key)
- if newval == val:
- print "OK"
- return 1
- else:
- print "FAIL"
- return 0
-
-
- class FooStruct:
- def __init__(self):
- self.bar = "baz"
- def __str__(self):
- return "A FooStruct"
- def __eq__(self, other):
- if isinstance(other, FooStruct):
- return self.bar == other.bar
- return 0
-
- test_setget("a_string", "some random string")
- test_setget("an_integer", 42)
- if test_setget("long", long(1<<30)):
- print "Testing delete ...",
- if mc.delete("long"):
- print "OK"
- else:
- print "FAIL"
- print "Testing get_multi ...",
- print mc.get_multi(["a_string", "an_integer"])
-
- print "Testing get(unknown value) ...",
- print to_s(mc.get("unknown_value"))
-
- f = FooStruct()
- test_setget("foostruct", f)
-
- print "Testing incr ...",
- x = mc.incr("an_integer", 1)
- if x == 43:
- print "OK"
- else:
- print "FAIL"
-
- print "Testing decr ...",
- x = mc.decr("an_integer", 1)
- if x == 42:
- print "OK"
- else:
- print "FAIL"
-
- # sanity tests
- print "Testing sending spaces...",
- try:
- x = mc.set("this has spaces", 1)
- except Client.MemcachedKeyCharacterError, msg:
- print "OK"
- else:
- print "FAIL"
-
- print "Testing sending control characters...",
- try:
- x = mc.set("this\x10has\x11control characters\x02", 1)
- except Client.MemcachedKeyCharacterError, msg:
- print "OK"
- else:
- print "FAIL"
-
- print "Testing using insanely long key...",
- try:
- x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'aaaa', 1)
- except Client.MemcachedKeyLengthError, msg:
- print "OK"
- else:
- print "FAIL"
-
- print "Testing sending a unicode-string key...",
- try:
- x = mc.set(u'keyhere', 1)
- except Client.MemcachedStringEncodingError, msg:
- print "OK",
- else:
- print "FAIL",
- try:
- x = mc.set((u'a'*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
- except:
- print "FAIL",
- else:
- print "OK",
- import pickle
- s = pickle.loads('V\\u4f1a\np0\n.')
- try:
- x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
- except Client.MemcachedKeyLengthError:
- print "OK"
- else:
- print "FAIL"
-
- print "Testing using a value larger than the memcached value limit...",
- x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH)
- if mc.get('keyhere') == None:
- print "OK",
- else:
- print "FAIL",
- x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa')
- if mc.get('keyhere') == None:
- print "OK"
- else:
- print "FAIL"
-
- print "Testing set_multi() with no memcacheds running",
- mc.disconnect_all()
- errors = mc.set_multi({'keyhere' : 'a', 'keythere' : 'b'})
- if errors != []:
- print "FAIL"
- else:
- print "OK"
-
- print "Testing delete_multi() with no memcacheds running",
- mc.disconnect_all()
- ret = mc.delete_multi({'keyhere' : 'a', 'keythere' : 'b'})
- if ret != 1:
- print "FAIL"
- else:
- print "OK"
-
-# vim: ts=4 sw=4 et :
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/run
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/run 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/run 2010-04-07 19:27:31 UTC (rev 5439)
@@ -35,8 +35,6 @@
wd="$(cd "$(dirname "$0")" && pwd)";
-. "${wd}/support/build.sh";
-
DAVD="cal"
# Echo the usage for the main 'run' script, then exit with an error.
@@ -108,6 +106,7 @@
fi;
}
+. "${wd}/support/build.sh";
# Actually run the server. (Or, exit, if things aren't sufficiently set up in
# order to do that.)
@@ -121,7 +120,7 @@
echo "Missing config file: ${config}";
echo "You might want to start by copying the test configuration:";
echo "";
- echo " cp conf/${DAVD}davd-test.plist conf/caldavd-dev.plist";
+ echo " cp conf/${DAVD}davd-test.plist conf/${DAVD}davd-dev.plist";
echo "";
exit 1;
fi;
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/setup.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/setup.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/setup.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -68,7 +68,10 @@
from distutils.core import Extension
-extensions = []
+extensions = [
+ Extension("twext.python.sendmsg",
+ sources=["twext/python/sendmsg.c"])
+]
if sys.platform == "darwin":
extensions.append(
@@ -96,7 +99,7 @@
author = "Apple Inc.",
author_email = None,
license = None,
- platforms = [ "all" ],
+ platforms = ["all"],
packages = find_modules(),
package_data = {
"twistedcaldav": [
@@ -120,7 +123,7 @@
data_files = [ ("caldavd", ["conf/caldavd.plist"]),
("carddavd", ["conf/carddavd.plist" ])],
ext_modules = extensions,
- py_modules = ["kqreactor", "memcacheclient"],
+ py_modules = [],
)
if "install" in dist.commands:
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/support/Makefile.Apple
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/support/Makefile.Apple 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/support/Makefile.Apple 2010-04-07 19:27:31 UTC (rev 5439)
@@ -120,7 +120,7 @@
$(BuildDirectory)/$(Project):
@echo "Copying source for $(Project)..."
$(_v) $(MKDIR) -p "$@"
- $(_v) pax -rw bin conf Makefile lib-patches setup.py kqreactor.py memcacheclient.py calendarserver twistedcaldav twext txdav txcaldav txcarddav twisted support "$@/"
+ $(_v) pax -rw bin conf Makefile lib-patches setup.py calendarserver twistedcaldav twext txdav txcaldav txcarddav twisted support "$@/"
$(BuildDirectory)/%: %.tgz
@echo "Extracting source for $(notdir $<)..."
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/support/build.sh
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/support/build.sh 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/support/build.sh 2010-04-07 19:27:31 UTC (rev 5439)
@@ -51,7 +51,7 @@
disable_setup="false";
print_path="false";
install="";
- daemonize="-X";
+ daemonize="-X -L";
kill="false";
restart="false";
plugin_name="caldav";
@@ -63,7 +63,7 @@
# These variables are defaults for things which might be configured by
# environment; only set them if they're un-set.
conditional_set wd "$(pwd)";
- conditional_set config "${wd}/conf/caldavd-dev.plist";
+ conditional_set config "${wd}/conf/${DAVD}davd-dev.plist";
conditional_set caldav "${wd}";
if [ -z "${CALENDARSERVER_CACHE_DEPS-}" ]; then
@@ -507,6 +507,12 @@
# Tool dependencies. The code itself doesn't depend on these, but you probably want them.
svn_get "CalDAVTester" "${top}/CalDAVTester" "${svn_uri_base}/CalDAVTester/trunk" HEAD;
svn_get "Pyflakes" "${top}/Pyflakes" http://divmod.org/svn/Divmod/trunk/Pyflakes HEAD;
+
+ if "${do_setup}"; then
+ cd "${caldav}";
+ echo "Building our own extension modules...";
+ python setup.py build_ext --inplace;
+ fi;
}
# Actually do the initialization, once all functions are defined.
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/test
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/test 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/test 2010-04-07 19:27:31 UTC (rev 5439)
@@ -72,7 +72,4 @@
test_modules="calendarserver twistedcaldav twext txdav txcaldav txcarddav ${m_twisted}";
fi;
-for module in ${test_modules}; do
- cd "${wd}" && "${python}" "${twisted}/bin/trial" --rterrors ${random} ${until_fail} ${no_colour} ${coverage} "${module}";
-done;
-
+cd "${wd}" && "${python}" "${twisted}/bin/trial" --rterrors ${random} ${until_fail} ${no_colour} ${coverage} ${test_modules};
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/kqreactor.py (from rev 5438, CalendarServer/trunk/twext/internet/kqreactor.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/kqreactor.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/kqreactor.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,228 @@
+# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+A kqueue()/kevent() based implementation of the Twisted main loop.
+
+To install the event loop (and you should do this before any connections,
+listeners or connectors are added)::
+
+ | from twisted.internet import kqreactor
+ | kqreactor.install()
+
+
+Maintainer: U{Itamar Shtull-Trauring<mailto:twisted at itamarst.org>}
+"""
+
+
+import errno, sys
+
+try:
+ from select import KQ_FILTER_READ, KQ_FILTER_WRITE, KQ_EV_DELETE, KQ_EV_ADD
+ from select import kqueue, kevent, KQ_EV_ENABLE, KQ_EV_DISABLE, KQ_EV_EOF
+except ImportError:
+ from select26 import KQ_FILTER_READ, KQ_FILTER_WRITE, KQ_EV_DELETE, KQ_EV_ADD
+ from select26 import kqueue, kevent, KQ_EV_ENABLE, KQ_EV_DISABLE, KQ_EV_EOF
+
+from zope.interface import implements
+
+from twisted.python import log
+from twisted.internet import main, posixbase
+from twisted.internet.interfaces import IReactorFDSet
+
+
+
+class KQueueReactor(posixbase.PosixReactorBase):
+ """
+ A reactor that uses kqueue(2)/kevent(2).
+
+ @ivar _kq: A L{kqueue} which will be used to check for I/O readiness.
+
+ @ivar _selectables: A dictionary mapping integer file descriptors to
+ instances of L{FileDescriptor} which have been registered with the
+ reactor. All L{FileDescriptors} which are currently receiving read or
+ write readiness notifications will be present as values in this
+ dictionary.
+
+ @ivar _reads: A set storing integer file descriptors. These values will be
+ registered with C{_kq} for read readiness notifications which will be
+ dispatched to the corresponding L{FileDescriptor} instances in
+ C{_selectables}.
+
+ @ivar _writes: A set storing integer file descriptors. These values will
+ be registered with C{_kq} for write readiness notifications which will
+ be dispatched to the corresponding L{FileDescriptor} instances in
+ C{_selectables}.
+ """
+ implements(IReactorFDSet)
+
+ def __init__(self):
+ """
+ Initialize kqueue object, file descriptor tracking sets, and the base
+ class.
+ """
+ self._kq = kqueue()
+ self._reads = set()
+ self._writes = set()
+ self._selectables = {}
+ posixbase.PosixReactorBase.__init__(self)
+
+
+ def _updateRegistration(self, fd, filter, flags):
+ ev = kevent(fd, filter, flags)
+ self._kq.control([ev], 0, 0)
+
+
+ def addReader(self, reader):
+ """
+ Add a FileDescriptor for notification of data available to read.
+ """
+ fd = reader.fileno()
+ if fd not in self._reads:
+ if fd not in self._selectables:
+ self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD|KQ_EV_ENABLE)
+ self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD|KQ_EV_DISABLE)
+ self._selectables[fd] = reader
+ else:
+ self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ENABLE)
+ self._reads.add(fd)
+
+
+ def addWriter(self, writer):
+ """
+ Add a FileDescriptor for notification of data available to write.
+ """
+ fd = writer.fileno()
+ if fd not in self._writes:
+ if fd not in self._selectables:
+ self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD|KQ_EV_ENABLE)
+ self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD|KQ_EV_DISABLE)
+ self._selectables[fd] = writer
+ else:
+ self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ENABLE)
+ self._writes.add(fd)
+
+
+ def removeReader(self, reader):
+ """
+ Remove a Selectable for notification of data available to read.
+ """
+ fd = reader.fileno()
+ if fd == -1:
+ for fd, fdes in self._selectables.iteritems():
+ if reader is fdes:
+ break
+ else:
+ return
+ if fd in self._reads:
+ self._reads.discard(fd)
+ if fd not in self._writes:
+ del self._selectables[fd]
+ self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_DISABLE)
+
+
+ def removeWriter(self, writer):
+ """
+ Remove a Selectable for notification of data available to write.
+ """
+ fd = writer.fileno()
+ if fd == -1:
+ for fd, fdes in self._selectables.iteritems():
+ if writer is fdes:
+ break
+ else:
+ return
+ if fd in self._writes:
+ self._writes.discard(fd)
+ if fd not in self._reads:
+ del self._selectables[fd]
+ self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_DISABLE)
+
+
+ def removeAll(self):
+ """
+ Remove all selectables, and return a list of them.
+ """
+ if self.waker is not None:
+ self.removeReader(self.waker)
+ result = self._selectables.values()
+ for fd in self._reads:
+ self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_DELETE)
+ for fd in self._writes:
+ self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_DELETE)
+ self._reads.clear()
+ self._writes.clear()
+ self._selectables.clear()
+ if self.waker is not None:
+ self.addReader(self.waker)
+ return result
+
+
+ def getReaders(self):
+ return [self._selectables[fd] for fd in self._reads]
+
+
+ def getWriters(self):
+ return [self._selectables[fd] for fd in self._writes]
+
+
+ def doKEvent(self, timeout):
+ """
+ Poll the kqueue for new events.
+ """
+ if timeout is None:
+ timeout = 1
+
+ try:
+ l = self._kq.control([], len(self._selectables), timeout)
+ except OSError, e:
+ if e[0] == errno.EINTR:
+ return
+ else:
+ raise
+ _drdw = self._doWriteOrRead
+ for event in l:
+ fd = event.ident
+ try:
+ selectable = self._selectables[fd]
+ except KeyError:
+ # Handles the infrequent case where one selectable's
+ # handler disconnects another.
+ continue
+ log.callWithLogger(selectable, _drdw, selectable, fd, event)
+
+
+ def _doWriteOrRead(self, selectable, fd, event):
+ why = None
+ inRead = False
+ filter, flags, data, fflags = event.filter, event.flags, event.data, event.fflags
+ if flags & KQ_EV_EOF and data and fflags:
+ why = main.CONNECTION_LOST
+ else:
+ try:
+ if filter == KQ_FILTER_READ:
+ inRead = True
+ why = selectable.doRead()
+ if filter == KQ_FILTER_WRITE:
+ inRead = False
+ why = selectable.doWrite()
+ if not selectable.fileno() == fd:
+ inRead = False
+ why = main.CONNECTION_LOST
+ except:
+ log.err()
+ why = sys.exc_info()[1]
+
+ if why:
+ self._disconnectSelectable(selectable, why, inRead)
+
+ doIteration = doKEvent
+
+
+def install():
+ k = KQueueReactor()
+ main.installReactor(k)
+
+
+__all__ = ["KQueueReactor", "install"]
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/sendfdport.py (from rev 5438, CalendarServer/trunk/twext/internet/sendfdport.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/sendfdport.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/sendfdport.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,301 @@
+# -*- test-case-name: twext.internet.test.test_sendfdport -*-
+##
+# Copyright (c) 2005-2009 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Implementation of a TCP/SSL port that uses sendmsg/recvmsg as implemented by
+L{twext.python.sendfd}.
+"""
+
+from os import close
+from errno import EAGAIN, ENOBUFS
+from socket import (socketpair, fromfd, error as SocketError,
+ AF_INET, AF_UNIX, SOCK_STREAM, SOCK_DGRAM)
+
+from twisted.python import log
+
+from twisted.internet.abstract import FileDescriptor
+from twisted.internet.protocol import Protocol, Factory
+
+from twext.python.sendmsg import sendmsg, recvmsg
+from twext.python.sendfd import sendfd, recvfd
+
+class InheritingProtocol(Protocol, object):
+ """
+ When a connection comes in on this protocol, stop reading and writing, and
+ dispatch the socket to another process via its factory.
+ """
+
+ def connectionMade(self):
+ """
+ A connection was received; transmit the file descriptor to another
+ process via L{InheritingProtocolFactory} and remove my transport from
+ the reactor.
+ """
+ self.transport.stopReading()
+ self.transport.stopWriting()
+ skt = self.transport.getHandle()
+ self.factory.sendSocket(skt)
+
+
+
+class InheritingProtocolFactory(Factory, object):
+ """
+ In the 'master' process, make one of these and hook it up to the sockets
+ where you want to hear stuff.
+
+ @ivar dispatcher: an L{InheritedSocketDispatcher} to use to dispatch
+ incoming connections to an appropriate subprocess.
+
+ @ivar description: the string to send along with connections received on
+ this factory.
+ """
+
+ protocol = InheritingProtocol
+
+ def __init__(self, dispatcher, description):
+ self.dispatcher = dispatcher
+ self.description = description
+
+
+ def sendSocket(self, socketObject):
+ """
+ Send the given socket object on to my dispatcher.
+ """
+ self.dispatcher.sendFileDescriptor(socketObject, self.description)
+
+
+
+class _SubprocessSocket(FileDescriptor, object):
+ """
+ A socket in the master process pointing at a file descriptor that can be
+ used to transmit sockets to a subprocess.
+
+ @ivar skt: the UNIX socket used as the sendmsg() transport.
+
+ @ivar outgoingSocketQueue: an outgoing queue of sockets to send to the
+ subprocess.
+
+ @type outgoingSocketQueue: a C{list} of 2-tuples of C{(socket-object, str)}
+
+ @ivar status: a record of the last status message received (via recvmsg)
+ from the subprocess: this is an application-specific indication of how
+ ready this subprocess is to receive more connections. A typical usage
+ would be to count the open connections: this is what is passed to
+
+ @type status: C{str}
+ """
+
+ def __init__(self, dispatcher, skt):
+ FileDescriptor.__init__(self, dispatcher.reactor)
+ self.status = None
+ self.dispatcher = dispatcher
+ self.skt = skt # XXX needs to be set non-blocking by somebody
+ self.fileno = skt.fileno
+ self.outgoingSocketQueue = []
+
+
+ def sendSocketToPeer(self, skt, description):
+ """
+ Enqueue a socket to send to the subprocess.
+ """
+ self.outgoingSocketQueue.append((skt, description))
+ self.startWriting()
+
+
+ def doRead(self):
+ """
+ Receive a status / health message and record it.
+ """
+ try:
+ data, flags, ancillary = recvmsg(self.skt.fileno())
+ except SocketError, se:
+ if se.errno not in (EAGAIN, ENOBUFS):
+ raise
+ else:
+ self.dispatcher.statusMessage(self, data)
+
+
+ def doWrite(self):
+ """
+ Transmit as many queued pending file descriptors as we can.
+ """
+ while self.outgoingSocketQueue:
+ skt, desc = self.outgoingSocketQueue.pop(0)
+ try:
+ sendfd(self.skt.fileno(), skt.fileno(), desc)
+ except SocketError, se:
+ if se.errno in (EAGAIN, ENOBUFS):
+ self.outgoingSocketQueue.insert(0, (skt, desc))
+ return
+ raise
+ if not self.outgoingSocketQueue:
+ self.stopWriting()
+
+
+
+class InheritedSocketDispatcher(object):
+ """
+ Used by one or more L{InheritingProtocolFactory}s, this keeps track of a
+ list of available sockets in subprocesses and sends inbound connections
+ towards them.
+ """
+
+ def __init__(self, statusWatcher):
+ """
+ Create a socket dispatcher.
+ """
+ self._subprocessSockets = []
+ self.statusWatcher = statusWatcher
+ from twisted.internet import reactor
+ self.reactor = reactor
+
+
+ @property
+ def statuses(self):
+ """
+ Yield the current status of all subprocess sockets.
+ """
+ for subsocket in self._subprocessSockets:
+ yield subsocket.status
+
+
+ def statusMessage(self, subsocket, message):
+ """
+ The status of a connection has changed; update all registered status
+ change listeners.
+ """
+ subsocket.status = self.statusWatcher.statusFromMessage(subsocket.status, message)
+
+
+ def sendFileDescriptor(self, skt, description):
+ """
+ A connection has been received. Dispatch it.
+
+ @param skt: a socket object
+
+ @param description: some text to identify to the subprocess's
+ L{InheritedPort} what type of transport to create for this socket.
+ """
+ self._subprocessSockets.sort(key=lambda conn: conn.status)
+ selectedSocket = self._subprocessSockets[0]
+ selectedSocket.sendSocketToPeer(skt, description)
+ # XXX Maybe want to send along 'description' or 'skt' or some
+ # properties thereof? -glyph
+ selectedSocket.status = self.statusWatcher.newConnectionStatus(selectedSocket.status)
+
+
+ def addSocket(self):
+ """
+ Add a C{sendmsg()}-oriented AF_UNIX socket to the pool of sockets being
+ used for transmitting file descriptors to child processes.
+
+ @return: a socket object for the receiving side; pass this object's
+ C{fileno()} as part of the C{childFDs} argument to
+ C{spawnProcess()}, then close it.
+ """
+ i, o = socketpair(AF_UNIX, SOCK_DGRAM)
+ a = _SubprocessSocket(self, o)
+ a.startReading()
+ self._subprocessSockets.append(a)
+ return i
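A hedged sketch of the master-side wiring implied by the docstring above, using the watcher sketched earlier; the worker script name and the choice of child fd 3 are illustrative assumptions, not part of this change:

    import sys
    from twisted.internet import reactor
    from twisted.internet.protocol import ProcessProtocol

    dispatcher = InheritedSocketDispatcher(ConnectionCountingWatcher())
    inheritedSocket = dispatcher.addSocket()
    reactor.spawnProcess(
        ProcessProtocol(), sys.executable, [sys.executable, "worker.py"],
        childFDs={0: "w", 1: "r", 2: "r", 3: inheritedSocket.fileno()},
    )
    inheritedSocket.close()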
+
+
+
+class InheritedPort(FileDescriptor, object):
+ """
+ Create this in the 'slave' process to handle incoming connections
+ dispatched via C{sendmsg}.
+ """
+
+ def __init__(self, fd, transportFactory, protocolFactory):
+ """
+ @param fd: a file descriptor
+
+ @type fd: C{int}
+
+ @param transportFactory: a 3-argument function that takes the socket
+ object produced from the file descriptor, the (non-ancillary) data
+ sent along with the incoming file descriptor, and the protocol
+ built along with it, and returns an L{ITransport} provider. Note
+ that this should NOT call C{makeConnection} on the protocol that it
+ produces, as this class will do that.
+
+ @param protocolFactory: an L{IProtocolFactory}
+ """
+ FileDescriptor.__init__(self)
+ self.fd = fd
+ self.transportFactory = transportFactory
+ self.protocolFactory = protocolFactory
+ self.statusQueue = []
+
+
+ def fileno(self):
+ """
+ Get the FD number for this socket.
+ """
+ return self.fd
+
+
+ def doRead(self):
+ """
+ A message is ready to read. Receive a file descriptor from our parent
+ process.
+ """
+ try:
+ fd, description = recvfd(self.fd)
+ except SocketError, se:
+ if se.errno != EAGAIN:
+ raise
+ else:
+ try:
+ skt = fromfd(fd, AF_INET, SOCK_STREAM)
+ # XXX it could be AF_UNIX, I guess? or even something else?
+ # should this be on the transportFactory's side of things?
+
+ close(fd) # fromfd() calls dup()
+ peeraddr = skt.getpeername()
+ protocol = self.protocolFactory.buildProtocol(peeraddr)
+ transport = self.transportFactory(skt, description, protocol)
+ protocol.makeConnection(transport)
+ except:
+ log.err()
+
+
+ def doWrite(self):
+ """
+ Transmit as many queued status messages as we can.
+ """
+ while self.statusQueue:
+ msg = self.statusQueue.pop(0)
+ try:
+ sendmsg(self.fd, msg, 0)
+ except SocketError, se:
+ if se.errno in (EAGAIN, ENOBUFS):
+ self.statusQueue.insert(0, msg)
+ return
+ raise
+ self.stopWriting()
+
+
+ def reportStatus(self, statusMessage):
+ """
+ Report a status message to the L{_SubprocessSocket} monitoring this
+ L{InheritedPort}'s health in the master process.
+ """
+ self.statusQueue.append(statusMessage)
+ self.startWriting()
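On the slave side, a hedged sketch of how this class is meant to be driven; fd 3, MyTransport, and MyServerFactory stand in for application-supplied pieces:

    def transportFactory(skt, description, protocol):
        # must NOT call protocol.makeConnection(); InheritedPort does that
        return MyTransport(skt, protocol)

    inheritedPort = InheritedPort(3, transportFactory, MyServerFactory())
    inheritedPort.startReading()        # begin accepting dispatched sockets
    inheritedPort.reportStatus("0")     # e.g. tell the master we hold 0 connections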
+
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/tcp.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/tcp.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/internet/tcp.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -39,21 +39,28 @@
Mixin for resetting maxAccepts.
"""
def doRead(self):
- self.numberAccepts = min(self.factory.maxRequests - self.factory.outstandingRequests, self.factory.maxAccepts)
+ self.numberAccepts = min(
+ self.factory.maxRequests - self.factory.outstandingRequests,
+ self.factory.maxAccepts
+ )
tcp.Port.doRead(self)
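A worked example of the throttling computation above, with purely illustrative numbers:

    maxRequests, outstandingRequests, maxAccepts = 100, 97, 10
    numberAccepts = min(maxRequests - outstandingRequests, maxAccepts)
    assert numberAccepts == 3   # at most 3 accepts on this doRead() pass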
+
+
class MaxAcceptTCPPort(MaxAcceptPortMixin, tcp.Port):
"""
Use for non-inheriting tcp ports.
"""
- pass
+
+
class MaxAcceptSSLPort(MaxAcceptPortMixin, ssl.Port):
"""
Use for non-inheriting SSL ports.
"""
- pass
+
+
class InheritedTCPPort(MaxAcceptTCPPort):
"""
A tcp port which uses an inherited file descriptor.
@@ -100,6 +107,9 @@
"""
 TCP server which uses MaxAcceptTCPPorts (and, optionally,
inherited ports)
+
+ @ivar myPort: When running, this is set to the L{IListeningPort} being
+ managed by this service.
"""
def __init__(self, *args, **kwargs):
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/memcacheclient.py (from rev 5438, CalendarServer/trunk/twext/python/memcacheclient.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/memcacheclient.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/memcacheclient.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,1456 @@
+#!/usr/bin/env python
+
+"""
+client module for memcached (memory cache daemon)
+
+Overview
+========
+
+See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
+
+Usage summary
+=============
+
+This should give you a feel for how this module operates::
+
+ import memcacheclient
+ mc = memcacheclient.Client(['127.0.0.1:11211'], debug=0)
+
+ mc.set("some_key", "Some value")
+ value = mc.get("some_key")
+
+ mc.set("another_key", 3)
+ mc.delete("another_key")
+
+ mc.set("key", "1") # note that the key used for incr/decr must be a string.
+ mc.incr("key")
+ mc.decr("key")
+
+The standard way to use memcache with a database is like this::
+
+ key = derive_key(obj)
+ obj = mc.get(key)
+ if not obj:
+ obj = backend_api.get(...)
+ mc.set(key, obj)
+
+ # we now have obj, and future passes through this code
+ # will use the object from the cache.
+
+Detailed Documentation
+======================
+
+More detailed documentation is available in the L{Client} class.
+"""
+
+import sys
+import socket
+import time
+import os
+import re
+import types
+
+from twext.python.log import Logger
+
+from twistedcaldav.config import config
+
+log = Logger()
+
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from zlib import compress, decompress
+ _supports_compress = True
+except ImportError:
+ _supports_compress = False
+ # quickly define a decompress just in case we recv compressed data.
+ def decompress(val):
+ raise _Error("received compressed data but I don't support compession (import error)")
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+from binascii import crc32 # zlib version is not cross-platform
+serverHashFunction = crc32
+
+__author__ = "Evan Martin <martine at danga.com>"
+__version__ = "1.44"
+__copyright__ = "Copyright (C) 2003 Danga Interactive"
+__license__ = "Python"
+
+SERVER_MAX_KEY_LENGTH = 250
+# Storing values larger than 1MB requires recompiling memcached. If you do,
+# this value can be changed by doing "memcacheclient.SERVER_MAX_VALUE_LENGTH = N"
+# after importing this module.
+SERVER_MAX_VALUE_LENGTH = 1024*1024
+
+class _Error(Exception):
+ pass
+
+class MemcacheError(_Error):
+ """
+ Memcache connection error
+ """
+
+class NotFoundError(MemcacheError):
+ """
+ NOT_FOUND error
+ """
+
+class TokenMismatchError(MemcacheError):
+ """
+ Check-and-set token mismatch
+ """
+
+try:
+ # Only exists in Python 2.4+
+ from threading import local
+except ImportError:
+ # TODO: add the pure-python local implementation
+ class local(object):
+ pass
+
+class ClientFactory(object):
+
+ # unit tests should set this to True to enable the fake test cache
+ allowTestCache = False
+
+ @classmethod
+ def getClient(cls, servers, debug=0, pickleProtocol=0,
+ pickler=pickle.Pickler, unpickler=pickle.Unpickler,
+ pload=None, pid=None):
+
+ if config.Memcached.Pools.Default.ClientEnabled:
+ return Client(servers, debug=debug, pickleProtocol=pickleProtocol,
+ pickler=pickler, unpickler=unpickler, pload=pload, pid=pid)
+ elif cls.allowTestCache:
+ return TestClient(servers, debug=debug,
+ pickleProtocol=pickleProtocol, pickler=pickler,
+ unpickler=unpickler, pload=pload, pid=pid)
+ else:
+ return None
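A hedged usage sketch: getClient() may return a real Client, a TestClient (when allowTestCache is set and the real client is disabled), or None, so callers should handle the None case:

    client = ClientFactory.getClient(["127.0.0.1:11211"], debug=0)
    if client is not None:
        client.set("some_key", "Some value")

    # In unit tests, assuming ClientEnabled is False in the test configuration:
    ClientFactory.allowTestCache = True
    testClient = ClientFactory.getClient(["127.0.0.1:11211"])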
+
+
+class Client(local):
+ """
+ Object representing a pool of memcache servers.
+
+ See L{memcache} for an overview.
+
+ In all cases where a key is used, the key can be either:
+ 1. A simple hashable type (string, integer, etc.).
+ 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
+ making this module calculate a hash value. You may prefer, for
+ example, to keep all of a given user's objects on the same memcache
+ server, so you could use the user's unique id as the hash value.
+
+ @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
+ @group Insertion: set, add, replace, set_multi
+ @group Retrieval: get, get_multi
+ @group Integers: incr, decr
+ @group Removal: delete, delete_multi
+ @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
+ set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi
+ """
+ _FLAG_PICKLE = 1<<0
+ _FLAG_INTEGER = 1<<1
+ _FLAG_LONG = 1<<2
+ _FLAG_COMPRESSED = 1<<3
+
+ _SERVER_RETRIES = 10 # how many times to try finding a free server.
+
+ # exceptions for Client
+ class MemcachedKeyError(Exception):
+ pass
+ class MemcachedKeyLengthError(MemcachedKeyError):
+ pass
+ class MemcachedKeyCharacterError(MemcachedKeyError):
+ pass
+ class MemcachedKeyNoneError(MemcachedKeyError):
+ pass
+ class MemcachedKeyTypeError(MemcachedKeyError):
+ pass
+ class MemcachedStringEncodingError(Exception):
+ pass
+
+ def __init__(self, servers, debug=0, pickleProtocol=0,
+ pickler=pickle.Pickler, unpickler=pickle.Unpickler,
+ pload=None, pid=None):
+ """
+ Create a new Client object with the given list of servers.
+
+ @param servers: C{servers} is passed to L{set_servers}.
+ @param debug: whether to display error messages when a server can't be
+ contacted.
+ @param pickleProtocol: number to mandate protocol used by (c)Pickle.
+ @param pickler: optional override of default Pickler to allow subclassing.
+ @param unpickler: optional override of default Unpickler to allow subclassing.
+ @param pload: optional persistent_load function to call on pickle loading.
+ Useful for cPickle since subclassing isn't allowed.
+ @param pid: optional persistent_id function to call on pickle storing.
+ Useful for cPickle since subclassing isn't allowed.
+ """
+ local.__init__(self)
+ self.set_servers(servers)
+ self.debug = debug
+ self.stats = {}
+
+ # Allow users to modify pickling/unpickling behavior
+ self.pickleProtocol = pickleProtocol
+ self.pickler = pickler
+ self.unpickler = unpickler
+ self.persistent_load = pload
+ self.persistent_id = pid
+
+ # figure out the pickler style
+ file = StringIO()
+ try:
+ pickler = self.pickler(file, protocol = self.pickleProtocol)
+ self.picklerIsKeyword = True
+ except TypeError:
+ self.picklerIsKeyword = False
+
+ def set_servers(self, servers):
+ """
+ Set the pool of servers used by this client.
+
+ @param servers: an array of servers.
+ Servers can be passed in two forms:
+ 1. Strings of the form C{"host:port"}, which implies a default weight of 1.
+ 2. Tuples of the form C{("host:port", weight)}, where C{weight} is
+ an integer weight value.
+ """
+ self.servers = [_Host(s, self.debuglog) for s in servers]
+ self._init_buckets()
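Following the two forms described above, a brief example (the addresses and the weight of 3 are placeholders):

    mc.set_servers(["127.0.0.1:11211", ("10.0.0.2:11211", 3)])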
+
+ def get_stats(self):
+ '''Get statistics from each of the servers.
+
+ @return: A list of tuples ( server_identifier, stats_dictionary ).
+ The dictionary contains a number of name/value pairs specifying
+ the name of the status field and the string value associated with
+ it. The values are not converted from strings.
+ '''
+ data = []
+ for s in self.servers:
+ if not s.connect(): continue
+ if s.family == socket.AF_INET:
+ name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
+ else:
+ name = 'unix:%s (%s)' % ( s.address, s.weight )
+ s.send_cmd('stats')
+ serverData = {}
+ data.append(( name, serverData ))
+ readline = s.readline
+ while 1:
+ line = readline()
+ if not line or line.strip() == 'END': break
+ stats = line.split(' ', 2)
+ serverData[stats[1]] = stats[2]
+
+ return(data)
+
+ def get_slabs(self):
+ data = []
+ for s in self.servers:
+ if not s.connect(): continue
+ if s.family == socket.AF_INET:
+ name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
+ else:
+ name = 'unix:%s (%s)' % ( s.address, s.weight )
+ serverData = {}
+ data.append(( name, serverData ))
+ s.send_cmd('stats items')
+ readline = s.readline
+ while 1:
+ line = readline()
+ if not line or line.strip() == 'END': break
+ item = line.split(' ', 2)
+ #0 = STAT, 1 = ITEM, 2 = Value
+ slab = item[1].split(':', 2)
+ #0 = items, 1 = Slab #, 2 = Name
+ if not serverData.has_key(slab[1]):
+ serverData[slab[1]] = {}
+ serverData[slab[1]][slab[2]] = item[2]
+ return data
+
+ def flush_all(self):
+ 'Expire all data currently in the memcache servers.'
+ for s in self.servers:
+ if not s.connect(): continue
+ s.send_cmd('flush_all')
+ s.expect("OK")
+
+ def debuglog(self, str):
+ if self.debug:
+ sys.stderr.write("MemCached: %s\n" % str)
+
+ def _statlog(self, func):
+ if not self.stats.has_key(func):
+ self.stats[func] = 1
+ else:
+ self.stats[func] += 1
+
+ def forget_dead_hosts(self):
+ """
+ Reset every host in the pool to an "alive" state.
+ """
+ for s in self.servers:
+ s.deaduntil = 0
+
+ def _init_buckets(self):
+ self.buckets = []
+ for server in self.servers:
+ for i in range(server.weight):
+ self.buckets.append(server)
+
+ def _get_server(self, key):
+ if type(key) == types.TupleType:
+ serverhash, key = key
+ else:
+ serverhash = serverHashFunction(key)
+
+ for i in range(Client._SERVER_RETRIES):
+ server = self.buckets[serverhash % len(self.buckets)]
+ if server.connect():
+ #print "(using server %s)" % server,
+ return server, key
+ serverhash = serverHashFunction(str(serverhash) + str(i))
+ log.error("Memcacheclient _get_server( ) failed to connect")
+ return None, None
+
+ def disconnect_all(self):
+ for s in self.servers:
+ s.close_socket()
+
+ def delete_multi(self, keys, time=0, key_prefix=''):
+ '''
+ Delete multiple keys in the memcache doing just one query.
+
+ >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
+ >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
+ 1
+ >>> mc.delete_multi(['key1', 'key2'])
+ 1
+ >>> mc.get_multi(['key1', 'key2']) == {}
+ 1
+
+
+ This method is recommended over iterated regular L{delete}s as it reduces total latency, since
+ your app doesn't have to wait for each round-trip of L{delete} before sending
+ the next one.
+
+ @param keys: An iterable of keys to clear
+ @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
+ @param key_prefix: Optional string to prepend to each key when sending to memcache.
+ See docs for L{get_multi} and L{set_multi}.
+
+ @return: 1 if no failure in communication with any memcacheds.
+ @rtype: int
+
+ '''
+
+ self._statlog('delete_multi')
+
+ server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
+
+ # send out all requests on each server before reading anything
+ dead_servers = []
+
+ rc = 1
+ for server in server_keys.iterkeys():
+ bigcmd = []
+ write = bigcmd.append
+ if time != None:
+ for key in server_keys[server]: # These are mangled keys
+ write("delete %s %d\r\n" % (key, time))
+ else:
+ for key in server_keys[server]: # These are mangled keys
+ write("delete %s\r\n" % key)
+ try:
+ server.send_cmds(''.join(bigcmd))
+ except socket.error, msg:
+ rc = 0
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ dead_servers.append(server)
+
+ # if any servers died on the way, don't expect them to respond.
+ for server in dead_servers:
+ del server_keys[server]
+
+ for server, keys in server_keys.iteritems():
+ try:
+ for key in keys:
+ server.expect("DELETED")
+ except socket.error, msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ rc = 0
+ return rc
+
+ def delete(self, key, time=0):
+ '''Deletes a key from the memcache.
+
+ @return: Nonzero on success.
+ @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
+ @rtype: int
+ '''
+ check_key(key)
+ server, key = self._get_server(key)
+ if not server:
+ return 0
+ self._statlog('delete')
+ if time != None:
+ cmd = "delete %s %d" % (key, time)
+ else:
+ cmd = "delete %s" % key
+
+ try:
+ server.send_cmd(cmd)
+ server.expect("DELETED")
+ except socket.error, msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ return 0
+ return 1
+
+ def incr(self, key, delta=1):
+ """
+ Sends a command to the server to atomically increment the value for C{key} by
+ C{delta}, or by 1 if C{delta} is unspecified. Returns None if C{key} doesn't
+ exist on server, otherwise it returns the new value after incrementing.
+
+ Note that the value for C{key} must already exist in the memcache, and it
+ must be the string representation of an integer.
+
+ >>> mc.set("counter", "20") # returns 1, indicating success
+ 1
+ >>> mc.incr("counter")
+ 21
+ >>> mc.incr("counter")
+ 22
+
+ Overflow on server is not checked. Be aware of values approaching
+ 2**32. See L{decr}.
+
+ @param delta: Integer amount to increment by (should be zero or greater).
+ @return: New value after incrementing.
+ @rtype: int
+ """
+ return self._incrdecr("incr", key, delta)
+
+ def decr(self, key, delta=1):
+ """
+ Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
+ new values are capped at 0. If server value is 1, a decrement of 2
+ returns 0, not -1.
+
+ @param delta: Integer amount to decrement by (should be zero or greater).
+ @return: New value after decrementing.
+ @rtype: int
+ """
+ return self._incrdecr("decr", key, delta)
+
+ def _incrdecr(self, cmd, key, delta):
+ check_key(key)
+ server, key = self._get_server(key)
+ if not server:
+ return 0
+ self._statlog(cmd)
+ cmd = "%s %s %d" % (cmd, key, delta)
+ try:
+ server.send_cmd(cmd)
+ line = server.readline()
+ return int(line)
+ except socket.error, msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ return None
+
+ def add(self, key, val, time = 0, min_compress_len = 0):
+ '''
+ Add new key with value.
+
+ Like L{set}, but only stores in memcache if the key doesn't already exist.
+
+ @return: Nonzero on success.
+ @rtype: int
+ '''
+ return self._set("add", key, val, time, min_compress_len)
+
+ def append(self, key, val, time=0, min_compress_len=0):
+ '''Append the value to the end of the existing key's value.
+
+ Only stores in memcache if key already exists.
+ Also see L{prepend}.
+
+ @return: Nonzero on success.
+ @rtype: int
+ '''
+ return self._set("append", key, val, time, min_compress_len)
+
+ def prepend(self, key, val, time=0, min_compress_len=0):
+ '''Prepend the value to the beginning of the existing key's value.
+
+ Only stores in memcache if key already exists.
+ Also see L{append}.
+
+ @return: Nonzero on success.
+ @rtype: int
+ '''
+ return self._set("prepend", key, val, time, min_compress_len)
+
+ def replace(self, key, val, time=0, min_compress_len=0):
+ '''Replace existing key with value.
+
+ Like L{set}, but only stores in memcache if the key already exists.
+ The opposite of L{add}.
+
+ @return: Nonzero on success.
+ @rtype: int
+ '''
+ return self._set("replace", key, val, time, min_compress_len)
+
+ def set(self, key, val, time=0, min_compress_len=0, token=None):
+ '''Unconditionally sets a key to a given value in the memcache.
+
+ The C{key} can optionally be a tuple, with the first element
+ being the server hash value and the second being the key, if you
+ want to avoid making this module calculate a hash value. You may
+ prefer, for example, to keep all of a given user's objects on the
+ same memcache server, so you could use the user's unique id as
+ the hash value.
+
+ @return: Nonzero on success.
+ @rtype: int
+ @param time: Tells memcached the time at which this value should expire,
+ either as a delta number of seconds, or an absolute unix time-since-the-epoch
+ value. See the memcached protocol docs section "Storage Commands"
+ for more info on <exptime>. We default to 0 == cache forever.
+ @param min_compress_len: The threshold length to kick in auto-compression
+ of the value using the zlib.compress() routine. If the value being cached is
+ a string, then the length of the string is measured, else if the value is an
+ object, then the length of the pickle result is measured. If the resulting
+ attempt at compression yields a larger string than the input, then it is
+ discarded. For backwards compatibility, this parameter defaults to 0,
+ indicating don't ever try to compress.
+ '''
+ return self._set("set", key, val, time, min_compress_len, token=token)
+
+
+ def _map_and_prefix_keys(self, key_iterable, key_prefix):
+ """Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of
+ prefixed key -> original key.
+
+
+ """
+ # Check it just once ...
+ key_extra_len=len(key_prefix)
+ if key_prefix:
+ check_key(key_prefix)
+
+ # server (_Host) -> list of unprefixed server keys in mapping
+ server_keys = {}
+
+ prefixed_to_orig_key = {}
+ # build up a list for each server of all the keys we want.
+ for orig_key in key_iterable:
+ if type(orig_key) is types.TupleType:
+ # Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on.
+ # Ensure call to _get_server gets a Tuple as well.
+ str_orig_key = str(orig_key[1])
+ server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key.
+ else:
+ str_orig_key = str(orig_key) # set_multi supports int / long keys.
+ server, key = self._get_server(key_prefix + str_orig_key)
+
+ # Now check to make sure key length is proper ...
+ check_key(str_orig_key, key_extra_len=key_extra_len)
+
+ if not server:
+ continue
+
+ if not server_keys.has_key(server):
+ server_keys[server] = []
+ server_keys[server].append(key)
+ prefixed_to_orig_key[key] = orig_key
+
+ return (server_keys, prefixed_to_orig_key)
+
+ def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
+ '''
+ Sets multiple keys in the memcache doing just one query.
+
+ >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
+ >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
+ 1
+
+
+ This method is recommended over regular L{set} as it lowers the number of
+ total packets flying around your network, reducing total latency, since
+ your app doesn't have to wait for each round-trip of L{set} before sending
+ the next one.
+
+ @param mapping: A dict of key/value pairs to set.
+ @param time: Tells memcached the time at which this value should expire, either
+ as a delta number of seconds, or an absolute unix time-since-the-epoch
+ value. See the memcached protocol docs section "Storage Commands"
+ for more info on <exptime>. We default to 0 == cache forever.
+ @param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache:
+ >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
+ >>> len(notset_keys) == 0
+ True
+ >>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
+ True
+
+ Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache.
+ In this case, the return result would be the list of notset original keys, prefix not applied.
+
+ @param min_compress_len: The threshold length to kick in auto-compression
+ of the value using the zlib.compress() routine. If the value being cached is
+ a string, then the length of the string is measured, else if the value is an
+ object, then the length of the pickle result is measured. If the resulting
+ attempt at compression yields a larger string than the input, then it is
+ discarded. For backwards compatibility, this parameter defaults to 0,
+ indicating don't ever try to compress.
+ @return: List of keys which failed to be stored [ memcache out of memory, etc. ].
+ @rtype: list
+
+ '''
+
+ self._statlog('set_multi')
+
+
+
+ server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(mapping.iterkeys(), key_prefix)
+
+ # send out all requests on each server before reading anything
+ dead_servers = []
+
+ for server in server_keys.iterkeys():
+ bigcmd = []
+ write = bigcmd.append
+ try:
+ for key in server_keys[server]: # These are mangled keys
+ store_info = self._val_to_store_info(mapping[prefixed_to_orig_key[key]], min_compress_len)
+ write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0], time, store_info[1], store_info[2]))
+ server.send_cmds(''.join(bigcmd))
+ except socket.error, msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ dead_servers.append(server)
+
+ # if any servers died on the way, don't expect them to respond.
+ for server in dead_servers:
+ del server_keys[server]
+
+ # short-circuit if there are no servers, just return all keys
+ if not server_keys: return(mapping.keys())
+
+ notstored = [] # original keys.
+ for server, keys in server_keys.iteritems():
+ try:
+ for key in keys:
+ line = server.readline()
+ if line == 'STORED':
+ continue
+ else:
+ notstored.append(prefixed_to_orig_key[key]) #un-mangle.
+ except (_Error, socket.error), msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ return notstored
+
+ def _val_to_store_info(self, val, min_compress_len):
+ """
+ Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself.
+ """
+ flags = 0
+ if isinstance(val, str):
+ pass
+ elif isinstance(val, int):
+ flags |= Client._FLAG_INTEGER
+ val = "%d" % val
+ # force no attempt to compress this silly string.
+ min_compress_len = 0
+ elif isinstance(val, long):
+ flags |= Client._FLAG_LONG
+ val = "%d" % val
+ # force no attempt to compress this silly string.
+ min_compress_len = 0
+ else:
+ flags |= Client._FLAG_PICKLE
+ file = StringIO()
+ if self.picklerIsKeyword:
+ pickler = self.pickler(file, protocol = self.pickleProtocol)
+ else:
+ pickler = self.pickler(file, self.pickleProtocol)
+ if self.persistent_id:
+ pickler.persistent_id = self.persistent_id
+ pickler.dump(val)
+ val = file.getvalue()
+
+ lv = len(val)
+ # We should try to compress if min_compress_len > 0 and we could
+ # import zlib and this string is longer than our min threshold.
+ if min_compress_len and _supports_compress and lv > min_compress_len:
+ comp_val = compress(val)
+ # Only retain the result if the compression result is smaller
+ # than the original.
+ if len(comp_val) < lv:
+ flags |= Client._FLAG_COMPRESSED
+ val = comp_val
+
+ # silently do not store if value length exceeds maximum
+ if len(val) >= SERVER_MAX_VALUE_LENGTH: return(0)
+
+ return (flags, len(val), val)
+
+ def _set(self, cmd, key, val, time, min_compress_len = 0, token=None):
+ check_key(key)
+ server, key = self._get_server(key)
+ if not server:
+ return 0
+
+ self._statlog(cmd)
+
+ store_info = self._val_to_store_info(val, min_compress_len)
+ if not store_info: return(0)
+
+ if token is not None:
+ cmd = "cas"
+ fullcmd = "cas %s %d %d %d %s\r\n%s" % (key, store_info[0], time, store_info[1], token, store_info[2])
+ else:
+ fullcmd = "%s %s %d %d %d\r\n%s" % (cmd, key, store_info[0], time, store_info[1], store_info[2])
+ try:
+ server.send_cmd(fullcmd)
+ result = server.expect("STORED")
+
+ if (result == "STORED"):
+ return True
+
+ if (result == "NOT_FOUND"):
+ raise NotFoundError(key)
+
+ if token and result == "EXISTS":
+ log.debug("Memcacheclient check-and-set failed")
+ raise TokenMismatchError(key)
+
+ log.error("Memcacheclient %s command failed with result (%s)" %
+ (cmd, result))
+
+ return False
+
+ except socket.error, msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ return 0
+
+ def get(self, key):
+ '''Retrieves a key from the memcache.
+
+ @return: The value or None.
+ '''
+ check_key(key)
+ server, key = self._get_server(key)
+ if not server:
+ raise MemcacheError("Memcache connection error")
+
+ self._statlog('get')
+
+ try:
+ server.send_cmd("get %s" % key)
+ rkey, flags, rlen, = self._expectvalue(server)
+ if not rkey:
+ return None
+ value = self._recv_value(server, flags, rlen)
+ server.expect("END")
+ except (_Error, socket.error), msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ raise MemcacheError("Memcache connection error")
+ return value
+
+ def gets(self, key):
+ '''Retrieves a key from the memcache.
+
+ @return: The value or None.
+ '''
+ check_key(key)
+ server, key = self._get_server(key)
+ if not server:
+ raise MemcacheError("Memcache connection error")
+
+ self._statlog('get')
+
+ try:
+ server.send_cmd("gets %s" % key)
+ rkey, flags, rlen, cas_token = self._expectvalue_cas(server)
+ if not rkey:
+ return (None, None)
+ value = self._recv_value(server, flags, rlen)
+ server.expect("END")
+ except (_Error, socket.error), msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ raise MemcacheError("Memcache connection error")
+ return (value, cas_token)
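Together with set(..., token=...), gets() enables a check-and-set style update; a hedged sketch in which computeNewState is a hypothetical application function:

    value, token = mc.gets("some_key")
    try:
        mc.set("some_key", computeNewState(value), token=token)
    except TokenMismatchError:
        # another writer updated the key after our gets(); retry or give up
        pass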
+
+ def get_multi(self, keys, key_prefix=''):
+ '''
+ Retrieves multiple keys from the memcache doing just one query.
+
+ >>> success = mc.set("foo", "bar")
+ >>> success = mc.set("baz", 42)
+ >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
+ 1
+ >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
+ 1
+
+ This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.
+ >>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
+ 1
+
+ get_multi [ and L{set_multi} ] can take str()-able keys too, such as ints and longs (for example, database primary key fields).
+ They are run through str() before being passed to memcache, with or without the use of a key_prefix.
+ In this mode, the key_prefix could be a table name, and the key itself a db primary key number.
+
+ >>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
+ 1
+ >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
+ 1
+
+ This method is recommended over regular L{get} as it lowers the number of
+ total packets flying around your network, reducing total latency, since
+ your app doesn't have to wait for each round-trip of L{get} before sending
+ the next one.
+
+ See also L{set_multi}.
+
+ @param keys: An array of keys.
+ @param key_prefix: A string to prefix each key when we communicate with memcache.
+ Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
+ @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present.
+
+ '''
+
+ self._statlog('get_multi')
+
+ server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
+
+ # send out all requests on each server before reading anything
+ dead_servers = []
+ for server in server_keys.iterkeys():
+ try:
+ server.send_cmd("get %s" % " ".join(server_keys[server]))
+ except socket.error, msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ dead_servers.append(server)
+
+ # if any servers died on the way, don't expect them to respond.
+ for server in dead_servers:
+ del server_keys[server]
+
+ retvals = {}
+ for server in server_keys.iterkeys():
+ try:
+ line = server.readline()
+ while line and line != 'END':
+ rkey, flags, rlen = self._expectvalue(server, line)
+ # Bo Yang reports that this can sometimes be None
+ if rkey is not None:
+ val = self._recv_value(server, flags, rlen)
+ try:
+ retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key.
+ except KeyError:
+ pass
+ line = server.readline()
+ except (_Error, socket.error), msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ return retvals
+
+ def gets_multi(self, keys, key_prefix=''):
+ '''
+ Retrieves multiple keys from the memcache doing just one query.
+ See also L{gets} and L{get_multi}.
+ '''
+
+ self._statlog('gets_multi')
+
+ server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
+
+ # send out all requests on each server before reading anything
+ dead_servers = []
+ for server in server_keys.iterkeys():
+ try:
+ server.send_cmd("gets %s" % " ".join(server_keys[server]))
+ except socket.error, msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ dead_servers.append(server)
+
+ # if any servers died on the way, don't expect them to respond.
+ for server in dead_servers:
+ del server_keys[server]
+
+ retvals = {}
+ for server in server_keys.iterkeys():
+ try:
+ line = server.readline()
+ while line and line != 'END':
+ rkey, flags, rlen, cas_token = self._expectvalue_cas(server, line)
+ # Bo Yang reports that this can sometimes be None
+ if rkey is not None:
+ val = self._recv_value(server, flags, rlen)
+ try:
+ retvals[prefixed_to_orig_key[rkey]] = (val, cas_token) # un-prefix returned key.
+ except KeyError:
+ pass
+ line = server.readline()
+ except (_Error, socket.error), msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ server.mark_dead(msg)
+ return retvals
+
+ def _expectvalue(self, server, line=None):
+ if not line:
+ line = server.readline()
+
+ if line[:5] == 'VALUE':
+ resp, rkey, flags, length = line.split()
+ flags = int(flags)
+ rlen = int(length)
+ return (rkey, flags, rlen)
+ else:
+ return (None, None, None)
+
+ def _expectvalue_cas(self, server, line=None):
+ if not line:
+ line = server.readline()
+
+ if line[:5] == 'VALUE':
+ resp, rkey, flags, length, rtoken = line.split()
+ flags = int(flags)
+ rlen = int(length)
+ return (rkey, flags, rlen, rtoken)
+ else:
+ return (None, None, None, None)
+
+ def _recv_value(self, server, flags, rlen):
+ rlen += 2 # include \r\n
+ buf = server.recv(rlen)
+ if len(buf) != rlen:
+ raise _Error("received %d bytes when expecting %d" % (len(buf), rlen))
+
+ if len(buf) == rlen:
+ buf = buf[:-2] # strip \r\n
+
+ if flags & Client._FLAG_COMPRESSED:
+ buf = decompress(buf)
+
+
+ if flags == 0 or flags == Client._FLAG_COMPRESSED:
+ # Either a bare string or a compressed string now decompressed...
+ val = buf
+ elif flags & Client._FLAG_INTEGER:
+ val = int(buf)
+ elif flags & Client._FLAG_LONG:
+ val = long(buf)
+ elif flags & Client._FLAG_PICKLE:
+ try:
+ file = StringIO(buf)
+ unpickler = self.unpickler(file)
+ if self.persistent_load:
+ unpickler.persistent_load = self.persistent_load
+ val = unpickler.load()
+ except Exception, e:
+ self.debuglog('Pickle error: %s\n' % e)
+ val = None
+ else:
+ self.debuglog("unknown flags on get: %x\n" % flags)
+
+ return val
+
+
+
+class TestClient(Client):
+ """
+ Fake memcache client for unit tests
+
+ """
+
+ def __init__(self, servers, debug=0, pickleProtocol=0,
+ pickler=pickle.Pickler, unpickler=pickle.Unpickler,
+ pload=None, pid=None):
+
+ local.__init__(self)
+
+ super(TestClient, self).__init__(servers, debug=debug,
+ pickleProtocol=pickleProtocol, pickler=pickler, unpickler=unpickler,
+ pload=pload, pid=pid)
+
+ self.data = {}
+ self.token = 0
+
+
+
+ def get_stats(self):
+ raise NotImplementedError()
+
+ def get_slabs(self):
+ raise NotImplementedError()
+
+ def flush_all(self):
+ raise NotImplementedError()
+
+ def forget_dead_hosts(self):
+ raise NotImplementedError()
+
+ def delete_multi(self, keys, time=0, key_prefix=''):
+ '''
+ Delete multiple keys in the memcache doing just one query.
+
+ >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
+ >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
+ 1
+ >>> mc.delete_multi(['key1', 'key2'])
+ 1
+ >>> mc.get_multi(['key1', 'key2']) == {}
+ 1
+ '''
+
+ self._statlog('delete_multi')
+ for key in keys:
+ key = key_prefix + key
+ del self.data[key]
+ return 1
+
+ def delete(self, key, time=0):
+ '''Deletes a key from the memcache.
+
+ @return: Nonzero on success.
+ @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
+ @rtype: int
+ '''
+ check_key(key)
+ del self.data[key]
+ return 1
+
+
+ def incr(self, key, delta=1):
+ raise NotImplementedError()
+
+ def decr(self, key, delta=1):
+ raise NotImplementedError()
+
+ def add(self, key, val, time = 0, min_compress_len = 0):
+ raise NotImplementedError()
+
+ def append(self, key, val, time=0, min_compress_len=0):
+ raise NotImplementedError()
+
+ def prepend(self, key, val, time=0, min_compress_len=0):
+ raise NotImplementedError()
+
+ def replace(self, key, val, time=0, min_compress_len=0):
+ raise NotImplementedError()
+
+ def set(self, key, val, time=0, min_compress_len=0, token=None):
+ self._statlog('set')
+ return self._set("set", key, val, time, min_compress_len, token=token)
+
+ def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
+ self._statlog('set_multi')
+ for key, val in mapping.iteritems():
+ key = key_prefix + key
+ self._set("set", key, val, time, min_compress_len)
+ return []
+
+ def _set(self, cmd, key, val, time, min_compress_len = 0, token=None):
+ check_key(key)
+ self._statlog(cmd)
+
+ serialized = pickle.dumps(val, pickle.HIGHEST_PROTOCOL)
+
+ if token is not None:
+ if self.data.has_key(key):
+ stored_val, stored_token = self.data[key]
+ if token != stored_token:
+ raise TokenMismatchError(key)
+
+ self.data[key] = (serialized, str(self.token))
+ self.token += 1
+
+ return True
+
+ def get(self, key):
+ check_key(key)
+
+ self._statlog('get')
+ if self.data.has_key(key):
+ stored_val, stored_token = self.data[key]
+ val = pickle.loads(stored_val)
+ return val
+ return None
+
+
+ def gets(self, key):
+ check_key(key)
+ if self.data.has_key(key):
+ stored_val, stored_token = self.data[key]
+ val = pickle.loads(stored_val)
+ return (val, stored_token)
+ return (None, None)
+
+ def get_multi(self, keys, key_prefix=''):
+ self._statlog('get_multi')
+
+ results = {}
+ for key in keys:
+ key = key_prefix + key
+ val = self.get(key)
+ results[key] = val
+ return results
+
+ def gets_multi(self, keys, key_prefix=''):
+ self._statlog('gets_multi')
+ results = {}
+ for key in keys:
+ key = key_prefix + key
+ result = self.gets(key)
+ if result[1] is not None:
+ results[key] = result
+ return results
+
+
+class _Host:
+ _DEAD_RETRY = 1 # number of seconds before retrying a dead server.
+ _SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout.
+
+ def __init__(self, host, debugfunc=None):
+ if isinstance(host, types.TupleType):
+ host, self.weight = host
+ else:
+ self.weight = 1
+
+ # parse the connection string
+ m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
+ if not m:
+ m = re.match(r'^(?P<proto>inet):'
+ r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
+ if not m: m = re.match(r'^(?P<host>[^:]+):(?P<port>[0-9]+)$', host)
+ if not m:
+ raise ValueError('Unable to parse connection string: "%s"' % host)
+
+ hostData = m.groupdict()
+ if hostData.get('proto') == 'unix':
+ self.family = socket.AF_UNIX
+ self.address = hostData['path']
+ else:
+ self.family = socket.AF_INET
+ self.ip = hostData['host']
+ self.port = int(hostData.get('port') or 11211) # port group may be None
+ self.address = ( self.ip, self.port )
+
+ if not debugfunc:
+ debugfunc = lambda x: x
+ self.debuglog = debugfunc
+
+ self.deaduntil = 0
+ self.socket = None
+
+ self.buffer = ''
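Based on the regular expressions above, all of the following forms should parse (the addresses, socket path, and weight are placeholders):

    _Host("127.0.0.1:11211")                  # plain host:port, weight 1
    _Host("inet:127.0.0.1:11211")             # explicit inet prefix
    _Host("unix:/var/run/memcached.socket")   # UNIX domain socket path
    _Host(("127.0.0.1:11211", 3))             # (connection string, weight)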
+
+ def _check_dead(self):
+ if self.deaduntil and self.deaduntil > time.time():
+ return 1
+ self.deaduntil = 0
+ return 0
+
+ def connect(self):
+ if self._get_socket():
+ return 1
+ return 0
+
+ def mark_dead(self, reason):
+ log.error("Memcacheclient socket marked dead (%s)" % (reason,))
+ self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
+ self.deaduntil = time.time() + _Host._DEAD_RETRY
+ self.close_socket()
+
+ def _get_socket(self):
+ if self._check_dead():
+ log.error("Memcacheclient _get_socket() found dead socket")
+ return None
+ if self.socket:
+ return self.socket
+ s = socket.socket(self.family, socket.SOCK_STREAM)
+ if hasattr(s, 'settimeout'): s.settimeout(self._SOCKET_TIMEOUT)
+ try:
+ s.connect(self.address)
+ except socket.timeout, msg:
+ log.error("Memcacheclient _get_socket() connection timed out (%s)" %
+ (msg,))
+ self.mark_dead("connect: %s" % msg)
+ return None
+ except socket.error, msg:
+ if type(msg) is types.TupleType: msg = msg[1]
+ log.error("Memcacheclient _get_socket() connection error (%s)" %
+ (msg,))
+ self.mark_dead("connect: %s" % msg[1])
+ return None
+ self.socket = s
+ self.buffer = ''
+ return s
+
+ def close_socket(self):
+ if self.socket:
+ self.socket.close()
+ self.socket = None
+
+ def send_cmd(self, cmd):
+ self.socket.sendall(cmd + '\r\n')
+
+ def send_cmds(self, cmds):
+ """ cmds already has trailing \r\n's applied """
+ self.socket.sendall(cmds)
+
+ def readline(self):
+ buf = self.buffer
+ recv = self.socket.recv
+ while True:
+ index = buf.find('\r\n')
+ if index >= 0:
+ break
+ data = recv(4096)
+ if not data:
+ self.mark_dead('Connection closed while reading from %s'
+ % repr(self))
+ break
+ buf += data
+ if index >= 0:
+ self.buffer = buf[index+2:]
+ buf = buf[:index]
+ else:
+ self.buffer = ''
+ return buf
+
+ def expect(self, text):
+ line = self.readline()
+ if line != text:
+ self.debuglog("while expecting '%s', got unexpected response '%s'" % (text, line))
+ return line
+
+ def recv(self, rlen):
+ self_socket_recv = self.socket.recv
+ buf = self.buffer
+ while len(buf) < rlen:
+ foo = self_socket_recv(4096)
+ buf += foo
+ if len(foo) == 0:
+ raise _Error, ( 'Read %d bytes, expecting %d, '
+ 'read returned 0 length bytes' % ( len(buf), rlen ))
+ self.buffer = buf[rlen:]
+ return buf[:rlen]
+
+ def __str__(self):
+ d = ''
+ if self.deaduntil:
+ d = " (dead until %d)" % self.deaduntil
+
+ if self.family == socket.AF_INET:
+ return "inet:%s:%d%s" % (self.address[0], self.address[1], d)
+ else:
+ return "unix:%s%s" % (self.address, d)
+
+def check_key(key, key_extra_len=0):
+ """Checks sanity of key. Fails if:
+ Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength).
+ Contains control characters (Raises MemcachedKeyCharacterError).
+ Is not a string (Raises MemcachedStringEncodingError)
+ Is an unicode string (Raises MemcachedStringEncodingError)
+ Is not a string (Raises MemcachedKeyError)
+ Is None (Raises MemcachedKeyError)
+ """
+
+ return # Short-circuit this expensive method
+
+ if type(key) == types.TupleType: key = key[1]
+ if not key:
+ raise Client.MemcachedKeyNoneError, ("Key is None")
+ if isinstance(key, unicode):
+ raise Client.MemcachedStringEncodingError, ("Keys must be str()'s, not "
+ "unicode. Convert your unicode strings using "
+ "mystring.encode(charset)!")
+ if not isinstance(key, str):
+ raise Client.MemcachedKeyTypeError, ("Key must be str()'s")
+
+ if isinstance(key, basestring):
+ if len(key) + key_extra_len > SERVER_MAX_KEY_LENGTH:
+ raise Client.MemcachedKeyLengthError, ("Key length is > %s"
+ % SERVER_MAX_KEY_LENGTH)
+ for char in key:
+ if ord(char) < 32 or ord(char) == 127:
+ raise Client.MemcachedKeyCharacterError, "Control characters not allowed"
+
+def _doctest():
+ import doctest, memcacheclient
+ servers = ["127.0.0.1:11211"]
+ mc = Client(servers, debug=1)
+ globs = {"mc": mc}
+ return doctest.testmod(memcacheclient, globs=globs)
+
+if __name__ == "__main__":
+ print "Testing docstrings..."
+ _doctest()
+ print "Running tests:"
+ print
+ serverList = [["127.0.0.1:11211"]]
+ if '--do-unix' in sys.argv:
+ serverList.append([os.path.join(os.getcwd(), 'memcached.socket')])
+
+ for servers in serverList:
+ mc = Client(servers, debug=1)
+
+ def to_s(val):
+ if not isinstance(val, types.StringTypes):
+ return "%s (%s)" % (val, type(val))
+ return "%s" % val
+ def test_setget(key, val):
+ print "Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)),
+ mc.set(key, val)
+ newval = mc.get(key)
+ if newval == val:
+ print "OK"
+ return 1
+ else:
+ print "FAIL"
+ return 0
+
+
+ class FooStruct:
+ def __init__(self):
+ self.bar = "baz"
+ def __str__(self):
+ return "A FooStruct"
+ def __eq__(self, other):
+ if isinstance(other, FooStruct):
+ return self.bar == other.bar
+ return 0
+
+ test_setget("a_string", "some random string")
+ test_setget("an_integer", 42)
+ if test_setget("long", long(1<<30)):
+ print "Testing delete ...",
+ if mc.delete("long"):
+ print "OK"
+ else:
+ print "FAIL"
+ print "Testing get_multi ...",
+ print mc.get_multi(["a_string", "an_integer"])
+
+ print "Testing get(unknown value) ...",
+ print to_s(mc.get("unknown_value"))
+
+ f = FooStruct()
+ test_setget("foostruct", f)
+
+ print "Testing incr ...",
+ x = mc.incr("an_integer", 1)
+ if x == 43:
+ print "OK"
+ else:
+ print "FAIL"
+
+ print "Testing decr ...",
+ x = mc.decr("an_integer", 1)
+ if x == 42:
+ print "OK"
+ else:
+ print "FAIL"
+
+ # sanity tests
+ print "Testing sending spaces...",
+ try:
+ x = mc.set("this has spaces", 1)
+ except Client.MemcachedKeyCharacterError, msg:
+ print "OK"
+ else:
+ print "FAIL"
+
+ print "Testing sending control characters...",
+ try:
+ x = mc.set("this\x10has\x11control characters\x02", 1)
+ except Client.MemcachedKeyCharacterError, msg:
+ print "OK"
+ else:
+ print "FAIL"
+
+ print "Testing using insanely long key...",
+ try:
+ x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'aaaa', 1)
+ except Client.MemcachedKeyLengthError, msg:
+ print "OK"
+ else:
+ print "FAIL"
+
+ print "Testing sending a unicode-string key...",
+ try:
+ x = mc.set(u'keyhere', 1)
+ except Client.MemcachedStringEncodingError, msg:
+ print "OK",
+ else:
+ print "FAIL",
+ try:
+ x = mc.set((u'a'*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
+ except:
+ print "FAIL",
+ else:
+ print "OK",
+ import pickle
+ s = pickle.loads('V\\u4f1a\np0\n.')
+ try:
+ x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
+ except Client.MemcachedKeyLengthError:
+ print "OK"
+ else:
+ print "FAIL"
+
+ print "Testing using a value larger than the memcached value limit...",
+ x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH)
+ if mc.get('keyhere') == None:
+ print "OK",
+ else:
+ print "FAIL",
+ x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa')
+ if mc.get('keyhere') == None:
+ print "OK"
+ else:
+ print "FAIL"
+
+ print "Testing set_multi() with no memcacheds running",
+ mc.disconnect_all()
+ errors = mc.set_multi({'keyhere' : 'a', 'keythere' : 'b'})
+ if errors != []:
+ print "FAIL"
+ else:
+ print "OK"
+
+ print "Testing delete_multi() with no memcacheds running",
+ mc.disconnect_all()
+ ret = mc.delete_multi({'keyhere' : 'a', 'keythere' : 'b'})
+ if ret != 1:
+ print "FAIL"
+ else:
+ print "OK"
+
+# vim: ts=4 sw=4 et :
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/sendfd.py (from rev 5438, CalendarServer/trunk/twext/python/sendfd.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/sendfd.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/sendfd.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,66 @@
+# -*- test-case-name: twext.python.test.test_sendmsg -*-
+##
+# Copyright (c) 2010 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from struct import pack, unpack
+from socket import SOL_SOCKET
+
+from twext.python.sendmsg import sendmsg, recvmsg, SCM_RIGHTS
+
+def sendfd(socketfd, fd, description):
+ """
+ Send the given FD to another process via L{sendmsg} on the given C{AF_UNIX}
+ socket.
+
+ @param socketfd: An C{AF_UNIX} socket, attached to another process waiting
+ to receive sockets via the ancillary data mechanism in L{sendmsg}.
+
+ @type socketfd: C{int}
+
+ @param fd: A file descriptor to be sent to the other process.
+
+ @type fd: C{int}
+
+ @param description: a string describing the socket that was passed.
+
+ @type description: C{str}
+ """
+ sendmsg(
+ socketfd, description, 0, [(SOL_SOCKET, SCM_RIGHTS, pack("i", fd))]
+ )
+
+
+def recvfd(socketfd):
+ """
+ Receive a file descriptor from a L{sendmsg} message on the given C{AF_UNIX}
+ socket.
+
+ @param socketfd: An C{AF_UNIX} socket, attached to another process waiting
+ to send sockets via the ancillary data mechanism in L{sendmsg}.
+
+ @type socketfd: C{int}
+
+ @return: a 2-tuple of (new file descriptor, description).
+
+ @rtype: 2-tuple of (C{int}, C{str})
+ """
+ data, flags, ancillary = recvmsg(socketfd)
+ [(cmsg_level, cmsg_type, packedFD)] = ancillary
+ # cmsg_level and cmsg_type really need to be SOL_SOCKET / SCM_RIGHTS, but
+ # since those are the *only* standard values, there's not much point in
+ # checking.
+ [unpackedFD] = unpack("i", packedFD)
+ return (unpackedFD, data)
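A minimal self-contained sketch (Python 2) exercising both functions within a single process, passing the write end of a pipe across an AF_UNIX socketpair:

    import os
    from socket import socketpair, AF_UNIX, SOCK_DGRAM
    from twext.python.sendfd import sendfd, recvfd

    parent, child = socketpair(AF_UNIX, SOCK_DGRAM)
    readEnd, writeEnd = os.pipe()
    sendfd(parent.fileno(), writeEnd, "write end of a pipe")
    fd, description = recvfd(child.fileno())
    os.write(fd, "hello\n")                    # writes through the passed FD
    print os.read(readEnd, 6), description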
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/sendmsg.c (from rev 5438, CalendarServer/trunk/twext/python/sendmsg.c)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/sendmsg.c (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/sendmsg.c 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2010 Apple Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <Python.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <signal.h>
+
+PyObject *sendmsg_socket_error;
+
+static PyObject *sendmsg_sendmsg(PyObject *self, PyObject *args, PyObject *keywds);
+static PyObject *sendmsg_recvmsg(PyObject *self, PyObject *args, PyObject *keywds);
+
+static PyMethodDef sendmsg_methods[] = {
+ {"sendmsg", (PyCFunction) sendmsg_sendmsg, METH_VARARGS | METH_KEYWORDS,
+ NULL},
+ {"recvmsg", (PyCFunction) sendmsg_recvmsg, METH_VARARGS | METH_KEYWORDS,
+ NULL},
+ {NULL, NULL, 0, NULL}
+};
+
+
+PyMODINIT_FUNC initsendmsg(void) {
+ PyObject *module;
+
+ sendmsg_socket_error = NULL; /* Make sure that this has a known value
+ before doing anything that might exit. */
+
+ module = Py_InitModule("sendmsg", sendmsg_methods);
+
+ if (!module) {
+ return;
+ }
+
+ /*
+ The following is the only value mentioned by POSIX:
+ http://www.opengroup.org/onlinepubs/9699919799/basedefs/sys_socket.h.html
+ */
+
+ if (-1 == PyModule_AddIntConstant(module, "SCM_RIGHTS", SCM_RIGHTS)) {
+ return;
+ }
+
+
+ /* BSD, Darwin, Hurd */
+#if defined(SCM_CREDS)
+ if (-1 == PyModule_AddIntConstant(module, "SCM_CREDS", SCM_CREDS)) {
+ return;
+ }
+#endif
+
+ /* Linux */
+#if defined(SCM_CREDENTIALS)
+ if (-1 == PyModule_AddIntConstant(module, "SCM_CREDENTIALS", SCM_CREDENTIALS)) {
+ return;
+ }
+#endif
+
+ /* Apparently everywhere, but not standardized. */
+#if defined(SCM_TIMESTAMP)
+ if (-1 == PyModule_AddIntConstant(module, "SCM_TIMESTAMP", SCM_TIMESTAMP)) {
+ return;
+ }
+#endif
+
+ module = PyImport_ImportModule("socket");
+ if (!module) {
+ return;
+ }
+
+ sendmsg_socket_error = PyObject_GetAttrString(module, "error");
+ if (!sendmsg_socket_error) {
+ return;
+ }
+}
+
+static PyObject *sendmsg_sendmsg(PyObject *self, PyObject *args, PyObject *keywds) {
+
+ int fd;
+ int flags = 0;
+ int sendmsg_result;
+ struct msghdr message_header;
+ struct iovec iov[1];
+ PyObject *ancillary = NULL;
+ static char *kwlist[] = {"fd", "data", "flags", "ancillary", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(
+ args, keywds, "it#|iO:sendmsg", kwlist,
+ &fd,
+ &iov[0].iov_base,
+ &iov[0].iov_len,
+ &flags,
+ &ancillary)) {
+ return NULL;
+ }
+
+ message_header.msg_name = NULL;
+ message_header.msg_namelen = 0;
+
+ message_header.msg_iov = iov;
+ message_header.msg_iovlen = 1;
+
+ message_header.msg_control = NULL;
+ message_header.msg_controllen = 0;
+
+ message_header.msg_flags = 0;
+
+ if (ancillary) {
+
+ if (!PyList_Check(ancillary)) {
+ PyErr_Format(PyExc_TypeError,
+ "sendmsg argument 3 expected list, got %s",
+ ancillary->ob_type->tp_name);
+ return NULL;
+ }
+
+ PyObject *iterator = PyObject_GetIter(ancillary);
+ PyObject *item = NULL;
+
+ if (iterator == NULL) {
+ return NULL;
+ }
+
+ int all_data_len = 0;
+
+ /* First we need to know how big the buffer needs to be in order to
+ have enough space for all of the messages. */
+ while ( (item = PyIter_Next(iterator)) ) {
+ int data_len, type, level;
+ char *data;
+ if (!PyArg_ParseTuple(item, "iit#:sendmsg ancillary data (level, type, data)",
+ &level,
+ &type,
+ &data,
+ &data_len)) {
+ Py_DECREF(item);
+ Py_DECREF(iterator);
+ return NULL;
+ }
+ all_data_len += CMSG_SPACE(data_len);
+
+ Py_DECREF(item);
+ }
+
+ Py_DECREF(iterator);
+ iterator = NULL;
+
+ /* Allocate the buffer for all of the ancillary elements. */
+ message_header.msg_control = malloc(all_data_len);
+ if (!message_header.msg_control) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+ message_header.msg_controllen = all_data_len;
+
+ iterator = PyObject_GetIter(ancillary); /* again */
+ item = NULL;
+
+ if (!iterator) {
+ free(message_header.msg_control);
+ return NULL;
+ }
+
+ /* Unpack the tuples into the control message. */
+ struct cmsghdr *control_message = CMSG_FIRSTHDR(&message_header);
+ while ( (item = PyIter_Next(iterator)) ) {
+ int data_len, type, level;
+ unsigned char *data, *cmsg_data;
+
+ if (!PyArg_ParseTuple(item,
+ "iit#:sendmsg ancillary data (level, type, data)",
+ &level,
+ &type,
+ &data,
+ &data_len)) {
+ Py_DECREF(item);
+ Py_DECREF(iterator);
+ free(message_header.msg_control);
+ return NULL;
+ }
+
+ control_message->cmsg_level = level;
+ control_message->cmsg_type = type;
+ control_message->cmsg_len = CMSG_LEN(data_len);
+
+ cmsg_data = CMSG_DATA(control_message);
+ memcpy(cmsg_data, data, data_len);
+
+ Py_DECREF(item);
+
+ control_message = CMSG_NXTHDR(&message_header, control_message);
+
+ /* We explicitly allocated enough space for all ancillary data
+ above; if there isn't enough room, all bets are off. */
+ assert(control_message);
+ }
+
+ Py_DECREF(iterator);
+
+ if (PyErr_Occurred()) {
+ free(message_header.msg_control);
+ return NULL;
+ }
+ }
+
+ sendmsg_result = sendmsg(fd, &message_header, flags);
+
+ if (sendmsg_result < 0) {
+ PyErr_SetFromErrno(sendmsg_socket_error);
+ if (message_header.msg_control) {
+ free(message_header.msg_control);
+ }
+ return NULL;
+ }
+
+ return Py_BuildValue("i", sendmsg_result);
+}
+
+static PyObject *sendmsg_recvmsg(PyObject *self, PyObject *args, PyObject *keywds) {
+ int fd = -1;
+ int flags = 0;
+ size_t maxsize = 8192;
+ size_t cmsg_size = 4*1024;
+ int recvmsg_result;
+ struct msghdr message_header;
+ struct cmsghdr *control_message;
+ struct iovec iov[1];
+ char *cmsgbuf;
+ PyObject *ancillary;
+ PyObject *final_result = NULL;
+
+ static char *kwlist[] = {"fd", "flags", "maxsize", "cmsg_size", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, keywds, "i|iii:recvmsg", kwlist,
+ &fd, &flags, &maxsize, &cmsg_size)) {
+ return NULL;
+ }
+
+ cmsg_size = CMSG_SPACE(cmsg_size);
+
+ message_header.msg_name = NULL;
+ message_header.msg_namelen = 0;
+
+ iov[0].iov_len = maxsize;
+ iov[0].iov_base = malloc(maxsize);
+
+ if (!iov[0].iov_base) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ message_header.msg_iov = iov;
+ message_header.msg_iovlen = 1;
+
+ cmsgbuf = malloc(cmsg_size);
+
+ if (!cmsgbuf) {
+ free(iov[0].iov_base);
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ memset(cmsgbuf, 0, cmsg_size);
+ message_header.msg_control = cmsgbuf;
+ message_header.msg_controllen = cmsg_size;
+
+ recvmsg_result = recvmsg(fd, &message_header, flags);
+ if (recvmsg_result < 0) {
+ PyErr_SetFromErrno(sendmsg_socket_error);
+ goto finished;
+ }
+
+ ancillary = PyList_New(0);
+ if (!ancillary) {
+ goto finished;
+ }
+
+ for (control_message = CMSG_FIRSTHDR(&message_header);
+ control_message;
+ control_message = CMSG_NXTHDR(&message_header,
+ control_message)) {
+ PyObject *entry;
+
+ /* Some platforms apparently always fill out the ancillary data
+ structure with a single bogus value if none is provided; ignore it,
+ if that is the case. */
+
+ if ((!(control_message->cmsg_level)) &&
+ (!(control_message->cmsg_type))) {
+ continue;
+ }
+
+ entry = Py_BuildValue(
+ "(iis#)",
+ control_message->cmsg_level,
+ control_message->cmsg_type,
+ CMSG_DATA(control_message),
+ control_message->cmsg_len - sizeof(struct cmsghdr));
+
+ if (!entry) {
+ Py_DECREF(ancillary);
+ goto finished;
+ }
+
+ if (PyList_Append(ancillary, entry) < 0) {
+ Py_DECREF(ancillary);
+ Py_DECREF(entry);
+ goto finished;
+ } else {
+ Py_DECREF(entry);
+ }
+ }
+
+ final_result = Py_BuildValue(
+ "s#iO",
+ iov[0].iov_base,
+ recvmsg_result,
+ message_header.msg_flags,
+ ancillary);
+
+ Py_DECREF(ancillary);
+
+ finished:
+ free(iov[0].iov_base);
+ free(cmsgbuf);
+ return final_result;
+}
+
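A note on the calling convention of the extension above: judging from the argument parsing and the tests added below, sendmsg() takes (fd, data, flags, ancillary) where ancillary is a list of (cmsg_level, cmsg_type, data) tuples, and recvmsg() takes (fd, flags=0, maxsize=8192, cmsg_size=4096) and returns a (data, flags, ancillary) tuple with ancillary in the same shape. A minimal round-trip sketch (illustrative only, not part of the changeset; it simply mirrors test_roundtrip below):

    import socket
    from twext.python.sendmsg import sendmsg, recvmsg

    left, right = socket.socketpair(socket.AF_UNIX)

    # Send plain data with no ancillary messages.
    sendmsg(left.fileno(), "ping", 0)

    # recvmsg returns (data, flags, ancillary); ancillary is empty here.
    data, flags, ancillary = recvmsg(fd=right.fileno())
    assert (data, ancillary) == ("ping", [])

    left.close()
    right.close()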
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/test/pullpipe.py (from rev 5438, CalendarServer/trunk/twext/python/test/pullpipe.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/test/pullpipe.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/test/pullpipe.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,26 @@
+#!/usr/bin/python
+# -*- test-case-name: twext.python.test.test_sendmsg -*-
+##
+# Copyright (c) 2010 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+if __name__ == '__main__':
+ from twext.python.sendfd import recvfd
+ import sys, os
+ fd, description = recvfd(int(sys.argv[1]))
+ os.write(fd, "Test fixture data: %s.\n" % (description,))
+ os.close(fd)
+
+
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/test/test_sendmsg.py (from rev 5438, CalendarServer/trunk/twext/python/test/test_sendmsg.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/test/test_sendmsg.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/test/test_sendmsg.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,172 @@
+##
+# Copyright (c) 2010 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+import socket
+from os import pipe, read, close, environ
+from twext.python.filepath import CachingFilePath as FilePath
+import sys
+
+from twisted.internet.defer import Deferred
+from twisted.internet.error import ProcessDone
+from twisted.trial.unittest import TestCase
+from twisted.internet.defer import inlineCallbacks
+from twisted.internet import reactor
+
+from twext.python.sendmsg import sendmsg, recvmsg
+from twext.python.sendfd import sendfd
+from twisted.internet.protocol import ProcessProtocol
+
+class ExitedWithStderr(Exception):
+ """
+ A process exited with some stderr.
+ """
+
+ def __str__(self):
+ """
+ Dump the errors in a pretty way in the event of a subprocess traceback.
+ """
+ return '\n'.join([''] + list(self.args))
+
+
+class StartStopProcessProtocol(ProcessProtocol):
+ """
+ An L{IProcessProtocol} with a Deferred for events where the subprocess
+ starts and stops.
+ """
+
+ def __init__(self):
+ self.started = Deferred()
+ self.stopped = Deferred()
+ self.output = ''
+ self.errors = ''
+
+ def connectionMade(self):
+ self.started.callback(self.transport)
+
+ def outReceived(self, data):
+ self.output += data
+
+ def errReceived(self, data):
+ self.errors += data
+
+ def processEnded(self, reason):
+ if reason.check(ProcessDone):
+ self.stopped.callback(self.output)
+ else:
+ self.stopped.errback(ExitedWithStderr(
+ self.errors, self.output))
+
+
+
+def bootReactor():
+ """
+ Yield this from a trial test to bootstrap the reactor in order to avoid
+ PotentialZombieWarning, for tests that use subprocesses. This hack will no
+ longer be necessary in Twisted 10.1, since U{the underlying bug was fixed
+ <http://twistedmatrix.com/trac/ticket/2078>}.
+ """
+ d = Deferred()
+ reactor.callLater(0, d.callback, None)
+ return d
+
+
+
+class SendmsgTestCase(TestCase):
+ """
+ Tests for sendmsg extension module and associated file-descriptor sending
+ functionality in L{twext.python.sendfd}.
+ """
+
+ def setUp(self):
+ """
+ Create a pair of UNIX sockets.
+ """
+ self.input, self.output = socket.socketpair(socket.AF_UNIX)
+
+
+ def tearDown(self):
+ """
+ Close the sockets opened by setUp.
+ """
+ self.input.close()
+ self.output.close()
+
+
+ def test_roundtrip(self):
+ """
+ L{recvmsg} will retrieve a message sent via L{sendmsg}.
+ """
+ sendmsg(self.input.fileno(), "hello, world!", 0)
+
+ result = recvmsg(fd=self.output.fileno())
+ self.assertEquals(result, ("hello, world!", 0, []))
+
+
+ def test_wrongTypeAncillary(self):
+ """
+ L{sendmsg} will show a helpful exception message when given the wrong
+ type of object for the 'ancillary' argument.
+ """
+ error = self.assertRaises(TypeError,
+ sendmsg, self.input.fileno(),
+ "hello, world!", 0, 4321)
+ self.assertEquals(str(error),
+ "sendmsg argument 3 expected list, got int")
+
+
+ def spawn(self, script):
+ """
+ Start a script that is a peer of this test as a subprocess.
+
+ @param script: the module name of the script in this directory (no
+ package prefix, no '.py')
+ @type script: C{str}
+
+ @rtype: L{StartStopProcessProtocol}
+ """
+ sspp = StartStopProcessProtocol()
+ reactor.spawnProcess(
+ sspp, sys.executable, [
+ sys.executable,
+ FilePath(__file__).sibling(script + ".py").path,
+ str(self.output.fileno()),
+ ],
+ environ,
+ childFDs={0: "w", 1: "r", 2: "r",
+ self.output.fileno(): self.output.fileno()}
+ )
+ return sspp
+
+
+ @inlineCallbacks
+ def test_sendSubProcessFD(self):
+ """
+ Calling L{sendmsg} with SOL_SOCKET, SCM_RIGHTS, and a platform-endian
+ packed file descriptor number should send that file descriptor to a
+ different process, where it can be retrieved by using L{recvmsg}.
+ """
+ yield bootReactor()
+ sspp = self.spawn("pullpipe")
+ yield sspp.started
+ pipeOut, pipeIn = pipe()
+ self.addCleanup(close, pipeOut)
+ sendfd(self.input.fileno(), pipeIn, "blonk")
+ close(pipeIn)
+ yield sspp.stopped
+ self.assertEquals(read(pipeOut, 1024), "Test fixture data: blonk.\n")
+ # Make sure that the pipe is actually closed now.
+ self.assertEquals(read(pipeOut, 1024), "")
+
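The sendfd/recvfd helpers exercised above live in twext/python/sendfd.py, which is not part of this changeset. From their use here (sendfd(socketfd, fd, description) on the sending side, and fd, description = recvfd(socketfd) in pullpipe.py) they appear to be thin wrappers around sendmsg/recvmsg with an SCM_RIGHTS control message. A hypothetical sketch under those assumptions; the struct format and the hard-coded SCM_RIGHTS value are assumptions, not taken from the real module:

    import struct
    from socket import SOL_SOCKET
    from twext.python.sendmsg import sendmsg, recvmsg

    SCM_RIGHTS = 1  # POSIX value; assumed here rather than imported

    def sendfd(socketfd, fd, description=""):
        # Send one SCM_RIGHTS control message carrying the descriptor,
        # with the description string as the ordinary data payload.
        sendmsg(socketfd, description, 0,
                [(SOL_SOCKET, SCM_RIGHTS, struct.pack("i", fd))])

    def recvfd(socketfd):
        # Inverse of sendfd(): returns a (fd, description) tuple.
        data, flags, ancillary = recvmsg(socketfd)
        [(level, cmsg_type, packed)] = ancillary
        [fd] = struct.unpack("i", packed)
        return fd, data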
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/vcomponent.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/vcomponent.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/python/vcomponent.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -18,9 +18,15 @@
iCalendar utilities
"""
+__all__ = [
+ "VComponent",
+ "VProperty",
+ "InvalidICalendarDataError",
+]
+
# FIXME: Move twistedcaldav.ical here, but that module needs some
# cleanup first. Perhaps after porting to libical?
-from twistedcaldav.ical import InvalidICalendarDataError
from twistedcaldav.ical import Component as VComponent
from twistedcaldav.ical import Property as VProperty
+from twistedcaldav.ical import InvalidICalendarDataError
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/channel/http.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/channel/http.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/channel/http.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -739,7 +739,7 @@
def connectionMade(self):
self.setTimeout(self.inputTimeOut)
- self.factory.outstandingRequests+=1
+ self.factory.addConnectedChannel(self)
def lineReceived(self, line):
if self._first_line:
@@ -928,7 +928,7 @@
self.transport.loseConnection()
def connectionLost(self, reason):
- self.factory.outstandingRequests-=1
+ self.factory.removeConnectedChannel(self)
self._writeLost = True
self.readConnectionLost()
@@ -950,20 +950,33 @@
"please try again later.</body></html>")
self.transport.loseConnection()
+
+
class HTTPFactory(protocol.ServerFactory):
- """Factory for HTTP server."""
+ """
+ Factory for HTTP server.
+ @ivar outstandingRequests: the number of currently connected HTTP channels.
+
+ @type outstandingRequests: C{int}
+
+ @ivar connectedChannels: all the channels that have currently active
+ connections.
+
+ @type connectedChannels: C{set} of L{HTTPChannel}
+ """
+
protocol = HTTPChannel
protocolArgs = None
- outstandingRequests = 0
-
def __init__(self, requestFactory, maxRequests=600, **kwargs):
- self.maxRequests=maxRequests
+ self.maxRequests = maxRequests
self.protocolArgs = kwargs
- self.protocolArgs['requestFactory']=requestFactory
-
+ self.protocolArgs['requestFactory'] = requestFactory
+ self.connectedChannels = set()
+
+
def buildProtocol(self, addr):
if self.outstandingRequests >= self.maxRequests:
return OverloadedServerProtocol()
@@ -975,6 +988,28 @@
return p
+ def addConnectedChannel(self, channel):
+ """
+ Add a connected channel to the set of currently connected channels and
+ increase the outstanding request count.
+ """
+ self.connectedChannels.add(channel)
+
+
+ def removeConnectedChannel(self, channel):
+ """
+ Remove a connected channel from the set of currently connected channels
+ and decrease the outstanding request count.
+ """
+ self.connectedChannels.remove(channel)
+
+
+ @property
+ def outstandingRequests(self):
+ return len(self.connectedChannels)
+
+
+
class HTTP503LoggingFactory (HTTPFactory):
"""
Factory for HTTP server which emits a 503 response when overloaded.
@@ -1087,40 +1122,50 @@
-class LimitingHTTPChannel(HTTPChannel):
- """ HTTPChannel that takes itself out of the reactor once it has enough
- requests in flight.
+class LimitingHTTPFactory(HTTPFactory):
"""
+ HTTPFactory which stores maxAccepts on behalf of the MaxAcceptPortMixin
- def connectionMade(self):
- HTTPChannel.connectionMade(self)
- if self.factory.outstandingRequests >= self.factory.maxRequests:
- self.factory.myServer.myPort.stopReading()
-
- def connectionLost(self, reason):
- HTTPChannel.connectionLost(self, reason)
- if self.factory.outstandingRequests < self.factory.maxRequests:
- self.factory.myServer.myPort.startReading()
-
-class LimitingHTTPFactory(HTTPFactory):
- """ HTTPFactory which stores maxAccepts on behalf of the MaxAcceptPortMixin
+ @ivar myServer: a reference to a L{MaxAcceptTCPServer} that this
+ L{LimitingHTTPFactory} will limit. This must be set externally.
"""
- protocol = LimitingHTTPChannel
-
def __init__(self, requestFactory, maxRequests=600, maxAccepts=100,
**kwargs):
HTTPFactory.__init__(self, requestFactory, maxRequests, **kwargs)
self.maxAccepts = maxAccepts
def buildProtocol(self, addr):
-
+ """
+ Override L{HTTPFactory.buildProtocol} in order to avoid ever returning
+ an L{OverloadedServerProtocol}; this should be handled in other ways.
+ """
p = protocol.ServerFactory.buildProtocol(self, addr)
for arg, value in self.protocolArgs.iteritems():
setattr(p, arg, value)
return p
+ def addConnectedChannel(self, channel):
+ """
+ Override L{HTTPFactory.addConnectedChannel} to pause listening on the
+ socket when there are too many outstanding channels.
+ """
+ HTTPFactory.addConnectedChannel(self, channel)
+ if self.outstandingRequests >= self.maxRequests:
+ self.myServer.myPort.stopReading()
+
+ def removeConnectedChannel(self, channel):
+ """
+ Override L{HTTPFactory.removeConnectedChannel} to resume listening on
+ the socket once the number of outstanding channels drops back below
+ the maximum.
+ """
+ HTTPFactory.removeConnectedChannel(self, channel)
+ if self.outstandingRequests < self.maxRequests:
+ self.myServer.myPort.startReading()
+
+
+
__all__ = [
"HTTPFactory",
"HTTP503LoggingFactory",
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/dav/resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/dav/resource.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/dav/resource.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -44,6 +44,7 @@
"unauthenticatedPrincipal",
]
+import cPickle as pickle
import urllib
from zope.interface import implements
@@ -51,7 +52,7 @@
from twisted.cred.error import LoginFailed, UnauthorizedLogin
from twisted.python.failure import Failure
from twisted.internet.defer import Deferred, maybeDeferred, succeed
-from twisted.internet.defer import waitForDeferred, deferredGenerator
+from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import reactor
from twext.python.log import Logger
@@ -80,49 +81,53 @@
Mix-in class which implements the DAV property access API in
L{IDAVResource}.
- There are three categories of DAV properties, for the purposes of how this
- class manages them. A X{property} is either a X{live property} or a
- X{dead property}, and live properties are split into two categories:
+ There are three categories of DAV properties, for the purposes of
+ how this class manages them. A X{property} is either a X{live
+ property} or a X{dead property}, and live properties are split
+ into two categories:
- 1. Dead properties. There are properties that the server simply stores as
- opaque data. These are store in the X{dead property store}, which is
- provided by subclasses via the L{deadProperties} method.
+ 1. Dead properties. These are properties that the server simply
+ stores as opaque data. These are stored in the X{dead property
+ store}, which is provided by subclasses via the
+ L{deadProperties} method.
- 2. Live properties which are always computed. These properties aren't
- stored anywhere (by this class) but instead are derived from the resource
- state or from data that is persisted elsewhere. These are listed in the
- L{liveProperties} attribute and are handled explicitly by the
- L{readProperty} method.
+ 2. Live properties which are always computed. These properties
+ aren't stored anywhere (by this class) but instead are derived
+ from the resource state or from data that is persisted
+ elsewhere. These are listed in the L{liveProperties}
+ attribute and are handled explicitly by the L{readProperty}
+ method.
- 3. Live properties may be acted on specially and are stored in the X{dead
- property store}. These are not listed in the L{liveProperties} attribute,
- but may be handled specially by the property access methods. For
- example, L{writeProperty} might validate the data and refuse to write
- data it deems inappropriate for a given property.
+ 3. Live properties may be acted on specially and are stored in
+ the X{dead property store}. These are not listed in the
+ L{liveProperties} attribute, but may be handled specially by
+ the property access methods. For example, L{writeProperty}
+ might validate the data and refuse to write data it deems
+ inappropriate for a given property.
There are two sets of property access methods. The first group
(L{hasProperty}, etc.) provides access to all properties. They
- automatically figure out which category a property falls into and act
- accordingly.
+ automatically figure out which category a property falls into and
+ act accordingly.
- The second group (L{hasDeadProperty}, etc.) accesses the dead property store
- directly and bypasses any live property logic that exists in the first group
- of methods. These methods are used by the first group of methods, and there
- are cases where they may be needed by other methods. I{Accessing dead
- properties directly should be done with caution.} Bypassing the live
- property logic means that values may not be the correct ones for use in
- DAV requests such as PROPFIND, and may be bypassing security checks. In
- general, one should never bypass the live property logic as part of a client
- request for property data.
+ The second group (L{hasDeadProperty}, etc.) accesses the dead
+ property store directly and bypasses any live property logic that
+ exists in the first group of methods. These methods are used by
+ the first group of methods, and there are cases where they may be
+ needed by other methods. I{Accessing dead properties directly
+ should be done with caution.} Bypassing the live property logic
+ means that values may not be the correct ones for use in DAV
+ requests such as PROPFIND, and may be bypassing security checks.
+ In general, one should never bypass the live property logic as
+ part of a client request for property data.
- Properties in the L{twisted_private_namespace} namespace are internal to the
- server and should not be exposed to clients. They can only be accessed via
- the dead property store.
+ Properties in the L{twisted_private_namespace} namespace are
+ internal to the server and should not be exposed to clients. They
+ can only be accessed via the dead property store.
"""
- # Note:
- # The DAV:owner and DAV:group live properties are only meaningful if you
- # are using ACL semantics (ie. Unix-like) which use them. This (generic)
- # class does not.
+ # Note: The DAV:owner and DAV:group live properties are only
+ # meaningful if you are using ACL semantics (ie. Unix-like) which
+ # use them. This (generic) class does not.
liveProperties = (
(dav_namespace, "resourcetype" ),
@@ -151,18 +156,21 @@
def deadProperties(self):
"""
- Provides internal access to the WebDAV dead property store. You
- probably shouldn't be calling this directly if you can use the property
- accessors in the L{IDAVResource} API instead. However, a subclass must
- override this method to provide it's own dead property store.
+ Provides internal access to the WebDAV dead property store.
+ You probably shouldn't be calling this directly if you can use
+ the property accessors in the L{IDAVResource} API instead.
+ However, a subclass must override this method to provide its
+ own dead property store.
- This implementation returns an instance of L{NonePropertyStore}, which
- cannot store dead properties. Subclasses must override this method if
- they wish to store dead properties.
+ This implementation returns an instance of
+ L{NonePropertyStore}, which cannot store dead properties.
+ Subclasses must override this method if they wish to store
+ dead properties.
- @return: a dict-like object from which one can read and to which one can
- write dead properties. Keys are qname tuples (ie. C{(namespace, name)})
- as returned by L{davxml.WebDAVElement.qname()} and values are
+ @return: a dict-like object from which one can read and to
+ which one can write dead properties. Keys are qname
+ tuples (ie. C{(namespace, name)}) as returned by
+ L{davxml.WebDAVElement.qname()} and values are
L{davxml.WebDAVElement} instances.
"""
if not hasattr(self, "_dead_properties"):
@@ -189,7 +197,10 @@
d.addCallback(lambda result: result)
return d
- return succeed(qname in self.liveProperties or self.deadProperties().contains(qname))
+ return succeed(
+ qname in self.liveProperties or
+ self.deadProperties().contains(qname)
+ )
def readProperty(self, property, request):
"""
@@ -256,8 +267,14 @@
if name == "supportedlock":
return davxml.SupportedLock(
- davxml.LockEntry(davxml.LockScope.exclusive, davxml.LockType.write),
- davxml.LockEntry(davxml.LockScope.shared , davxml.LockType.write),
+ davxml.LockEntry(
+ davxml.LockScope.exclusive,
+ davxml.LockType.write
+ ),
+ davxml.LockEntry(
+ davxml.LockScope.shared,
+ davxml.LockType.write
+ ),
)
if name == "supported-report-set":
@@ -277,7 +294,9 @@
if name == "principal-collection-set":
return davxml.PrincipalCollectionSet(*[
- davxml.HRef(principalCollection.principalCollectionURL())
+ davxml.HRef(
+ principalCollection.principalCollectionURL()
+ )
for principalCollection in self.principalCollections()
])
@@ -287,7 +306,8 @@
raise HTTPError(StatusResponse(
responsecode.UNAUTHORIZED,
- "Access denied while reading property %s." % (sname,)
+ "Access denied while reading property %s."
+ % (sname,)
))
d = self.checkPrivileges(request, privileges)
@@ -297,9 +317,15 @@
if name == "current-user-privilege-set":
def callback():
d = self.currentPrivileges(request)
- d.addCallback(lambda privs: davxml.CurrentUserPrivilegeSet(*privs))
+ d.addCallback(
+ lambda privs:
+ davxml.CurrentUserPrivilegeSet(*privs)
+ )
return d
- return ifAllowed((davxml.ReadCurrentUserPrivilegeSet(),), callback)
+ return ifAllowed(
+ (davxml.ReadCurrentUserPrivilegeSet(),),
+ callback
+ )
if name == "acl":
def callback():
@@ -313,7 +339,9 @@
return ifAllowed((davxml.ReadACL(),), callback)
if name == "current-user-principal":
- return davxml.CurrentUserPrincipal(self.currentPrincipal(request).children[0])
+ return davxml.CurrentUserPrincipal(
+ self.currentPrincipal(request).children[0]
+ )
if name == "quota-available-bytes":
def callback(qvalue):
@@ -352,7 +380,8 @@
elif namespace == twisted_private_namespace:
raise HTTPError(StatusResponse(
responsecode.FORBIDDEN,
- "Properties in the %s namespace are private to the server." % (sname,)
+ "Properties in the %s namespace are private to the server."
+ % (sname,)
))
return self.deadProperties().get(qname)
@@ -363,19 +392,23 @@
"""
See L{IDAVResource.writeProperty}.
"""
- assert isinstance(property, davxml.WebDAVElement), "Not a property: %r" % (property,)
+ assert isinstance(property, davxml.WebDAVElement), (
+ "Not a property: %r" % (property,)
+ )
def defer():
if property.protected:
raise HTTPError(StatusResponse(
responsecode.FORBIDDEN,
- "Protected property %s may not be set." % (property.sname(),)
+ "Protected property %s may not be set."
+ % (property.sname(),)
))
if property.namespace == twisted_private_namespace:
raise HTTPError(StatusResponse(
responsecode.FORBIDDEN,
- "Properties in the %s namespace are private to the server." % (property.sname(),)
+ "Properties in the %s namespace are private to the server."
+ % (property.sname(),)
))
return self.deadProperties().set(property)
@@ -403,13 +436,15 @@
if qname[0] == twisted_private_namespace:
raise HTTPError(StatusResponse(
responsecode.FORBIDDEN,
- "Properties in the %s namespace are private to the server." % (qname[0],)
+ "Properties in the %s namespace are private to the server."
+ % (qname[0],)
))
return self.deadProperties().delete(qname)
return maybeDeferred(defer)
+ @inlineCallbacks
def listProperties(self, request):
"""
See L{IDAVResource.listProperties}.
@@ -418,32 +453,34 @@
# Add dynamic live properties that exist
dynamicLiveProperties = (
- (dav_namespace, "quota-available-bytes" ),
- (dav_namespace, "quota-used-bytes" ),
+ (dav_namespace, "quota-available-bytes"),
+ (dav_namespace, "quota-used-bytes" ),
)
for dqname in dynamicLiveProperties:
- has = waitForDeferred(self.hasProperty(dqname, request))
- yield has
- has = has.getResult()
+ has = (yield self.hasProperty(dqname, request))
if not has:
qnames.remove(dqname)
for qname in self.deadProperties().list():
- if (qname not in qnames) and (qname[0] != twisted_private_namespace):
+ if (
+ qname not in qnames and
+ qname[0] != twisted_private_namespace
+ ):
qnames.add(qname)
- yield qnames
+ returnValue(qnames)
- listProperties = deferredGenerator(listProperties)
-
def listAllprop(self, request):
"""
- Some DAV properties should not be returned to a C{DAV:allprop} query.
- RFC 3253 defines several such properties. This method computes a subset
- of the property qnames returned by L{listProperties} by filtering out
- elements whose class have the C{.hidden} attribute set to C{True}.
- @return: a list of qnames of properties which are defined and are
- appropriate for use in response to a C{DAV:allprop} query.
+ Some DAV properties should not be returned to a C{DAV:allprop}
+ query. RFC 3253 defines several such properties. This method
+ computes a subset of the property qnames returned by
+ L{listProperties} by filtering out elements whose class have
+ the C{.hidden} attribute set to C{True}.
+
+ @return: a list of qnames of properties which are defined and
+ are appropriate for use in response to a C{DAV:allprop}
+ query.
"""
def doList(qnames):
result = []
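Most of the changes to this file are a mechanical conversion from the older twisted.internet.defer.deferredGenerator / waitForDeferred style to inlineCallbacks / returnValue, as in listProperties above. For reference, here is the same asynchronous step written both ways (a generic example, not code from this file):

    from twisted.internet.defer import (
        deferredGenerator, waitForDeferred,
        inlineCallbacks, returnValue, succeed)

    # Old style, as removed throughout this file:
    def doubled_old(getValue):
        value = waitForDeferred(getValue())
        yield value
        value = value.getResult()
        yield value * 2              # final yield becomes the result
    doubled_old = deferredGenerator(doubled_old)

    # New style, as introduced by this changeset:
    @inlineCallbacks
    def doubled_new(getValue):
        value = (yield getValue())
        returnValue(value * 2)

    # Both return a Deferred that fires with 4:
    doubled_old(lambda: succeed(2))
    doubled_new(lambda: succeed(2))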
@@ -464,8 +501,8 @@
def hasDeadProperty(self, property):
"""
- Same as L{hasProperty}, but bypasses the live property store and checks
- directly from the dead property store.
+ Same as L{hasProperty}, but bypasses the live property store
+ and checks directly from the dead property store.
"""
if type(property) is tuple:
qname = property
@@ -476,8 +513,8 @@
def readDeadProperty(self, property):
"""
- Same as L{readProperty}, but bypasses the live property store and reads
- directly from the dead property store.
+ Same as L{readProperty}, but bypasses the live property store
+ and reads directly from the dead property store.
"""
if type(property) is tuple:
qname = property
@@ -488,21 +525,21 @@
def writeDeadProperty(self, property):
"""
- Same as L{writeProperty}, but bypasses the live property store and
- writes directly to the dead property store.
- Note that this should not be used unless you know that you are writing
- to an overrideable live property, as this bypasses the logic which
- protects protected properties. The result of writing to a
- non-overrideable live property with this method is undefined; the value
- in the dead property store may or may not be ignored when reading the
- property with L{readProperty}.
+ Same as L{writeProperty}, but bypasses the live property store
+ and writes directly to the dead property store. Note that
+ this should not be used unless you know that you are writing
+ to an overrideable live property, as this bypasses the logic
+ which protects protected properties. The result of writing to
+ a non-overrideable live property with this method is
+ undefined; the value in the dead property store may or may not
+ be ignored when reading the property with L{readProperty}.
"""
self.deadProperties().set(property)
def removeDeadProperty(self, property):
"""
- Same as L{removeProperty}, but bypasses the live property store and acts
- directly on the dead property store.
+ Same as L{removeProperty}, but bypasses the live property
+ store and acts directly on the dead property store.
"""
if self.hasDeadProperty(property):
if type(property) is tuple:
@@ -518,13 +555,17 @@
#
def contentType(self):
if self.hasDeadProperty((davxml.dav_namespace, "getcontenttype")):
- return self.readDeadProperty((davxml.dav_namespace, "getcontenttype")).mimeType()
+ return self.readDeadProperty(
+ (davxml.dav_namespace, "getcontenttype")
+ ).mimeType()
else:
return super(DAVPropertyMixIn, self).contentType()
def displayName(self):
if self.hasDeadProperty((davxml.dav_namespace, "displayname")):
- return str(self.readDeadProperty((davxml.dav_namespace, "displayname")))
+ return str(self.readDeadProperty(
+ (davxml.dav_namespace, "displayname")
+ ))
else:
return super(DAVPropertyMixIn, self).displayName()
@@ -536,8 +577,9 @@
def __init__(self, principalCollections=None):
"""
- @param principalCollections: an iterable of L{IDAVPrincipalCollectionResource}s
- which contain principals to be used in ACLs for this resource.
+ @param principalCollections: an iterable of
+ L{IDAVPrincipalCollectionResource}s which contain
+ principals to be used in ACLs for this resource.
"""
if principalCollections is not None:
self._principalCollections = frozenset([
@@ -552,8 +594,9 @@
def davComplianceClasses(self):
"""
This implementation raises L{NotImplementedError}.
- @return: a sequence of strings denoting WebDAV compliance classes. For
- example, a DAV level 2 server might return ("1", "2").
+ @return: a sequence of strings denoting WebDAV compliance
+ classes. For example, a DAV level 2 server might return
+ ("1", "2").
"""
unimplemented(self)
@@ -561,17 +604,21 @@
"""
See L{IDAVResource.isCollection}.
- This implementation raises L{NotImplementedError}; a subclass must
- override this method.
+ This implementation raises L{NotImplementedError}; a subclass
+ must override this method.
"""
unimplemented(self)
- def findChildren(self, depth, request, callback, privileges=None, inherited_aces=None):
+ def findChildren(
+ self, depth, request, callback,
+ privileges=None, inherited_aces=None
+ ):
"""
See L{IDAVResource.findChildren}.
- This implementation works for C{depth} values of C{"0"}, C{"1"},
- and C{"infinity"}. As long as C{self.listChildren} is implemented
+ This implementation works for C{depth} values of C{"0"},
+ C{"1"}, and C{"infinity"}, as long as C{self.listChildren} is
+ implemented.
"""
assert depth in ("0", "1", "infinity"), "Invalid depth: %s" % (depth,)
@@ -593,7 +640,10 @@
if privileges is None:
return child
- d = child.checkPrivileges(request, privileges, inherited_aces=inherited_aces)
+ d = child.checkPrivileges(
+ request, privileges,
+ inherited_aces=inherited_aces
+ )
d.addCallback(lambda _: child)
return d
@@ -604,7 +654,10 @@
if child.isCollection():
callback(child, childpath + "/")
if depth == "infinity":
- d = child.findChildren(depth, request, callback, privileges)
+ d = child.findChildren(
+ depth, request,
+ callback, privileges
+ )
d.addCallback(lambda x: reactor.callLater(0, getChild))
return d
else:
@@ -628,10 +681,191 @@
return completionDeferred
+ @inlineCallbacks
+ def findChildrenFaster(
+ self, depth, request, okcallback, badcallback,
+ names, privileges, inherited_aces
+ ):
+ """
+ See L{IDAVResource.findChildren}.
+
+ This implementation works for C{depth} values of C{"0"},
+ C{"1"}, and C{"infinity"}, as long as C{self.listChildren} is
+ implemented.
+
+ @param depth: a C{str} for the depth: "0", "1" and "infinity"
+ only allowed.
+ @param request: the L{Request} for the current request in
+ progress
+ @param okcallback: a callback function used on all resources
+ that pass the privilege check, or C{None}
+ @param badcallback: a callback function used on all resources
+ that fail the privilege check, or C{None}
+ @param names: a C{list} of C{str}'s containing the names of
+ the child resources to lookup. If empty or C{None} all
+ children will be examined, otherwise only the ones in the
+ list.
+ @param privileges: a list of privileges to check.
+ @param inherited_aces: the list of parent ACEs that are
+ inherited by all children.
+ """
+ assert depth in ("0", "1", "infinity"), "Invalid depth: %s" % (depth,)
+
+ if depth == "0" or not self.isCollection():
+ returnValue(None)
+
+ # First find all depth 1 children
+ #children = []
+ #yield self.findChildren("1", request, lambda x, y: children.append((x, y)), privileges=None, inherited_aces=None)
+
+ children = []
+ basepath = request.urlForResource(self)
+ childnames = list(self.listChildren())
+ for childname in childnames:
+ if names and childname not in names:
+ continue
+ childpath = joinURL(basepath, childname)
+ child = (yield request.locateChildResource(self, childname))
+ if child is None:
+ children.append((None, childpath + "/"))
+ else:
+ if child.isCollection():
+ children.append((child, childpath + "/"))
+ else:
+ children.append((child, childpath))
+
+ # Generate (acl,supported_privs) map
+ aclmap = {}
+ for resource, url in children:
+ acl = (yield resource.accessControlList(
+ request, inheritance=False, inherited_aces=inherited_aces
+ ))
+ supportedPrivs = (yield resource.supportedPrivileges(request))
+ aclmap.setdefault(
+ (pickle.dumps(acl), supportedPrivs),
+ (acl, supportedPrivs, [])
+ )[2].append((resource, url))
+
+ # Now determine whether each ace satisfies privileges
+ #print aclmap
+ allowed_collections = []
+ for items in aclmap.itervalues():
+ checked = (yield self.checkACLPrivilege(
+ request, items[0], items[1], privileges, inherited_aces
+ ))
+ if checked:
+ for resource, url in items[2]:
+ if okcallback:
+ okcallback(resource, url)
+ if resource.isCollection():
+ allowed_collections.append((resource, url))
+ else:
+ if badcallback:
+ for resource, url in items[2]:
+ badcallback(resource, url)
+
+ # TODO: Depth: infinity support
+ if depth == "infinity":
+ for collection, url in allowed_collections:
+ collection_inherited_aces = (
+ yield collection.inheritedACEsforChildren(request)
+ )
+ yield collection.findChildrenFaster(
+ depth, request, okcallback, badcallback,
+ names, privileges,
+ inherited_aces=collection_inherited_aces
+ )
+
+ returnValue(None)
+
+ @inlineCallbacks
+ def checkACLPrivilege(
+ self, request, acl, privyset, privileges, inherited_aces
+ ):
+
+ if acl is None:
+ returnValue(False)
+
+ principal = self.currentPrincipal(request)
+
+ # Other principal types don't make sense as actors.
+ assert principal.children[0].name in ("unauthenticated", "href"), (
+ "Principal is not an actor: %r" % (principal,)
+ )
+
+ acl = self.fullAccessControlList(acl, inherited_aces)
+
+ pending = list(privileges)
+ denied = []
+
+ for ace in acl.children:
+ for privilege in tuple(pending):
+ if not self.matchPrivilege(
+ davxml.Privilege(privilege), ace.privileges, privyset
+ ):
+ continue
+
+ match = (yield self.matchPrincipal(
+ principal, ace.principal, request
+ ))
+
+ if match:
+ if ace.invert:
+ continue
+ else:
+ if not ace.invert:
+ continue
+
+ pending.remove(privilege)
+
+ if not ace.allow:
+ denied.append(privilege)
+
+ returnValue(len(denied) + len(pending) == 0)
+
+ def fullAccessControlList(self, acl, inherited_aces):
+ """
+ Combine the supplied ACL with the precomputed inherited ACEs
+ for this resource and return the resulting L{davxml.ACL}
+ element.
+ """
+ #
+ # Inheritance is problematic. Here is what we do:
+ #
+ # 1. A private element <Twisted:inheritable> is defined for use inside
+ # of a <DAV:ace>. This private element is removed when the ACE is
+ # exposed via WebDAV.
+ #
+ # 2. When checking ACLs with inheritance resolution, the server must
+ # examine all parent resources of the current one looking for any
+ # <Twisted:inheritable> elements.
+ #
+ # If those are defined, the relevant ace is applied to the ACL on the
+ # current resource.
+ #
+
+ # Dynamically update privileges for those ace's that are inherited.
+ if acl:
+ aces = list(acl.children)
+ else:
+ aces = []
+
+ aces.extend(inherited_aces)
+
+ acl = davxml.ACL(*aces)
+
+ return acl
+
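The point of findChildrenFaster() above is that children are grouped by their serialised ACL and supported-privilege set, so the relatively expensive privilege evaluation runs once per distinct ACL rather than once per child. The grouping trick in isolation (a sketch with plain values standing in for resources and ACL elements, not the actual twext API):

    import cPickle as pickle

    def partition_by_acl(children, get_acl):
        # Map each distinct ACL (keyed by its pickled form) to the list
        # of children that share it, mirroring the aclmap built above.
        aclmap = {}
        for child in children:
            acl = get_acl(child)
            aclmap.setdefault(pickle.dumps(acl), (acl, []))[1].append(child)
        return aclmap

    def allowed_children(children, get_acl, check_privilege):
        # check_privilege() stands in for the deferred privilege check;
        # it is evaluated once per distinct ACL.
        allowed = []
        for acl, members in partition_by_acl(children, get_acl).values():
            if check_privilege(acl):
                allowed.extend(members)
        return allowed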
def supportedReports(self):
"""
See L{IDAVResource.supportedReports}.
- This implementation lists the three main ACL reports and expand-property.
+
+ This implementation lists the three main ACL reports and
+ expand-property.
"""
result = []
result.append(davxml.Report(davxml.ACLPrincipalPropSet(),))
@@ -648,22 +882,25 @@
"""
See L{IDAVResource.authorize}.
"""
- d = self.authenticate(request)
def whenAuthenticated(result):
privilegeCheck = self.checkPrivileges(request, privileges, recurse)
return privilegeCheck.addErrback(whenAccessDenied)
+
def whenAccessDenied(f):
f.trap(AccessDeniedError)
- # If we were unauthenticated to start with (no Authorization header
- # from client) then we should return an unauthorized response
- # instead to force the client to login if it can.
+ # If we were unauthenticated to start with (no
+ # Authorization header from client) then we should return
+ # an unauthorized response instead to force the client to
+ # login if it can.
- # We're not adding the headers here because this response class is
- # supposed to be a FORBIDDEN status code and "Authorization will
- # not help" according to RFC2616
+ # We're not adding the headers here because this response
+ # class is supposed to be a FORBIDDEN status code and
+ # "Authorization will not help" according to RFC2616
+
def translateError(response):
return Failure(HTTPError(response))
+
if request.authnUser == davxml.Principal(davxml.Unauthenticated()):
return UnauthorizedResponse.makeResponse(
request.credentialFactories,
@@ -671,33 +908,36 @@
else:
return translateError(
NeedPrivilegesResponse(request.uri, f.value.errors))
+
+ d = self.authenticate(request)
d.addCallback(whenAuthenticated)
return d
-
def authenticate(self, request):
"""
- Authenticate the given request against the portal, setting both
- C{request.authzUser} (a C{str}, the username for the purposes of
- authorization) and C{request.authnUser} (a C{str}, the username for the
- purposes of authentication) when it has been authenticated.
+ Authenticate the given request against the portal, setting
+ both C{request.authzUser} (a C{str}, the username for the
+ purposes of authorization) and C{request.authnUser} (a C{str},
+ the username for the purposes of authentication) when it has
+ been authenticated.
- In order to authenticate, the request must have been previously
- prepared by L{twext.web2.dav.auth.AuthenticationWrapper.hook} to have
- the necessary authentication metadata.
+ In order to authenticate, the request must have been
+ previously prepared by
+ L{twext.web2.dav.auth.AuthenticationWrapper.hook} to have the
+ necessary authentication metadata.
If the request was not thusly prepared, both C{authzUser} and
C{authnUser} will be L{davxml.Unauthenticated}.
@param request: the request which may contain authentication
- information and a reference to a portal to authenticate against.
-
+ information and a reference to a portal to authenticate
+ against.
@type request: L{twext.web2.iweb.IRequest}.
-
- @return: a L{Deferred} which fires with a 2-tuple of C{(authnUser,
- authzUser)} if either the request is unauthenticated OR contains
- valid credentials to authenticate as a principal, or errbacks with
- L{HTTPError} if the authentication scheme is unsupported, or the
+ @return: a L{Deferred} which fires with a 2-tuple of
+ C{(authnUser, authzUser)} if either the request is
+ unauthenticated OR contains valid credentials to
+ authenticate as a principal, or errbacks with L{HTTPError}
+ if the authentication scheme is unsupported, or the
credentials provided by the request are not valid.
"""
if not (hasattr(request, 'portal') and
@@ -711,28 +951,32 @@
if authHeader is not None:
if authHeader[0] not in request.credentialFactories:
- log.err("Client authentication scheme %s is not provided by server %s"
+ log.err("Client authentication scheme %s is not "
+ "provided by server %s"
% (authHeader[0], request.credentialFactories.keys()))
d = UnauthorizedResponse.makeResponse(
request.credentialFactories,
- request.remoteAddr)
+ request.remoteAddr
+ )
def _fail(response):
return Failure(HTTPError(response))
return d.addCallback(_fail)
else:
factory = request.credentialFactories[authHeader[0]]
- d = factory.decode(authHeader[1], request)
-
def gotCreds(creds):
return self.principalsForAuthID(
request, creds.username
).addCallback(gotDetails, creds)
- # Try to match principals in each principal collection on the resource
+
+ # Try to match principals in each principal collection
+ # on the resource
def gotDetails(details, creds):
authnPrincipal = IDAVPrincipalResource(details[0])
authzPrincipal = IDAVPrincipalResource(details[1])
- return PrincipalCredentials(authnPrincipal, authzPrincipal, creds)
+ return PrincipalCredentials(
+ authnPrincipal, authzPrincipal, creds
+ )
def login(pcreds):
return request.portal.login(
@@ -741,14 +985,22 @@
request.authnUser = result[1]
request.authzUser = result[2]
return (request.authnUser, request.authzUser)
+
def translateUnauthenticated(f):
f.trap(UnauthorizedLogin, LoginFailed)
log.msg("Authentication failed: %s" % (f.value,))
- return UnauthorizedResponse.makeResponse(
- request.credentialFactories, request.remoteAddr).addCallback(
- lambda response: Failure(HTTPError(response)))
- d.addCallback(gotCreds).addCallback(login).addCallbacks(
- gotAuth, translateUnauthenticated)
+ d = UnauthorizedResponse.makeResponse(
+ request.credentialFactories, request.remoteAddr
+ )
+ d.addCallback(
+ lambda response: Failure(HTTPError(response))
+ )
+ return d
+
+ d = factory.decode(authHeader[1], request)
+ d.addCallback(gotCreds)
+ d.addCallback(login)
+ d.addCallbacks(gotAuth, translateUnauthenticated)
return d
else:
request.authnUser = davxml.Principal(davxml.Unauthenticated())
@@ -762,7 +1014,8 @@
def currentPrincipal(self, request):
"""
@param request: the request being processed.
- @return: the current authorized principal, as derived from the given request.
+ @return: the current authorized principal, as derived from the
+ given request.
"""
if hasattr(request, "authzUser"):
return request.authzUser
@@ -780,8 +1033,8 @@
def defaultRootAccessControlList(self):
"""
- @return: the L{davxml.ACL} element containing the default access control
- list for this resource.
+ @return: the L{davxml.ACL} element containing the default
+ access control list for this resource.
"""
#
# The default behaviour is to allow GET access to everything
@@ -792,8 +1045,8 @@
def defaultAccessControlList(self):
"""
- @return: the L{davxml.ACL} element containing the default access control
- list for this resource.
+ @return: the L{davxml.ACL} element containing the default
+ access control list for this resource.
"""
#
# The default behaviour is no ACL; we should inherit from the parent
@@ -810,17 +1063,20 @@
"""
self.writeDeadProperty(acl)
+ @inlineCallbacks
def mergeAccessControlList(self, new_acl, request):
"""
- Merges the supplied access control list with the one on this resource.
- Merging means change all the non-inherited and non-protected ace's in
- the original, and do not allow the new one to specify an inherited or
- protected access control entry. This is the behaviour required by the
- C{ACL} request. (RFC 3744, section 8.1).
+ Merges the supplied access control list with the one on this
+ resource. Merging means changing all the non-inherited and
+ non-protected ACEs in the original, and not allowing the new
+ one to specify an inherited or protected access control
+ entry. This is the behaviour required by the C{ACL}
+ request. (RFC 3744, section 8.1).
+
@param new_acl: an L{davxml.ACL} element
@param request: the request being processed.
- @return: a tuple of the C{DAV:error} precondition element if an error
- occurred, C{None} otherwise.
+ @return: a tuple of the C{DAV:error} precondition element if
+ an error occurred, C{None} otherwise.
This implementation stores the ACL in the private property
"""
@@ -831,7 +1087,8 @@
# 2. Check that ace's on incoming do not match an inherited ace
# 3. Check that ace's on incoming all have deny before grant
# 4. Check that ace's on incoming do not use abstract privilege
- # 5. Check that ace's on incoming are supported (and are not inherited themselves)
+ # 5. Check that ace's on incoming are supported
+ # (and are not inherited themselves)
# 6. Check that ace's on incoming have valid principals
# 7. Copy the original
# 8. Remove all non-inherited and non-protected - and also inherited
@@ -839,16 +1096,15 @@
# 10. Verify that new acl is not in conflict with itself
# 11. Update acl on the resource
- # Get the current access control list, preserving any private properties on the ACEs as
- # we will need to keep those when we change the ACL.
- old_acl = waitForDeferred(self.accessControlList(request, expanding=True))
- yield old_acl
- old_acl = old_acl.getResult()
+ # Get the current access control list, preserving any private
+ # properties on the ACEs as we will need to keep those when we
+ # change the ACL.
+ old_acl = (yield self.accessControlList(request, expanding=True))
+
# Check disabled
if old_acl is None:
- yield None
- return
+ returnValue(None)
# Need to get list of supported privileges
supported = []
@@ -863,11 +1119,11 @@
elif isinstance(item, davxml.SupportedPrivilege):
addSupportedPrivilege(item)
- supportedPrivs = waitForDeferred(self.supportedPrivileges(request))
- yield supportedPrivs
- supportedPrivs = supportedPrivs.getResult()
+ supportedPrivs = (yield self.supportedPrivileges(request))
for item in supportedPrivs.children:
- assert isinstance(item, davxml.SupportedPrivilege), "Not a SupportedPrivilege: %r" % (item,)
+ assert isinstance(item, davxml.SupportedPrivilege), (
+ "Not a SupportedPrivilege: %r" % (item,)
+ )
addSupportedPrivilege(item)
# Steps 1 - 6
@@ -877,64 +1133,81 @@
if (ace.principal == old_ace.principal):
# Step 1
if old_ace.protected:
- log.err("Attempt to overwrite protected ace %r on resource %r" % (old_ace, self))
- yield (davxml.dav_namespace, "no-protected-ace-conflict")
- return
+ log.err("Attempt to overwrite protected ace %r "
+ "on resource %r"
+ % (old_ace, self))
+ returnValue((
+ davxml.dav_namespace,
+ "no-protected-ace-conflict"
+ ))
# Step 2
#
- # RFC3744 says that we either enforce the inherited ace
- # conflict or we ignore it but use access control evaluation
- # to determine whether there is any impact. Given that we
- # have the "inheritable" behavior it does not make sense to
- # disallow overrides of inherited ACEs since "inheritable"
- # cannot itself be controlled via protocol.
+ # RFC3744 says that we either enforce the
+ # inherited ace conflict or we ignore it but use
+ # access control evaluation to determine whether
+ # there is any impact. Given that we have the
+ # "inheritable" behavior it does not make sense to
+ # disallow overrides of inherited ACEs since
+ # "inheritable" cannot itself be controlled via
+ # protocol.
#
# Otherwise, we'd use this logic:
#
#elif old_ace.inherited:
- # log.err("Attempt to overwrite inherited ace %r on resource %r" % (old_ace, self))
- # yield (davxml.dav_namespace, "no-inherited-ace-conflict")
- # return
+ # log.err("Attempt to overwrite inherited ace %r "
+ # "on resource %r" % (old_ace, self))
+ # returnValue((
+ # davxml.dav_namespace,
+ # "no-inherited-ace-conflict"
+ # ))
# Step 3
if ace.allow and got_deny:
- log.err("Attempt to set grant ace %r after deny ace on resource %r" % (ace, self))
- yield (davxml.dav_namespace, "deny-before-grant")
- return
+ log.err("Attempt to set grant ace %r after deny ace "
+ "on resource %r"
+ % (ace, self))
+ returnValue((davxml.dav_namespace, "deny-before-grant"))
got_deny = not ace.allow
- # Step 4: ignore as this server has no abstract privileges (FIXME: none yet?)
+ # Step 4: ignore as this server has no abstract privileges
+ # (FIXME: none yet?)
# Step 5
for privilege in ace.privileges:
if privilege.children[0] not in supported:
- log.err("Attempt to use unsupported privilege %r in ace %r on resource %r" % (privilege.children[0], ace, self))
- yield (davxml.dav_namespace, "not-supported-privilege")
- return
+ log.err("Attempt to use unsupported privilege %r "
+ "in ace %r on resource %r"
+ % (privilege.children[0], ace, self))
+ returnValue((
+ davxml.dav_namespace,
+ "not-supported-privilege"
+ ))
+
if ace.protected:
- log.err("Attempt to create protected ace %r on resource %r" % (ace, self))
- yield (davxml.dav_namespace, "no-ace-conflict")
- return
+ log.err("Attempt to create protected ace %r on resource %r"
+ % (ace, self))
+ returnValue((davxml.dav_namespace, "no-ace-conflict"))
+
if ace.inherited:
- log.err("Attempt to create inherited ace %r on resource %r" % (ace, self))
- yield (davxml.dav_namespace, "no-ace-conflict")
- return
+ log.err("Attempt to create inherited ace %r on resource %r"
+ % (ace, self))
+ returnValue((davxml.dav_namespace, "no-ace-conflict"))
# Step 6
- valid = waitForDeferred(self.validPrincipal(ace.principal, request))
- yield valid
- valid = valid.getResult()
+ valid = (yield self.validPrincipal(ace.principal, request))
if not valid:
- log.err("Attempt to use unrecognized principal %r in ace %r on resource %r" % (ace.principal, ace, self))
- yield (davxml.dav_namespace, "recognized-principal")
- return
+ log.err("Attempt to use unrecognized principal %r "
+ "in ace %r on resource %r"
+ % (ace.principal, ace, self))
+ returnValue((davxml.dav_namespace, "recognized-principal"))
# Step 8 & 9
#
- # Iterate through the old ones and replace any that are in the new set, or remove
- # the non-inherited/non-protected not in the new set
+ # Iterate through the old ones and replace any that are in the
+ # new set, or remove the non-inherited/non-protected not in
+ # the new set
#
new_aces = [ace for ace in new_acl.children]
new_set = []
@@ -954,51 +1227,56 @@
# Step 11
self.writeNewACEs(new_set)
- yield None
-
- mergeAccessControlList = deferredGenerator(mergeAccessControlList)
+ returnValue(None)
def writeNewACEs(self, new_aces):
"""
- Write a new ACL to the resource's property store.
- This is a separate method so that it can be overridden by
- resources that need to do extra processing of ACLs being set
- via the ACL command.
+ Write a new ACL to the resource's property store. This is a
+ separate method so that it can be overridden by resources that
+ need to do extra processing of ACLs being set via the ACL
+ command.
@param new_aces: C{list} of L{ACE} for ACL being set.
"""
self.setAccessControlList(davxml.ACL(*new_aces))
def matchPrivilege(self, privilege, ace_privileges, supportedPrivileges):
for ace_privilege in ace_privileges:
- if privilege == ace_privilege or ace_privilege.isAggregateOf(privilege, supportedPrivileges):
+ if (
+ privilege == ace_privilege or
+ ace_privilege.isAggregateOf(privilege, supportedPrivileges)
+ ):
return True
return False
- def checkPrivileges(self, request, privileges, recurse=False, principal=None, inherited_aces=None):
+ @inlineCallbacks
+ def checkPrivileges(
+ self, request, privileges, recurse=False,
+ principal=None, inherited_aces=None
+ ):
"""
Check whether the given principal has the given privileges.
(RFC 3744, section 5.5)
+
@param request: the request being processed.
- @param privileges: an iterable of L{davxml.WebDAVElement} elements
- denoting access control privileges.
+ @param privileges: an iterable of L{davxml.WebDAVElement}
+ elements denoting access control privileges.
@param recurse: C{True} if a recursive check on all child
resources of this resource should be performed as well,
C{False} otherwise.
@param principal: the L{davxml.Principal} to check privileges
for. If C{None}, it is deduced from C{request} by calling
L{currentPrincipal}.
- @param inherited_aces: a list of L{davxml.ACE}s corresponding to the precomputed
- inheritable aces from the parent resource hierarchy.
- @return: a L{Deferred} that callbacks with C{None} or errbacks with an
- L{AccessDeniedError}
+ @param inherited_aces: a list of L{davxml.ACE}s corresponding
+ to the precomputed inheritable aces from the parent
+ resource hierarchy.
+ @return: a L{Deferred} that callbacks with C{None} or errbacks
+ with an L{AccessDeniedError}
"""
if principal is None:
principal = self.currentPrincipal(request)
- supportedPrivs = waitForDeferred(self.supportedPrivileges(request))
- yield supportedPrivs
- supportedPrivs = supportedPrivs.getResult()
+ supportedPrivs = (yield self.supportedPrivileges(request))
# Other principals types don't make sense as actors.
assert principal.children[0].name in ("unauthenticated", "href"), (
@@ -1010,15 +1288,18 @@
resources = [(self, None)]
if recurse:
- x = self.findChildren("infinity", request, lambda x, y: resources.append((x,y)))
- x = waitForDeferred(x)
- yield x
- x.getResult()
+ yield self.findChildren(
+ "infinity", request,
+ lambda x, y: resources.append((x,y))
+ )
for resource, uri in resources:
- acl = waitForDeferred(resource.accessControlList(request, inherited_aces=inherited_aces))
- yield acl
- acl = acl.getResult()
+ acl = (yield
+ resource.accessControlList(
+ request,
+ inherited_aces=inherited_aces
+ )
+ )
# Check for disabled
if acl is None:
@@ -1030,12 +1311,15 @@
for ace in acl.children:
for privilege in tuple(pending):
- if not self.matchPrivilege(davxml.Privilege(privilege), ace.privileges, supportedPrivs):
+ if not self.matchPrivilege(
+ davxml.Privilege(privilege),
+ ace.privileges, supportedPrivs
+ ):
continue
- match = waitForDeferred(self.matchPrincipal(principal, ace.principal, request))
- yield match
- match = match.getResult()
+ match = (yield
+ self.matchPrincipal(principal, ace.principal, request)
+ )
if match:
if ace.invert:
@@ -1057,16 +1341,14 @@
if errors:
raise AccessDeniedError(errors,)
- yield None
+ returnValue(None)
- checkPrivileges = deferredGenerator(checkPrivileges)
-
def supportedPrivileges(self, request):
"""
See L{IDAVResource.supportedPrivileges}.
- This implementation returns a supported privilege set containing only
- the DAV:all privilege.
+ This implementation returns a supported privilege set
+ containing only the DAV:all privilege.
"""
return succeed(allPrivilegeSet)
@@ -1074,36 +1356,41 @@
"""
See L{IDAVResource.currentPrivileges}.
- This implementation returns a current privilege set containing only
- the DAV:all privilege.
+ This implementation returns a current privilege set containing
+ only the DAV:all privilege.
"""
current = self.currentPrincipal(request)
return self.privilegesForPrincipal(current, request)
- def accessControlList(self, request, inheritance=True, expanding=False, inherited_aces=None):
+ @inlineCallbacks
+ def accessControlList(
+ self, request, inheritance=True,
+ expanding=False, inherited_aces=None
+ ):
"""
See L{IDAVResource.accessControlList}.
This implementation looks up the ACL in the private property
- C{(L{twisted_private_namespace}, "acl")}.
- If no ACL has been stored for this resource, it returns the value
- returned by C{defaultAccessControlList}.
- If access is disabled it will return C{None}.
+ C{(L{twisted_private_namespace}, "acl")}. If no ACL has been
+ stored for this resource, it returns the value returned by
+ C{defaultAccessControlList}. If access is disabled it will
+ return C{None}.
"""
#
# Inheritance is problematic. Here is what we do:
#
- # 1. A private element <Twisted:inheritable> is defined for use inside
- # of a <DAV:ace>. This private element is removed when the ACE is
- # exposed via WebDAV.
+ # 1. A private element <Twisted:inheritable> is defined for
+ # use inside of a <DAV:ace>. This private element is
+ # removed when the ACE is exposed via WebDAV.
#
- # 2. When checking ACLs with inheritance resolution, the server must
- # examine all parent resources of the current one looking for any
- # <Twisted:inheritable> elements.
+ # 2. When checking ACLs with inheritance resolution, the
+ # server must examine all parent resources of the current
+ # one looking for any <Twisted:inheritable> elements.
#
# If those are defined, the relevant ace is applied to the ACL on the
# current resource.
#
+
myURL = None
def getMyURL():
@@ -1119,7 +1406,8 @@
acl = self.readDeadProperty(davxml.ACL)
except HTTPError, e:
assert e.response.code == responsecode.NOT_FOUND, (
- "Expected %s response from readDeadProperty() exception, not %s"
+ "Expected %s response from readDeadProperty() exception, "
+ "not %s"
% (responsecode.NOT_FOUND, e.response.code)
)
@@ -1144,21 +1432,18 @@
if myURL != "/":
parentURL = parentForURL(myURL)
- parent = waitForDeferred(request.locateResource(parentURL))
- yield parent
- parent = parent.getResult()
+ parent = (yield request.locateResource(parentURL))
if parent:
- parent_acl = waitForDeferred(
- parent.accessControlList(request, inheritance=True, expanding=True)
+ parent_acl = (yield
+ parent.accessControlList(
+ request, inheritance=True, expanding=True
+ )
)
- yield parent_acl
- parent_acl = parent_acl.getResult()
# Check disabled
if parent_acl is None:
- yield None
- return
+ returnValue(None)
for ace in parent_acl.children:
if ace.inherited:
@@ -1167,7 +1452,9 @@
# Adjust ACE for inherit on this resource
children = list(ace.children)
children.remove(TwistedACLInheritable())
- children.append(davxml.Inherited(davxml.HRef(parentURL)))
+ children.append(
+ davxml.Inherited(davxml.HRef(parentURL))
+ )
aces.append(davxml.ACE(*children))
else:
aces.extend(inherited_aces)
@@ -1186,47 +1473,53 @@
acl = davxml.ACL(*aces)
- yield acl
+ returnValue(acl)
- accessControlList = deferredGenerator(accessControlList)
-
def inheritedACEsforChildren(self, request):
"""
- Do some optimisation of access control calculation by determining any inherited ACLs outside of
- the child resource loop and supply those to the checkPrivileges on each child.
+ Do some optimisation of access control calculation by
+ determining any inherited ACLs outside of the child resource
+ loop and supplying those to checkPrivileges on each child.
@param request: the L{IRequest} for the request in progress.
- @return: a C{list} of L{Ace}s that child resources of this one will inherit.
+ @return: a C{list} of L{Ace}s that child resources of this one
+ will inherit.
"""
- # Get the parent ACLs with inheritance and preserve the <inheritable> element.
- parent_acl = waitForDeferred(self.accessControlList(request, inheritance=True, expanding=True))
- yield parent_acl
- parent_acl = parent_acl.getResult()
-
- # Check disabled
- if parent_acl is None:
- yield None
- return
+ # Get the parent ACLs with inheritance and preserve the
+ # <inheritable> element.
- # Filter out those that are not inheritable (and remove the inheritable element from those that are)
- aces = []
- for ace in parent_acl.children:
- if ace.inherited:
- aces.append(ace)
- elif TwistedACLInheritable() in ace.children:
- # Adjust ACE for inherit on this resource
- children = list(ace.children)
- children.remove(TwistedACLInheritable())
- children.append(davxml.Inherited(davxml.HRef(request.urlForResource(self))))
- aces.append(davxml.ACE(*children))
- yield aces
+ def gotACL(parent_acl):
+ # Check disabled
+ if parent_acl is None:
+ return None
- inheritedACEsforChildren = deferredGenerator(inheritedACEsforChildren)
+ # Filter out those that are not inheritable (and remove
+ # the inheritable element from those that are)
+ aces = []
+ for ace in parent_acl.children:
+ if ace.inherited:
+ aces.append(ace)
+ elif TwistedACLInheritable() in ace.children:
+ # Adjust ACE for inherit on this resource
+ children = list(ace.children)
+ children.remove(TwistedACLInheritable())
+ children.append(
+ davxml.Inherited(
+ davxml.HRef(request.urlForResource(self))
+ )
+ )
+ aces.append(davxml.ACE(*children))
+ return aces
+ d = self.accessControlList(request, inheritance=True, expanding=True)
+ d.addCallback(gotACL)
+ return d
+
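(Editor's sketch, not part of the patch: the calling pattern the optimisation above is aimed at. The inherited ACEs are computed once for the collection and handed to findChildren(), whose new signature appears further down in this file, so each child's privilege check avoids re-walking the parent chain. The helper name, the depth of "1" and the Read privilege are illustrative.)

    from twext.web2.dav import davxml

    def readableChildURLs(collection, request):
        # Hypothetical helper: collect the URLs of children the current
        # principal may read, using one shared set of inherited ACEs.
        def gotInherited(inherited_aces):
            urls = []
            d = collection.findChildren(
                "1", request,
                lambda child, uri: urls.append(uri),
                privileges=(davxml.Read(),),
                inherited_aces=inherited_aces,
            )
            d.addCallback(lambda _: urls)
            return d

        d = collection.inheritedACEsforChildren(request)
        d.addCallback(gotInherited)
        return d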
def inheritedACLSet(self):
"""
- @return: a sequence of L{davxml.HRef}s from which ACLs are inherited.
+ @return: a sequence of L{davxml.HRef}s from which ACLs are
+ inherited.
This implementation returns an empty set.
"""
@@ -1234,26 +1527,28 @@
def principalsForAuthID(self, request, authid):
"""
- Return authentication and authorization prinicipal identifiers for the
- authentication identifer passed in. In this implementation authn and authz
- principals are the same.
+        Return authentication and authorization principal identifiers
+        for the authentication identifier passed in. In this
+        implementation authn and authz principals are the same.
@param request: the L{IRequest} for the request in progress.
@param authid: a string containing the
authentication/authorization identifier for the principal
to lookup.
@return: a deferred tuple of two tuples. Each tuple is
- C{(principal, principalURI)} where: C{principal} is the L{Principal}
- that is found; {principalURI} is the C{str} URI of the principal.
- The first tuple corresponds to authentication identifiers,
- the second to authorization identifiers.
- It will errback with an HTTPError(responsecode.FORBIDDEN) if
- the principal isn't found.
+ C{(principal, principalURI)} where: C{principal} is the
+            L{Principal} that is found; C{principalURI} is the C{str}
+ URI of the principal. The first tuple corresponds to
+ authentication identifiers, the second to authorization
+ identifiers. It will errback with an
+ HTTPError(responsecode.FORBIDDEN) if the principal isn't
+ found.
"""
authnPrincipal = self.findPrincipalForAuthID(authid)
if authnPrincipal is None:
- log.msg("Could not find the principal resource for user id: %s" % (authid,))
+ log.msg("Could not find the principal resource for user id: %s"
+ % (authid,))
raise HTTPError(responsecode.FORBIDDEN)
d = self.authorizationPrincipal(request, authid, authnPrincipal)
@@ -1262,16 +1557,17 @@
def findPrincipalForAuthID(self, authid):
"""
- Return authentication and authorization principal identifiers for the
- authentication identifer passed in. In this implementation authn and
- authz principals are the same.
+        Return authentication and authorization principal identifiers
+        for the authentication identifier passed in. In this
+        implementation authn and authz principals are the same.
@param authid: a string containing the
authentication/authorization identifier for the principal
to lookup.
- @return: a tuple of C{(principal, principalURI)} where: C{principal} is the L{Principal}
- that is found; {principalURI} is the C{str} URI of the principal.
- If not found return None.
+ @return: a tuple of C{(principal, principalURI)} where:
+ C{principal} is the L{Principal} that is found;
+            C{principalURI} is the C{str} URI of the principal. If not
+ found return None.
"""
for collection in self.principalCollections():
principal = collection.principalForUser(authid)
@@ -1281,15 +1577,19 @@
def authorizationPrincipal(self, request, authid, authnPrincipal):
"""
- Determine the authorization principal for the given request and authentication principal.
- This implementation simply uses aht authentication principalk as the authoization principal.
+ Determine the authorization principal for the given request
+ and authentication principal. This implementation simply uses
+        the authentication principal as the authorization principal.
@param request: the L{IRequest} for the request in progress.
- @param authid: a string containing the uthentication/authorization identifier
- for the principal to lookup.
- @param authnPrincipal: the L{IDAVPrincipal} for the authenticated principal
- @return: a deferred result C{tuple} of (L{IDAVPrincipal}, C{str}) containing the authorization principal
- resource and URI respectively.
+ @param authid: a string containing the
+ authentication/authorization identifier for the principal
+ to lookup.
+ @param authnPrincipal: the L{IDAVPrincipal} for the
+ authenticated principal
+ @return: a deferred result C{tuple} of (L{IDAVPrincipal},
+ C{str}) containing the authorization principal resource
+ and URI respectively.
"""
return succeed(authnPrincipal)
@@ -1297,6 +1597,7 @@
"""
        Check whether the two principals are exactly the same in terms of
elements and data.
+
@param principal1: a L{Principal} to test.
@param principal2: a L{Principal} to test.
@return: C{True} if they are the same, C{False} otherwise.
@@ -1308,9 +1609,15 @@
if type(principal1) == type(principal2):
if isinstance(principal1, davxml.Property):
- return type(principal1.children[0]) == type(principal2.children[0])
+ return (
+ type(principal1.children[0]) ==
+ type(principal2.children[0])
+ )
elif isinstance(principal1, davxml.HRef):
- return str(principal1.children[0]) == str(principal2.children[0])
+ return (
+ str(principal1.children[0]) ==
+ str(principal2.children[0])
+ )
else:
return True
else:
@@ -1318,100 +1625,109 @@
def matchPrincipal(self, principal1, principal2, request):
"""
- Check whether the principal1 is a principal in the set defined by
- principal2.
- @param principal1: a L{Principal} to test. C{principal1} must contain
- a L{davxml.HRef} or L{davxml.Unauthenticated} element.
+        Check whether principal1 is a principal in the set defined by
+        principal2.
+
+ @param principal1: a L{Principal} to test. C{principal1} must
+ contain a L{davxml.HRef} or L{davxml.Unauthenticated}
+ element.
@param principal2: a L{Principal} to test.
@param request: the request being processed.
@return: C{True} if they match, C{False} otherwise.
"""
# See RFC 3744, section 5.5.1
- principals = (principal1, principal2)
-
        # The interesting part of a principal is its one child
- principal1, principal2 = [p.children[0] for p in principals]
+ principal1 = principal1.children[0]
+ principal2 = principal2.children[0]
- if isinstance(principal2, davxml.All):
- yield True
- return
+ if not hasattr(request, "matchPrincipals"):
+ request.matchPrincipals = {}
- elif isinstance(principal2, davxml.Authenticated):
- if isinstance(principal1, davxml.Unauthenticated):
- yield False
- return
- elif isinstance(principal1, davxml.All):
- yield False
- return
- else:
- yield True
- return
+ cache_key = (str(principal1), str(principal2))
- elif isinstance(principal2, davxml.Unauthenticated):
- if isinstance(principal1, davxml.Unauthenticated):
- yield True
- return
- else:
- yield False
- return
+ match = request.matchPrincipals.get(cache_key, None)
+ if match is not None:
+ return succeed(match)
- elif isinstance(principal1, davxml.Unauthenticated):
- yield False
- return
+ def doMatch():
+ if isinstance(principal2, davxml.All):
+ return succeed(True)
- assert isinstance(principal1, davxml.HRef), "Not an HRef: %r" % (principal1,)
+ elif isinstance(principal2, davxml.Authenticated):
+ if isinstance(principal1, davxml.Unauthenticated):
+ return succeed(False)
+ elif isinstance(principal1, davxml.All):
+ return succeed(False)
+ else:
+ return succeed(True)
- principal2 = waitForDeferred(self.resolvePrincipal(principal2, request))
- yield principal2
- principal2 = principal2.getResult()
+ elif isinstance(principal2, davxml.Unauthenticated):
+ if isinstance(principal1, davxml.Unauthenticated):
+ return succeed(True)
+ else:
+ return succeed(False)
- assert principal2 is not None, "principal2 is None"
+ elif isinstance(principal1, davxml.Unauthenticated):
+ return succeed(False)
- # Compare two HRefs and do group membership test as well
- if principal1 == principal2:
- yield True
- return
-
- ismember = waitForDeferred(self.principalIsGroupMember(str(principal1), str(principal2), request))
- yield ismember
- ismember = ismember.getResult()
+ assert isinstance(principal1, davxml.HRef), (
+ "Not an HRef: %r" % (principal1,)
+ )
- if ismember:
- yield True
- return
-
- yield False
+ def resolved(principal2):
+ assert principal2 is not None, "principal2 is None"
- matchPrincipal = deferredGenerator(matchPrincipal)
+ # Compare two HRefs and do group membership test as well
+ if principal1 == principal2:
+ return True
- @deferredGenerator
+ return self.principalIsGroupMember(
+ str(principal1), str(principal2), request
+ )
+
+ d = self.resolvePrincipal(principal2, request)
+ d.addCallback(resolved)
+ return d
+
+ def cache(match):
+ request.matchPrincipals[cache_key] = match
+ return match
+
+ d = doMatch()
+ d.addCallback(cache)
+ return d
+
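(Editor's note, not part of the patch: the practical effect of the new request.matchPrincipals dictionary is per-request memoisation, so that repeated matches of the same principal pair - common when one ACE principal is checked against many resources in a single request - skip the group-membership resolution. A small illustration, where resource, userPrincipal and ace are hypothetical and userPrincipal is a davxml.Principal wrapping an HRef:)

    # First call resolves ace.principal (including group expansion if
    # needed) and stores the boolean result on the request.
    d = resource.matchPrincipal(userPrincipal, ace.principal, request)

    def matchAgain(matched):
        # Any later call with the same pair inside this request is
        # answered straight from request.matchPrincipals.
        return resource.matchPrincipal(userPrincipal, ace.principal, request)

    d.addCallback(matchAgain)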
def principalIsGroupMember(self, principal1, principal2, request):
"""
Check whether one principal is a group member of another.
@param principal1: C{str} principalURL for principal to test.
- @param principal2: C{str} principalURL for possible group principal to test against.
+ @param principal2: C{str} principalURL for possible group
+ principal to test against.
@param request: the request being processed.
- @return: L{Deferred} with result C{True} if principal1 is a member of principal2, C{False} otherwise
+ @return: L{Deferred} with result C{True} if principal1 is a
+ member of principal2, C{False} otherwise
"""
-
- d = waitForDeferred(request.locateResource(principal2))
- yield d
- group = d.getResult()
+ def gotGroup(group):
+ # Get principal resource for principal2
+ if group and isinstance(group, DAVPrincipalResource):
+ def gotMembers(members):
+ for member in members:
+ if member.principalURL() == principal1:
+ return True
+ return False
- # Get principal resource for principal2
- if group and isinstance(group, DAVPrincipalResource):
- d = waitForDeferred(group.expandedGroupMembers())
- yield d
- members = d.getResult()
- for member in members:
- if member.principalURL() == principal1:
- yield True
- return
-
- yield False
+ d = group.expandedGroupMembers()
+ d.addCallback(gotMembers)
+ return d
+
+ return False
+ d = request.locateResource(principal2)
+ d.addCallback(gotGroup)
+ return d
+
def validPrincipal(self, ace_principal, request):
"""
Check whether the supplied principal is valid for this resource.
@@ -1419,8 +1735,9 @@
@param request: the request being processed.
@return C{True} if C{ace_principal} is valid, C{False} otherwise.
- This implementation tests for a valid element type and checks for an
- href principal that exists inside of a principal collection.
+ This implementation tests for a valid element type and checks
+ for an href principal that exists inside of a principal
+ collection.
"""
def defer():
#
@@ -1430,8 +1747,10 @@
real_principal = ace_principal.children[0]
if isinstance(real_principal, davxml.Property):
- # See comments in matchPrincipal(). We probably need some common code.
- log.err("Encountered a property principal (%s), but handling is not implemented. Invalid for ACL use."
+ # See comments in matchPrincipal(). We probably need
+ # some common code.
+ log.err("Encountered a property principal (%s), "
+ "but handling is not implemented."
% (real_principal,))
return False
@@ -1446,18 +1765,24 @@
"""
Check whether the supplied principal (in the form of an Href)
is valid for this resource.
+
@param href_principal: the L{Href} element to test
@param request: the request being processed.
- @return C{True} if C{href_principal} is valid, C{False} otherwise.
+ @return C{True} if C{href_principal} is valid, C{False}
+ otherwise.
- This implementation tests for a href element that corresponds to
- a principal resource and matches the principal-URL.
+ This implementation tests for a href element that corresponds
+ to a principal resource and matches the principal-URL.
"""
- # Must have the principal resource type and must match the principal-URL
+ # Must have the principal resource type and must match the
+ # principal-URL
def _matchPrincipalURL(resource):
- return isPrincipalResource(resource) and resource.principalURL() == str(href_principal)
+ return (
+ isPrincipalResource(resource) and
+ resource.principalURL() == str(href_principal)
+ )
d = request.locateResource(str(href_principal))
d.addCallback(_matchPrincipalURL)
@@ -1465,37 +1790,39 @@
def resolvePrincipal(self, principal, request):
"""
- Resolves a L{davxml.Principal} element into a L{davxml.HRef} element
- if possible. Specifically, the given C{principal}'s contained
- element is resolved.
+ Resolves a L{davxml.Principal} element into a L{davxml.HRef}
+ element if possible. Specifically, the given C{principal}'s
+ contained element is resolved.
- L{davxml.Property} is resolved to the URI in the contained property.
+ L{davxml.Property} is resolved to the URI in the contained
+ property.
L{davxml.Self} is resolved to the URI of this resource.
L{davxml.HRef} elements are returned as-is.
- All other principals, including meta-principals (eg. L{davxml.All}),
- resolve to C{None}.
+ All other principals, including meta-principals
+        (e.g. L{davxml.All}), resolve to C{None}.
- @param principal: the L{davxml.Principal} child element to resolve.
+ @param principal: the L{davxml.Principal} child element to
+ resolve.
@param request: the request being processed.
@return: a deferred L{davxml.HRef} element or C{None}.
"""
if isinstance(principal, davxml.Property):
- # raise NotImplementedError("Property principals are not implemented.")
+ # NotImplementedError("Property principals are not implemented.")
#
- # We can't raise here without potentially crippling the server in a way
- # that can't be fixed over the wire, so let's refuse the match and log
- # an error instead.
+ # We can't raise here without potentially crippling the
+ # server in a way that can't be fixed over the wire, so
+ # let's refuse the match and log an error instead.
#
# Note: When fixing this, also fix validPrincipal()
#
- log.err("Encountered a property principal (%s), but handling is not implemented; invalid for ACL use."
+ log.err("Encountered a property principal (%s), "
+ "but handling is not implemented; invalid for ACL use."
% (principal,))
- yield None
- return
+ return succeed(None)
#
# FIXME: I think this is wrong - we need to get the
@@ -1504,82 +1831,83 @@
namespace = principal.attributes.get(["namespace"], dav_namespace)
name = principal.attributes["name"]
- principal = waitForDeferred(self.readProperty((namespace, name), request))
- yield principal
- try:
- principal = principal.getResult()
- except HTTPError, e:
- assert e.response.code == responsecode.NOT_FOUND, (
- "Expected %s response from readProperty() exception, not %s"
- % (responsecode.NOT_FOUND, e.response.code)
- )
- yield None
- return
+            def readFailed(failure):
+                failure.trap(HTTPError)
+                assert failure.value.response.code == responsecode.NOT_FOUND, (
+                    "%s (!= %s) status from readProperty() exception"
+                    % (failure.value.response.code, responsecode.NOT_FOUND)
+                )
+                return None
-            if not isinstance(principal, davxml.Principal):
-                log.err("Non-principal value in property {%s}%s referenced by property principal."
-                    % (namespace, name))
-                yield None
-                return
+            def gotPrincipal(principal):
+                if not isinstance(principal, davxml.Principal):
+                    log.err("Non-principal value in property {%s}%s "
+                            "referenced by property principal."
+                            % (namespace, name))
+                    return None
-            if len(principal.children) != 1:
-                yield None
-                return
+                if len(principal.children) != 1:
+                    return None
-            # The interesting part of a principal is it's one child
-            principal = principal.children[0]
+                # The interesting part of a principal is its one child.
+                # XXXXXX FIXME XXXXXX: the resolved child still needs
+                # the HRef / meta-principal handling below applied to it.
+                return principal.children[0]
+
+            d = self.readProperty((namespace, name), request)
+            d.addCallbacks(gotPrincipal, readFailed)
+            return d
+
elif isinstance(principal, davxml.Self):
try:
self = IDAVPrincipalResource(self)
except TypeError:
- log.err("DAV:self ACE is set on non-principal resource %r" % (self,))
- yield None
- return
+ log.err("DAV:self ACE is set on non-principal resource %r"
+ % (self,))
+ return succeed(None)
principal = davxml.HRef(self.principalURL())
if isinstance(principal, davxml.HRef):
- yield principal
- return
- else:
- yield None
+ return succeed(principal)
- assert isinstance(principal, (davxml.All, davxml.Authenticated, davxml.Unauthenticated)), (
- "Not a meta-principal: %r" % (principal,)
- )
+ assert isinstance(principal, (
+ davxml.All,
+ davxml.Authenticated,
+ davxml.Unauthenticated
+ )), "Not a meta-principal: %r" % (principal,)
- resolvePrincipal = deferredGenerator(resolvePrincipal)
+ return succeed(None)
+ @inlineCallbacks
def privilegesForPrincipal(self, principal, request):
"""
See L{IDAVResource.privilegesForPrincipal}.
"""
# NB Return aggregate privileges expanded.
- acl = waitForDeferred(self.accessControlList(request))
- yield acl
- acl = acl.getResult()
+ acl = (yield self.accessControlList(request))
# Check disabled
if acl is None:
- yield []
+ returnValue(())
granted = []
denied = []
for ace in acl.children:
- # First see if the ace's principal affects the principal being tested.
- # FIXME: support the DAV:invert operation
+ # First see if the ace's principal affects the principal
+ # being tested. FIXME: support the DAV:invert operation
- match = waitForDeferred(self.matchPrincipal(principal, ace.principal, request))
- yield match
- match = match.getResult()
+ match = (yield
+ self.matchPrincipal(principal, ace.principal, request)
+ )
if match:
# Expand aggregate privileges
ps = []
- supportedPrivs = waitForDeferred(self.supportedPrivileges(request))
- yield supportedPrivs
- supportedPrivs = supportedPrivs.getResult()
+ supportedPrivs = (yield
+ self.supportedPrivileges(request)
+ )
for p in ace.privileges:
ps.extend(p.expandAggregate(supportedPrivs))
@@ -1590,12 +1918,10 @@
denied.extend([p for p in ps if p not in denied])
# Subtract denied from granted
- allowed = [p for p in granted if p not in denied]
+ allowed = tuple(p for p in granted if p not in denied)
- yield allowed
+ returnValue(allowed)
- privilegesForPrincipal = deferredGenerator(privilegesForPrincipal)
-
def matchACEinACL(self, acl, ace):
"""
Find an ACE in the ACL that matches the supplied ACE's principal.
@@ -1625,102 +1951,99 @@
##
"""
- The basic policy here is to define a private 'quota-root' property on a collection.
- That property will contain the maximum allowed bytes for the collections and all
- its contents.
+ The basic policy here is to define a private 'quota-root' property
+ on a collection. That property will contain the maximum allowed
+    bytes for the collection and all its contents.
- In order to determine the quota property values on a resource, the server must look
- for the private property on that resource and any of its parents. If found on a parent,
- then that parent should be queried for quota information. If not found, no quota
- exists for the resource.
+ In order to determine the quota property values on a resource, the
+ server must look for the private property on that resource and any
+ of its parents. If found on a parent, then that parent should be
+ queried for quota information. If not found, no quota exists for
+ the resource.
- To determine tha actual quota in use we will cache the used byte count on the quota-root
- collection in another private property. It is the servers responsibility to
- keep that property up to date by adjusting it after every PUT, DELETE, COPY,
- MOVE, MKCOL, PROPPATCH, ACL, POST or any other method that may affect the size of
- stored data. If the private property is not present, the server will fall back to
- getting the size by iterating over all resources (this is done in static.py).
-
+    To determine the actual quota in use we will cache the used byte
+    count on the quota-root collection in another private property. It
+    is the server's responsibility to keep that property up to date by
+ adjusting it after every PUT, DELETE, COPY, MOVE, MKCOL,
+ PROPPATCH, ACL, POST or any other method that may affect the size
+ of stored data. If the private property is not present, the server
+ will fall back to getting the size by iterating over all resources
+ (this is done in static.py).
"""
def quota(self, request):
"""
- Get current available & used quota values for this resource's quota root
- collection.
+ Get current available & used quota values for this resource's
+ quota root collection.
- @return: an L{Defered} with result C{tuple} containing two C{int}'s the first is
- quota-available-bytes, the second is quota-used-bytes, or
- C{None} if quota is not defined on the resource.
+        @return: a L{Deferred} with result C{tuple} containing two
+            C{int}s: the first is quota-available-bytes, the second is
+            quota-used-bytes, or C{None} if quota is not defined on
+            the resource.
"""
# See if already cached
- if not hasattr(request, "quota"):
+ if hasattr(request, "quota"):
+ if request.quota.has_key(self):
+ return succeed(request.quota[self])
+ else:
request.quota = {}
- if request.quota.has_key(self):
- yield request.quota[self]
- return
# Check this resource first
if self.isCollection():
qroot = self.quotaRoot(request)
if qroot is not None:
- used = waitForDeferred(self.currentQuotaUse(request))
- yield used
- used = used.getResult()
- available = qroot - used
- if available < 0:
- available = 0
- request.quota[self] = (available, used)
- yield request.quota[self]
- return
+ def gotUsage(used):
+ available = qroot - used
+ if available < 0:
+ available = 0
+ request.quota[self] = (available, used)
+ return (available, used)
+
+ d = self.currentQuotaUse(request)
+ d.addCallback(gotUsage)
+ return d
# Check the next parent
url = request.urlForResource(self)
if url != "/":
- parent = waitForDeferred(request.locateResource(parentForURL(url)))
- yield parent
- parent = parent.getResult()
- d = waitForDeferred(parent.quota(request))
- yield d
- request.quota[self] = d.getResult()
+ def gotQuota(quota):
+ request.quota[self] = quota
+ return quota
+
+ d = request.locateResource(parentForURL(url))
+ d.addCallback(lambda p: p.quota(request))
+ d.addCallback(gotQuota)
+ return d
else:
request.quota[self] = None
- yield request.quota[self]
- return
+ return succeed(request.quota[self])
- quota = deferredGenerator(quota)
-
def hasQuota(self, request):
"""
- Check whether this resource is undre quota control by checking each parent to see if
- it has a quota root.
+        Check whether this resource is under quota control by checking
+ each parent to see if it has a quota root.
@return: C{True} if under quota control, C{False} if not.
"""
# Check this one first
if self.hasQuotaRoot(request):
- yield True
- return
+ return succeed(True)
# Look at each parent
try:
url = request.urlForResource(self)
if url != "/":
- parent = waitForDeferred(request.locateResource(parentForURL(url)))
- yield parent
- parent = parent.getResult()
- d = waitForDeferred(parent.hasQuota(request))
- yield d
- yield d.getResult()
+ d = request.locateResource(parentForURL(url))
+ d.addCallback(lambda p: p.hasQuota(request))
+ return d
else:
- yield False
+ return succeed(False)
except NoURLForResourceError:
- yield False
-
- hasQuota = deferredGenerator(hasQuota)
-
+ return succeed(False)
+
def hasQuotaRoot(self, request):
"""
        @return: C{True} if this resource has a quota root, C{False} otherwise.
@@ -1729,14 +2052,16 @@
def quotaRoot(self, request):
"""
- @return: a C{int} containing the maximum allowed bytes if this collection
- is quota-controlled, or C{None} if not quota controlled.
+ @return: a C{int} containing the maximum allowed bytes if this
+ collection is quota-controlled, or C{None} if not quota
+ controlled.
"""
if self.hasDeadProperty(TwistedQuotaRootProperty):
return int(str(self.readDeadProperty(TwistedQuotaRootProperty)))
else:
return None
+ @inlineCallbacks
def quotaRootParent(self, request):
"""
Return the next quota root above this resource.
@@ -1748,24 +2073,22 @@
url = request.urlForResource(self)
while (url != "/"):
url = parentForURL(url)
- parent = waitForDeferred(request.locateResource(url))
- yield parent
- parent = parent.getResult()
+ parent = (yield request.locateResource(url))
if parent.hasQuotaRoot(request):
- yield parent
- return
+ returnValue(parent)
- yield None
-
- quotaRootParent = deferredGenerator(quotaRootParent)
+ returnValue(None)
def setQuotaRoot(self, request, maxsize):
"""
- @param maxsize: a C{int} containing the maximum allowed bytes for the contents
- of this collection, or C{None} tp remove quota restriction.
+ @param maxsize: a C{int} containing the maximum allowed bytes
+            for the contents of this collection, or C{None} to remove
+ quota restriction.
"""
assert self.isCollection(), "Only collections can have a quota root"
- assert maxsize is None or isinstance(maxsize, int), "maxsize must be an int or None"
+ assert maxsize is None or isinstance(maxsize, int), (
+ "maxsize must be an int or None"
+ )
if maxsize is not None:
self.writeDeadProperty(TwistedQuotaRootProperty(str(maxsize)))
@@ -1776,22 +2099,26 @@
def quotaSize(self, request):
"""
- Get the size of this resource (if its a collection get total for all children as well).
- TODO: Take into account size of dead-properties.
+        Get the size of this resource (if it is a collection, get the
+        total for all children as well). TODO: Take into account the
+        size of dead-properties.
@return: a C{int} containing the size of the resource.
"""
unimplemented(self)
+ @inlineCallbacks
def checkQuota(self, request, available):
"""
- Check to see whether all quota roots have sufficient available bytes.
- We currently do not use hierarchical quota checks - i.e. only the most
- immediate quota root parent is checked for quota.
+ Check to see whether all quota roots have sufficient available
+ bytes. We currently do not use hierarchical quota checks -
+ i.e. only the most immediate quota root parent is checked for
+ quota.
- @param available: a C{int} containing the additional quota required.
- @return: C{True} if there is sufficient quota remaining on all quota roots,
- C{False} otherwise.
+ @param available: a C{int} containing the additional quota
+ required.
+ @return: C{True} if there is sufficient quota remaining on all
+ quota roots, C{False} otherwise.
"""
quotaroot = self
@@ -1800,65 +2127,59 @@
quota = quotaroot.quotaRoot(request)
if quota is not None:
if available > quota[0]:
- yield False
- return
+ returnValue(False)
# Check the next parent with a quota root
- quotaroot = waitForDeferred(quotaroot.quotaRootParent(request))
- yield quotaroot
- quotaroot = quotaroot.getResult()
+ quotaroot = (yield quotaroot.quotaRootParent(request))
- yield True
+ returnValue(True)
- checkQuota = deferredGenerator(checkQuota)
-
def quotaSizeAdjust(self, request, adjust):
"""
- Update the quota used value on all quota root parents of this resource.
+ Update the quota used value on all quota root parents of this
+ resource.
- @param adjust: a C{int} containing the number of bytes added (positive) or
- removed (negative) that should be used to adjust the cached total.
+ @param adjust: a C{int} containing the number of bytes added
+ (positive) or removed (negative) that should be used to
+ adjust the cached total.
"""
# Check this resource first
if self.isCollection():
if self.hasQuotaRoot(request):
- d = waitForDeferred(self.updateQuotaUse(request, adjust))
- yield d
- d.getResult()
- yield None
- return
+ return self.updateQuotaUse(request, adjust)
# Check the next parent
url = request.urlForResource(self)
if url != "/":
- parent = waitForDeferred(request.locateResource(parentForURL(url)))
- yield parent
- parent = parent.getResult()
- d = waitForDeferred(parent.quotaSizeAdjust(request, adjust))
- yield d
- d.getResult()
+ d = request.locateResource(parentForURL(url))
+ d.addCallback(lambda p: p.quotaSizeAdjust(request, adjust))
+ return d
- yield None
+ return succeed(None)
- quotaSizeAdjust = deferredGenerator(quotaSizeAdjust)
-
def currentQuotaUse(self, request):
"""
- Get the cached quota use value, or if not present (or invalid) determine
- quota use by brute force.
+ Get the cached quota use value, or if not present (or invalid)
+ determine quota use by brute force.
- @return: an L{Deferred} with a C{int} result containing the current used byte if this collection
- is quota-controlled, or C{None} if not quota controlled.
+        @return: a L{Deferred} with a C{int} result containing the
+            currently used bytes if this collection is quota-controlled,
+            or C{None} if not quota-controlled.
"""
assert self.isCollection(), "Only collections can have a quota root"
- assert self.hasQuotaRoot(request), "Quota use only on quota root collection"
+ assert self.hasQuotaRoot(request), (
+ "Quota use only on quota root collection"
+ )
# Try to get the cached value property
if self.hasDeadProperty(TwistedQuotaUsedProperty):
- return succeed(int(str(self.readDeadProperty(TwistedQuotaUsedProperty))))
+ return succeed(
+ int(str(self.readDeadProperty(TwistedQuotaUsedProperty)))
+ )
else:
- # Do brute force size determination and cache the result in the private property
+ # Do brute force size determination and cache the result
+ # in the private property
def _defer(result):
self.writeDeadProperty(TwistedQuotaUsedProperty(str(result)))
return result
@@ -1870,10 +2191,12 @@
"""
Update the quota used value on this resource.
- @param adjust: a C{int} containing the number of bytes added (positive) or
- removed (negative) that should be used to adjust the cached total.
- @return: an L{Deferred} with a C{int} result containing the current used byte if this collection
- is quota-controlled, or C{None} if not quota controlled.
+ @param adjust: a C{int} containing the number of bytes added
+ (positive) or removed (negative) that should be used to
+ adjust the cached total.
+        @return: a L{Deferred} with a C{int} result containing the
+            currently used bytes if this collection is quota-controlled,
+            or C{None} if not quota-controlled.
"""
assert self.isCollection(), "Only collections can have a quota root"
@@ -1885,8 +2208,11 @@
if size >= 0:
self.writeDeadProperty(TwistedQuotaUsedProperty(str(size)))
else:
- # Remove the dead property and re-read to do brute force quota calc
- log.msg("Attempt to set quota used to a negative value: %s (adjustment: %s)" % (size, adjust,))
+ # Remove the dead property and re-read to do brute
+ # force quota calc
+ log.msg("Attempt to set quota used to a negative value: %s "
+ "(adjustment: %s)"
+ % (size, adjust,))
self.removeDeadProperty(TwistedQuotaUsedProperty)
return self.currentQuotaUse(request)
@@ -1907,7 +2233,13 @@
# If this is a collection and the URI doesn't end in "/", redirect.
#
if self.isCollection() and request.path[-1:] != "/":
- return RedirectResponse(request.unparseURL(path=urllib.quote(urllib.unquote(request.path), safe=':/')+'/'))
+ return RedirectResponse(
+ request.unparseURL(
+ path=urllib.quote(
+ urllib.unquote(request.path),
+ safe=':/')+'/'
+ )
+ )
def setHeaders(response):
response = IResponse(response)
@@ -1915,19 +2247,21 @@
response.headers.setHeader("dav", self.davComplianceClasses())
#
- # If this is a collection and the URI doesn't end in "/", add a
- # Content-Location header. This is needed even if we redirect such
- # requests (as above) in the event that this resource was created or
- # modified by the request.
+ # If this is a collection and the URI doesn't end in "/",
+ # add a Content-Location header. This is needed even if
+ # we redirect such requests (as above) in the event that
+ # this resource was created or modified by the request.
#
if self.isCollection() and request.uri[-1:] != "/":
- response.headers.setHeader("content-location", request.uri + "/")
+ response.headers.setHeader(
+ "content-location", request.uri + "/"
+ )
return response
def onError(f):
- # If we get an HTTPError, run its response through setHeaders() as
- # well.
+ # If we get an HTTPError, run its response through
+ # setHeaders() as well.
f.trap(HTTPError)
return setHeaders(f.value.response)
@@ -1938,7 +2272,10 @@
"""
DAV resource with no children.
"""
- def findChildren(self, depth, request, callback, privileges=None, inherited_aces=None):
+ def findChildren(
+ self, depth, request, callback,
+ privileges=None, inherited_aces=None
+ ):
return succeed(None)
class DAVPrincipalResource (DAVResource):
@@ -1975,14 +2312,21 @@
if namespace == dav_namespace:
if name == "alternate-URI-set":
- return davxml.AlternateURISet(*[davxml.HRef(u) for u in self.alternateURIs()])
+ return davxml.AlternateURISet(*[
+ davxml.HRef(u) for u in self.alternateURIs()
+ ])
if name == "principal-URL":
- return davxml.PrincipalURL(davxml.HRef(self.principalURL()))
+ return davxml.PrincipalURL(
+ davxml.HRef(self.principalURL())
+ )
if name == "group-member-set":
def callback(members):
- return davxml.GroupMemberSet(*[davxml.HRef(p.principalURL()) for p in members])
+ return davxml.GroupMemberSet(*[
+ davxml.HRef(p.principalURL())
+ for p in members
+ ])
d = self.groupMembers()
d.addCallback(callback)
@@ -1990,7 +2334,10 @@
if name == "group-membership":
def callback(memberships):
- return davxml.GroupMembership(*[davxml.HRef(g.principalURL()) for g in memberships])
+ return davxml.GroupMembership(*[
+ davxml.HRef(g.principalURL())
+ for g in memberships
+ ])
d = self.groupMemberships()
d.addCallback(callback)
@@ -1998,11 +2345,16 @@
if name == "resourcetype":
if self.isCollection():
- return davxml.ResourceType(davxml.Collection(), davxml.Principal())
+ return davxml.ResourceType(
+ davxml.Collection(),
+ davxml.Principal()
+ )
else:
return davxml.ResourceType(davxml.Principal())
- return super(DAVPrincipalResource, self).readProperty(qname, request)
+ return super(DAVPrincipalResource, self).readProperty(
+ qname, request
+ )
return maybeDeferred(defer)
@@ -2014,8 +2366,9 @@
"""
See L{IDAVPrincipalResource.alternateURIs}.
- This implementation returns C{()}. Subclasses should override this
- method to provide alternate URIs for this resource if appropriate.
+ This implementation returns C{()}. Subclasses should override
+ this method to provide alternate URIs for this resource if
+ appropriate.
"""
return ()
@@ -2023,29 +2376,29 @@
"""
See L{IDAVPrincipalResource.principalURL}.
- This implementation raises L{NotImplementedError}. Subclasses must
- override this method to provide the principal URL for this resource.
+ This implementation raises L{NotImplementedError}. Subclasses
+ must override this method to provide the principal URL for
+ this resource.
"""
unimplemented(self)
-
def groupMembers(self):
"""
- This implementation returns a Deferred which fires with C{()}, which is
- appropriate for non-group principals. Subclasses should override this
- method to provide member URLs for this resource if appropriate.
+ This implementation returns a Deferred which fires with C{()},
+ which is appropriate for non-group principals. Subclasses
+ should override this method to provide member URLs for this
+ resource if appropriate.
@see: L{IDAVPrincipalResource.groupMembers}.
"""
return succeed(())
-
def expandedGroupMembers(self):
"""
- This implementation returns a Deferred which fires with C{()}, which is
- appropriate for non-group principals. Subclasses should override this
- method to provide expanded member URLs for this resource if
- appropriate.
+ This implementation returns a Deferred which fires with C{()},
+ which is appropriate for non-group principals. Subclasses
+ should override this method to provide expanded member URLs
+ for this resource if appropriate.
@see: L{IDAVPrincipalResource.expandedGroupMembers}
"""
@@ -2056,36 +2409,37 @@
"""
See L{IDAVPrincipalResource.groupMemberships}.
- This implementation raises L{NotImplementedError}. Subclasses must
- override this method to provide the group URLs for this resource.
+ This implementation raises L{NotImplementedError}. Subclasses
+ must override this method to provide the group URLs for this
+ resource.
"""
unimplemented(self)
- @deferredGenerator
def principalMatch(self, href):
"""
- Check whether the supplied principal matches this principal or is a
- member of this principal resource.
+ Check whether the supplied principal matches this principal or
+ is a member of this principal resource.
@param href: the L{HRef} to test.
- @return: True if there is a match, False otherwise
+ @return: True if there is a match, False otherwise.
"""
uri = str(href)
if self.principalURL() == uri:
- yield True
- return
+ return succeed(True)
else:
- d = waitForDeferred(self.expandedGroupMembers())
- yield d
- members = d.getResult()
- member_uris = [member.principalURL() for member in members]
- yield uri in member_uris
+ d = self.expandedGroupMembers()
+ d.addCallback(
+ lambda members:
+ uri in [member.principalURL() for member in members]
+ )
+ return d
+
class DAVPrincipalCollectionResource (DAVResource):
"""
WebDAV principal collection resource. (RFC 3744, section 5.8)
- This is an abstract class; subclasses must implement C{principalForUser} in
- order to properly implement it.
+ This is an abstract class; subclasses must implement
+ C{principalForUser} in order to properly implement it.
"""
implements(IDAVPrincipalCollectionResource)
@@ -2116,24 +2470,25 @@
@raise: L{NotImplementedError}
"""
raise NotImplementedError(
- "%s did not implement principalForUser" % (self.__class__))
+ "%s did not implement principalForUser" % (self.__class__)
+ )
-
class AccessDeniedError(Exception):
def __init__(self, errors):
"""
- An error to be raised when some request fails to meet sufficient access
- privileges for a resource.
+ An error to be raised when some request fails to meet
+ sufficient access privileges for a resource.
- @param errors: sequence of tuples, one for each resource for which one or
- more of the given privileges are not granted, in the form
- C{(uri, privileges)}, where uri is a URL path relative to
- resource or C{None} if the error was in this resource,
- privileges is a sequence of the privileges which are not
- granted a subset thereof.
+ @param errors: sequence of tuples, one for each resource for
+ which one or more of the given privileges are not granted,
+ in the form C{(uri, privileges)}, where uri is a URL path
+ relative to resource or C{None} if the error was in this
+            resource, and privileges is the sequence of the given
+            privileges which are not granted (a subset thereof).
"""
- Exception.__init__(self, "Access denied for some resources: %r" % (errors,))
+ Exception.__init__(self, "Access denied for some resources: %r"
+ % (errors,))
self.errors = errors
##
@@ -2150,8 +2505,9 @@
class TwistedACLInheritable (davxml.WebDAVEmptyElement):
"""
- When set on an ACE, this indicates that the ACE privileges should be inherited by
- all child resources within the resource with this ACE.
+ When set on an ACE, this indicates that the ACE privileges should
+ be inherited by all child resources within the resource with this
+ ACE.
"""
namespace = twisted_dav_namespace
name = "inheritable"
@@ -2168,23 +2524,27 @@
davxml.registerElement(TwistedGETContentMD5)
-"""
-When set on a collection, this property indicates that the collection has a quota limit for
-the size of all resources stored in the collection (and any associate meta-data such as properties).
-The value is a number - the maximum size in bytes allowed.
-"""
+
class TwistedQuotaRootProperty (davxml.WebDAVTextElement):
+ """
+ When set on a collection, this property indicates that the
+ collection has a quota limit for the size of all resources stored
+    in the collection (and any associated meta-data such as
+ properties). The value is a number - the maximum size in bytes
+ allowed.
+ """
namespace = twisted_private_namespace
name = "quota-root"
davxml.registerElement(TwistedQuotaRootProperty)
-"""
-When set on a collection, this property contains the cached running total of the size of all
-resources stored in the collection (and any associate meta-data such as properties).
-The value is a number - the size in bytes used.
-"""
class TwistedQuotaUsedProperty (davxml.WebDAVTextElement):
+ """
+ When set on a collection, this property contains the cached
+ running total of the size of all resources stored in the
+    collection (and any associated meta-data such as properties). The
+ value is a number - the size in bytes used.
+ """
namespace = twisted_private_namespace
name = "quota-used"
@@ -2222,46 +2582,79 @@
davPrivilegeSet = davxml.SupportedPrivilegeSet(
davxml.SupportedPrivilege(
davxml.Privilege(davxml.All()),
- davxml.Description("all privileges", **{"xml:lang": "en"}),
+ davxml.Description(
+ "all privileges",
+ **{"xml:lang": "en"}
+ ),
davxml.SupportedPrivilege(
davxml.Privilege(davxml.Read()),
- davxml.Description("read resource", **{"xml:lang": "en"}),
+ davxml.Description(
+ "read resource",
+ **{"xml:lang": "en"}
+ ),
),
davxml.SupportedPrivilege(
davxml.Privilege(davxml.Write()),
- davxml.Description("write resource", **{"xml:lang": "en"}),
+ davxml.Description(
+ "write resource",
+ **{"xml:lang": "en"}
+ ),
davxml.SupportedPrivilege(
davxml.Privilege(davxml.WriteProperties()),
- davxml.Description("write resource properties", **{"xml:lang": "en"}),
+ davxml.Description(
+ "write resource properties",
+ **{"xml:lang": "en"}
+ ),
),
davxml.SupportedPrivilege(
davxml.Privilege(davxml.WriteContent()),
- davxml.Description("write resource content", **{"xml:lang": "en"}),
+ davxml.Description(
+ "write resource content",
+ **{"xml:lang": "en"}
+ ),
),
davxml.SupportedPrivilege(
davxml.Privilege(davxml.Bind()),
- davxml.Description("add child resource", **{"xml:lang": "en"}),
+ davxml.Description(
+ "add child resource",
+ **{"xml:lang": "en"}
+ ),
),
davxml.SupportedPrivilege(
davxml.Privilege(davxml.Unbind()),
- davxml.Description("remove child resource", **{"xml:lang": "en"}),
+ davxml.Description(
+ "remove child resource",
+ **{"xml:lang": "en"}
+ ),
),
),
davxml.SupportedPrivilege(
davxml.Privilege(davxml.Unlock()),
- davxml.Description("unlock resource without ownership of lock", **{"xml:lang": "en"}),
+ davxml.Description(
+ "unlock resource without ownership of lock",
+ **{"xml:lang": "en"}
+ ),
),
davxml.SupportedPrivilege(
davxml.Privilege(davxml.ReadACL()),
- davxml.Description("read resource access control list", **{"xml:lang": "en"}),
+ davxml.Description(
+ "read resource access control list",
+ **{"xml:lang": "en"}
+ ),
),
davxml.SupportedPrivilege(
davxml.Privilege(davxml.WriteACL()),
- davxml.Description("write resource access control list", **{"xml:lang": "en"}),
+ davxml.Description(
+ "write resource access control list",
+ **{"xml:lang": "en"}
+ ),
),
davxml.SupportedPrivilege(
davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet()),
- davxml.Description("read privileges for current principal", **{"xml:lang": "en"}),
+ davxml.Description(
+ "read privileges for current principal",
+ **{"xml:lang": "en"}
+ ),
),
),
)
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/metafd.py (from rev 5438, CalendarServer/trunk/twext/web2/metafd.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/metafd.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/metafd.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,251 @@
+
+##
+# Copyright (c) 2010 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twisted.internet.tcp import Server
+from twext.internet.tcp import MaxAcceptTCPServer
+
+from twisted.internet import reactor
+
+from twisted.application.service import MultiService, Service
+
+from twext.web2.channel.http import HTTPFactory
+
+from twext.internet.sendfdport import (
+ InheritedPort, InheritedSocketDispatcher, InheritingProtocolFactory)
+
+
+
+class JustEnoughLikeAPort(object):
+ """
+ Fake out just enough of L{tcp.Port} to be acceptable to
+ L{tcp.Server}...
+ """
+ _realPortNumber = 'inherited'
+
+
+
+class ReportingHTTPService(Service, object):
+ """
+ Service which starts up an HTTP server that can report back to its parent
+ process via L{InheritedPort}.
+ """
+
+ _connectionCount = 0
+
+ def __init__(self, site, fd, contextFactory):
+ self.contextFactory = contextFactory
+ # Unlike other 'factory' constructions, config.MaxRequests and
+ # config.MaxAccepts are dealt with in the master process, so we don't
+ # need to propagate them here.
+ self.site = site
+ self.fd = fd
+
+
+ def startService(self):
+ """
+ Start reading on the inherited port.
+ """
+ Service.startService(self)
+ self.reportingFactory = ReportingHTTPFactory(self.site, vary=True)
+ self.reportingFactory.inheritedPort = InheritedPort(
+ self.fd, self.createTransport, self.reportingFactory
+ )
+ self.reportingFactory.inheritedPort.startReading()
+
+
+ def stopService(self):
+ """
+ Stop reading on the inherited port.
+ """
+ Service.stopService(self)
+ # XXX stopping should really be destructive, because otherwise we will
+ # always leak a file descriptor; i.e. this shouldn't be restartable.
+ # XXX this needs to return a Deferred.
+ self.reportingFactory.inheritedPort.stopReading()
+
+
+ def createTransport(self, skt, data, protocol):
+ """
+ Create a TCP transport, from a socket object passed by the parent.
+ """
+ self._connectionCount += 1
+ transport = Server(skt, protocol,
+ skt.getpeername(), JustEnoughLikeAPort,
+ self._connectionCount, reactor)
+ if data == 'SSL':
+ transport.startTLS(self.contextFactory)
+ transport.startReading()
+ return transport
+
+
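(Editor's sketch, not part of this new file: how a subprocess might stand the service up, assuming the master process passed the dispatch socket as file descriptor 3, 'site' is an already configured twext.web2 site, and 'application' is the usual .tac Application object. Plain TCP is assumed, so no TLS context factory is supplied.)

    service = ReportingHTTPService(site, fd=3, contextFactory=None)
    service.setServiceParent(application)
    # When the service machinery calls startService(), the factory and
    # InheritedPort are created and reading begins on fd 3.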
+
+class ReportingHTTPFactory(HTTPFactory):
+ """
+ An L{HTTPFactory} which reports its status to a
+ L{twext.internet.sendfdport.InheritedPort}.
+
+ @ivar inheritedPort: an L{InheritedPort} to report status (the current
+ number of outstanding connections) to. Since this - the
+ L{ReportingHTTPFactory} - needs to be instantiated to be passed to
+ L{InheritedPort}'s constructor, this attribute must be set afterwards
+ but before any connections have occurred.
+ """
+
+ def _report(self, message):
+ """
+ Report a status message to the parent.
+ """
+ self.inheritedPort.reportStatus(message)
+
+
+ def addConnectedChannel(self, channel):
+ """
+ Add the connected channel, and report the current number of open
+ channels to the listening socket in the parent process.
+ """
+ HTTPFactory.addConnectedChannel(self, channel)
+ self._report("+")
+
+
+ def removeConnectedChannel(self, channel):
+ """
+ Remove the connected channel, and report the current number of open
+ channels to the listening socket in the parent process.
+ """
+ HTTPFactory.removeConnectedChannel(self, channel)
+ self._report("-")
+
+
+
+class ConnectionLimiter(MultiService, object):
+ """
+ Connection limiter for use with L{InheritedSocketDispatcher}.
+
+ This depends on statuses being reported by L{ReportingHTTPFactory}
+ """
+
+ def __init__(self, maxAccepts, maxRequests):
+ """
+ Create a L{ConnectionLimiter} with an associated dispatcher and
+ list of factories.
+ """
+ MultiService.__init__(self)
+ self.factories = []
+ # XXX dispatcher needs to be a service, so that it can shut down its
+ # sub-sockets.
+ self.dispatcher = InheritedSocketDispatcher(self)
+ self.maxAccepts = maxAccepts
+ self.maxRequests = maxRequests
+
+
+ def addPortService(self, description, port, interface, backlog):
+ """
+ Add a L{MaxAcceptTCPServer} to bind a TCP port to a socket description.
+ """
+ lipf = LimitingInheritingProtocolFactory(self, description)
+ self.factories.append(lipf)
+ MaxAcceptTCPServer(
+ port, lipf,
+ interface=interface,
+ backlog=backlog
+ ).setServiceParent(self)
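(Editor's sketch, not part of this new file: wiring the limiter into the master process's service hierarchy. The port numbers, interface, backlog and parent service are illustrative; the "SSL" description is assumed to be the tag that createTransport() in ReportingHTTPService keys on to start TLS in the child.)

    limiter = ConnectionLimiter(maxAccepts=1, maxRequests=100)
    limiter.addPortService("HTTP", 8008, "127.0.0.1", backlog=50)
    limiter.addPortService("SSL", 8443, "127.0.0.1", backlog=50)
    limiter.setServiceParent(masterService)   # 'masterService' is hypothetical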
+
+
+ # implementation of implicit statusWatcher interface required by
+ # InheritedSocketDispatcher
+
+ def statusFromMessage(self, previousStatus, message):
+ """
+ Determine a subprocess socket's status from its previous status and a
+ status message.
+ """
+ if message == '-':
+ result = self.intWithNoneAsZero(previousStatus) - 1
+ # A connection has gone away in a subprocess; we should start
+ # accepting connections again if we paused (see
+ # newConnectionStatus)
+ for f in self.factories:
+ f.myServer.myPort.startReading()
+ else:
+ # '+' is just an acknowledgement of newConnectionStatus, so we can
+ # ignore it.
+ result = self.intWithNoneAsZero(previousStatus)
+ return result
+
+
+ def newConnectionStatus(self, previousStatus):
+ """
+ Determine the effect of a new connection being sent on a subprocess
+ socket.
+ """
+ current = self.outstandingRequests + 1
+ maximum = self.maxRequests
+ overloaded = (current >= maximum)
+ if overloaded:
+ for f in self.factories:
+ f.myServer.myPort.stopReading()
+
+ result = self.intWithNoneAsZero(previousStatus) + 1
+ return result
+
+
+ def intWithNoneAsZero(self, x):
+ """
+ Convert 'x' to an C{int}, unless x is C{None}, in which case return 0.
+ """
+ if x is None:
+ return 0
+ else:
+ return int(x)
+
+
+ @property
+ def outstandingRequests(self):
+ outstanding = 0
+ for status in self.dispatcher.statuses:
+ outstanding += self.intWithNoneAsZero(status)
+ return outstanding
+
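(Editor's walk-through, not part of this new file, of the bookkeeping the two status methods above perform for a single subprocess socket, starting from the None status a fresh socket gets. No port services are attached here, and a freshly constructed dispatcher is assumed to report no statuses yet, so the start/stopReading loops over self.factories are no-ops.)

    limiter = ConnectionLimiter(maxAccepts=1, maxRequests=3)

    status = None
    status = limiter.newConnectionStatus(status)     # master dispatched a connection -> 1
    status = limiter.statusFromMessage(status, "+")  # child acknowledged it          -> 1
    status = limiter.statusFromMessage(status, "-")  # child finished it              -> 0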
+
+
+class LimitingInheritingProtocolFactory(InheritingProtocolFactory):
+ """
+ An L{InheritingProtocolFactory} that supports the implicit factory contract
+ required by L{MaxAcceptTCPServer}/L{MaxAcceptTCPPort}.
+
+ @ivar outstandingRequests: a read-only property for the number of currently
+ active connections.
+
+ @ivar maxAccepts: The maximum number of times to call 'accept()' in a
+ single reactor loop iteration.
+
+ @ivar maxRequests: The maximum number of concurrent connections to accept
+ at once - note that this is for the I{entire server}, whereas the
+ value in the configuration file is for only a single process.
+ """
+
+ def __init__(self, limiter, description):
+ super(LimitingInheritingProtocolFactory, self).__init__(
+ limiter.dispatcher, description)
+ self.limiter = limiter
+ self.maxAccepts = limiter.maxAccepts
+ self.maxRequests = limiter.maxRequests
+
+
+ @property
+ def outstandingRequests(self):
+ return self.limiter.outstandingRequests
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/server.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/server.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twext/web2/server.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -320,6 +320,12 @@
def process(self):
"Process a request."
+ log.msg("%s %s %s" % (
+ self.method,
+ unquote(self.uri),
+ "HTTP/%s.%s" % self.clientproto
+ ))
+
try:
self.checkExpect()
resp = self.preprocessRequest()
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twisted/plugins/kqueuereactor.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twisted/plugins/kqueuereactor.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twisted/plugins/kqueuereactor.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,5 +1,5 @@
from twisted.application.reactors import Reactor
caldav_kqueue = Reactor(
- 'caldav_kqueue', 'kqreactor',
+ 'caldav_kqueue', 'twext.internet.kqreactor',
'kqueue(2)-based reactor.')
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/cache.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/cache.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/cache.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,350 +0,0 @@
-##
-# Copyright (c) 2008-2010 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-import cPickle
-import hashlib
-import uuid
-
-from zope.interface import implements
-
-from twisted.internet.defer import succeed, maybeDeferred
-from twext.web2.dav.util import allDataFromStream
-from twext.web2.http import Response
-from twext.web2.iweb import IResource
-from twext.web2.stream import MemoryStream
-
-from twext.python.log import LoggingMixIn
-
-from twistedcaldav.memcachepool import CachePoolUserMixIn, defaultCachePool
-from twistedcaldav.config import config
-
-
-class DisabledCacheNotifier(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def changed(self):
- return succeed(None)
-
-
-class DisabledCache(object):
- def getResponseForRequest(self, request):
- return succeed(None)
-
- def cacheResponseForRequest(self, request, response):
- return succeed(response)
-
-
-class URINotFoundException(Exception):
- def __init__(self, uri):
- self.uri = uri
-
-
- def __repr__(self):
- return "%s: Could not find URI %r" % (
- self.__class__.__name__,
- self.uri)
-
-
-class MemcacheChangeNotifier(LoggingMixIn, CachePoolUserMixIn):
-
- def __init__(self, resource, cachePool=None, cacheHandle="Default"):
- self._resource = resource
- self._cachePool = cachePool
- self._cachePoolHandle = cacheHandle
-
- def _newCacheToken(self):
- return str(uuid.uuid4())
-
- def changed(self):
- """
- Change the cache token for a resource
-
- return: A L{Deferred} that fires when the token has been changed.
- """
- url = self._resource.url()
-
- self.log_debug("Changing Cache Token for %r" % (url,))
- return self.getCachePool().set(
- 'cacheToken:%s' % (url,),
- self._newCacheToken(), expireTime=config.ResponseCacheTimeout*60)
-
-
-class BaseResponseCache(LoggingMixIn):
- """
- A base class which provides some common operations
- """
- def _principalURI(self, principal):
- return str(principal.children[0])
-
-
- def _uriNotFound(self, f, uri):
- f.trap(AttributeError)
- raise URINotFoundException(uri)
-
-
- def _getRecordForURI(self, uri, request):
- def _getRecord(resrc):
- if hasattr(resrc, 'record'):
- return resrc.record
-
- try:
- return request.locateResource(uri).addCallback(
- _getRecord).addErrback(self._uriNotFound, uri)
- except AssertionError:
- raise URINotFoundException(uri)
-
-
- def _canonicalizeURIForRequest(self, uri, request):
- try:
- return request.locateResource(uri).addCallback(
- lambda resrc: resrc.url()).addErrback(self._uriNotFound, uri)
- except AssertionError:
- raise URINotFoundException(uri)
-
-
- def _getURIs(self, request):
- def _getSecondURI(rURI):
- return self._canonicalizeURIForRequest(
- self._principalURI(request.authnUser),
- request).addCallback(lambda pURI: (pURI, rURI))
-
- d = self._canonicalizeURIForRequest(request.uri, request)
- d.addCallback(_getSecondURI)
-
- return d
-
-
- def _requestKey(self, request):
- def _getBody(uris):
- return allDataFromStream(request.stream).addCallback(
- lambda body: (body, uris))
-
- def _getKey((requestBody, (pURI, rURI))):
- if requestBody is not None:
- request.stream = MemoryStream(requestBody)
- request.stream.doStartReading = None
-
- request.cacheKey = (request.method,
- pURI,
- rURI,
- request.headers.getHeader('depth'),
- hash(requestBody))
-
- return request.cacheKey
-
- d = _getBody((self._principalURI(request.authnUser), request.uri))
- d.addCallback(_getKey)
- return d
-
-
- def _getResponseBody(self, key, response):
- d1 = allDataFromStream(response.stream)
- d1.addCallback(lambda responseBody: (key, responseBody))
- return d1
-
-
-class MemcacheResponseCache(BaseResponseCache, CachePoolUserMixIn):
- def __init__(self, docroot, cachePool=None):
- self._docroot = docroot
- self._cachePool = cachePool
-
-
- def _tokenForURI(self, uri, cachePoolHandle=None):
- """
- Get a property store for the given C{uri}.
-
- @param uri: The URI we'd like the token for.
- @return: A C{str} representing the token for the URI.
- """
-
- if cachePoolHandle:
- return defaultCachePool(cachePoolHandle).get('cacheToken:%s' % (uri,))
- else:
- return self.getCachePool().get('cacheToken:%s' % (uri,))
-
-
- def _getTokens(self, request):
- def _tokensForURIs((pURI, rURI)):
- tokens = []
- d1 = self._tokenForURI(pURI, "PrincipalToken")
- d1.addCallback(tokens.append)
- d1.addCallback(lambda _ign: self._getRecordForURI(pURI, request))
- d1.addCallback(lambda dToken: tokens.append(hash(dToken)))
- d1.addCallback(lambda _ign: self._tokenForURI(rURI))
- d1.addCallback(tokens.append)
- d1.addCallback(lambda _ign: tokens)
- return d1
-
- d = self._getURIs(request)
- d.addCallback(_tokensForURIs)
- return d
-
-
- def _hashedRequestKey(self, request):
- def _hashKey(key):
- oldkey = key
- request.cacheKey = key = hashlib.md5(
- ':'.join([str(t) for t in key])).hexdigest()
- self.log_debug("hashing key for get: %r to %r" % (oldkey, key))
- return request.cacheKey
-
- d = self._requestKey(request)
- d.addCallback(_hashKey)
- return d
-
-
- def getResponseForRequest(self, request):
- def _checkTokens(curTokens, expectedTokens, (code, headers, body)):
- if curTokens[0] != expectedTokens[0]:
- self.log_debug(
- "Principal token doesn't match for %r: %r != %r" % (
- request.cacheKey,
- curTokens[0],
- expectedTokens[0]))
- return None
-
- if curTokens[1] != expectedTokens[1]:
- self.log_debug(
- "Directory Record Token doesn't match for %r: %r != %r" % (
- request.cacheKey,
- curTokens[1],
- expectedTokens[1]))
- return None
-
- if curTokens[2] != expectedTokens[2]:
- self.log_debug(
- "URI token doesn't match for %r: %r != %r" % (
- request.cacheKey,
- curTokens[1],
- expectedTokens[1]))
- return None
-
- r = Response(code,
- stream=MemoryStream(body))
-
- for key, value in headers.iteritems():
- r.headers.setRawHeaders(key, value)
-
- return r
-
- def _unpickleResponse((flags, value), key):
- if value is None:
- self.log_debug("Not in cache: %r" % (key,))
- return None
-
- self.log_debug("Found in cache: %r = %r" % (key, value))
-
- (principalToken, directoryToken, uriToken,
- resp) = cPickle.loads(value)
- d2 = self._getTokens(request)
-
- d2.addCallback(_checkTokens,
- (principalToken,
- directoryToken,
- uriToken),
- resp)
-
- return d2
-
- def _getCached(key):
- self.log_debug("Checking cache for: %r" % (key,))
- d1 = self.getCachePool().get(key)
- return d1.addCallback(_unpickleResponse, key)
-
- def _handleExceptions(f):
- f.trap(URINotFoundException)
- self.log_debug("Could not locate URI: %r" % (f.value,))
- return None
-
- d = self._hashedRequestKey(request)
- d.addCallback(_getCached)
- d.addErrback(_handleExceptions)
- return d
-
-
- def cacheResponseForRequest(self, request, response):
- def _makeCacheEntry((pToken, dToken, uToken), (key, responseBody)):
- cacheEntry = cPickle.dumps(
- (pToken,
- dToken,
- uToken,
- (response.code,
- dict(list(response.headers.getAllRawHeaders())),
- responseBody)))
-
- self.log_debug("Adding to cache: %r = %r" % (key, cacheEntry))
- return self.getCachePool().set(key, cacheEntry,
- expireTime=config.ResponseCacheTimeout*60).addCallback(
- lambda _: response)
-
- def _cacheResponse((key, responseBody)):
-
- response.headers.removeHeader('date')
- response.stream = MemoryStream(responseBody)
-
- d1 = self._getTokens(request)
- d1.addCallback(_makeCacheEntry, (key, responseBody))
- return d1
-
- def _handleExceptions(f):
- f.trap(URINotFoundException)
- self.log_debug("Could not locate URI: %r" % (f.value,))
- return response
-
- if hasattr(request, 'cacheKey'):
- d = succeed(request.cacheKey)
- else:
- d = self._hashedRequestKey(request)
-
- d.addCallback(self._getResponseBody, response)
- d.addCallback(_cacheResponse)
- d.addErrback(_handleExceptions)
- return d
-
-
-class _CachedResponseResource(object):
- implements(IResource)
-
- def __init__(self, response):
- self._response = response
-
- def renderHTTP(self, request):
- if not hasattr(request, "extendedLogItems"):
- request.extendedLogItems = {}
- request.extendedLogItems["cached"] = "1"
- return self._response
-
- def locateChild(self, request, segments):
- return self, []
-
-
-class PropfindCacheMixin(object):
- def renderHTTP(self, request):
- def _cacheResponse(responseCache, response):
- return responseCache.cacheResponseForRequest(request, response)
-
- def _getResponseCache(response):
- d1 = request.locateResource("/")
- d1.addCallback(lambda resource: resource.responseCache)
- d1.addCallback(_cacheResponse, response)
- return d1
-
- d = maybeDeferred(super(PropfindCacheMixin, self).renderHTTP, request)
-
- if request.method == 'PROPFIND':
- d.addCallback(_getResponseCache)
- return d
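
Note on the removed twistedcaldav/cache.py: it validated cached PROPFIND responses against three memcache tokens (principal, directory record, and request URI) and dropped an entry as soon as any of them changed. A minimal, self-contained sketch of that validation idea, using a plain dict in place of the memcache pool and illustrative names rather than the CalendarServer classes:

    import uuid

    class TokenCache(object):
        """Toy stand-in for the memcache pool used by the removed cache.py."""
        def __init__(self):
            self._store = {}

        def newToken(self, key):
            token = str(uuid.uuid4())
            self._store[key] = token
            return token

        def token(self, key):
            return self._store.get(key)

    class ResponseCache(object):
        """An entry is only served while all three tokens are unchanged."""
        def __init__(self, tokens):
            self.tokens = tokens
            self._responses = {}

        def put(self, key, principalURI, recordKey, resourceURI, response):
            self._responses[key] = (
                self.tokens.token(principalURI),
                self.tokens.token(recordKey),
                self.tokens.token(resourceURI),
                response,
            )

        def get(self, key, principalURI, recordKey, resourceURI):
            entry = self._responses.get(key)
            if entry is None:
                return None
            pTok, dTok, uTok, response = entry
            current = (self.tokens.token(principalURI),
                       self.tokens.token(recordKey),
                       self.tokens.token(resourceURI))
            if current != (pTok, dTok, uTok):
                return None  # a token changed since the response was cached
            return response

    tokens = TokenCache()
    for key in ("/principals/users/a/", "record:a", "/calendars/users/a/"):
        tokens.newToken(key)
    cache = ResponseCache(tokens)
    cache.put("propfind-key", "/principals/users/a/", "record:a",
              "/calendars/users/a/", "207 multistatus body")
    assert cache.get("propfind-key", "/principals/users/a/", "record:a",
                     "/calendars/users/a/") == "207 multistatus body"
    tokens.newToken("/calendars/users/a/")  # URI token changes, entry becomes invalid
    assert cache.get("propfind-key", "/principals/users/a/", "record:a",
                     "/calendars/users/a/") is None

Changing any one token, as MemcacheChangeNotifier.changed() did for URIs, is enough to invalidate every cached response that recorded it.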
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/config.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/config.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/config.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -75,10 +75,14 @@
dict.__delattr__(self, attr)
class ConfigProvider(object):
- """Configuration provider, abstraction for config storage/format/defaults"""
+ """
+ Configuration provider, abstraction for config storage/format/defaults.
+ """
def __init__(self, defaults=None):
- """Create configuration provider with given defaults"""
+ """
+ Create configuration provider with given defaults.
+ """
self._configFileName = None
if defaults is None:
self._defaults = ConfigDict()
@@ -86,41 +90,53 @@
self._defaults = ConfigDict(copy.deepcopy(defaults))
def getDefaults(self):
- """Return defaults"""
+ """
+ Return defaults.
+ """
return self._defaults
def setDefaults(self, defaults):
- """Change defaults"""
+ """
+ Change defaults.
+ """
self._defaults = ConfigDict(copy.deepcopy(defaults))
def getConfigFileName(self):
- """Return current configuration file path+name"""
+ """
+ Return current configuration file path and name.
+ """
return self._configFileName
def setConfigFileName(self, configFileName):
- """Change configuration file path+name for next load operations"""
+ """
+ Change configuration file path and name for next load operations.
+ """
self._configFileName = configFileName
if self._configFileName:
self._configFileName = os.path.abspath(configFileName)
def hasErrors(self):
- """Return true if last load operation encountered any errors"""
+ """
+ Return true if last load operation encountered any errors.
+ """
return False
def loadConfig(self):
- """Load the configuration, return a dictionary of settings"""
+ """
+ Load the configuration, return a dictionary of settings.
+ """
return self._defaults
class Config(object):
-
def __init__(self, provider=None):
if not provider:
self._provider = ConfigProvider()
else:
self._provider = provider
- self._preUpdateHooks = list()
- self._postUpdateHooks = list()
+ self._updating = False
+ self._preUpdateHooks = []
+ self._postUpdateHooks = []
self.reset()
def __setattr__(self, attr, value):
@@ -128,8 +144,12 @@
self._data[attr] = value
else:
self.__dict__[attr] = value
+ self.__dict__["_dirty"] = True
def __getattr__(self, attr):
+ if self._dirty:
+ self.update()
+
if attr in self._data:
return self._data[attr]
raise AttributeError(attr)
@@ -148,26 +168,18 @@
lastDict[attr] = ConfigDict()
lastDict = lastDict.__getattr__(part)
configItem = parts[-1]
+
if configItem in lastDict:
return lastDict[configItem]
else:
lastDict[configItem] = defaultValue
return defaultValue
- def getInt(self, attr, defaultValue):
- return int(self.get(attr, defaultValue))
-
- def addPreUpdateHook(self, hook):
- if isinstance(hook, list) or isinstance(hook, tuple):
- self._preUpdateHooks.extend(hook)
- else:
- self._preUpdateHooks.append(hook)
+ def addPreUpdateHooks(self, hooks):
+ self._preUpdateHooks.extend(hooks)
- def addPostUpdateHook(self, hook):
- if isinstance(hook, list) or isinstance(hook, tuple):
- self._postUpdateHooks.extend(hook)
- else:
- self._postUpdateHooks.append(hook)
+ def addPostUpdateHooks(self, hooks):
+ self._postUpdateHooks.extend(hooks)
def getProvider(self):
return self._provider
@@ -184,9 +196,14 @@
_mergeData(self._provider.getDefaults(), items)
self.update(items)
- def update(self, items):
+ def update(self, items=None):
+ if self._updating:
+ return
+ self._updating = True
+
if not isinstance(items, ConfigDict):
items = ConfigDict(items)
+
# Call hooks
for hook in self._preUpdateHooks:
hook(self._data, items)
@@ -194,6 +211,9 @@
for hook in self._postUpdateHooks:
hook(self._data)
+ self._updating = False
+ self._dirty = False
+
def load(self, configFile):
self._provider.setConfigFileName(configFile)
configDict = ConfigDict(self._provider.loadConfig())
@@ -201,7 +221,7 @@
self.update(configDict)
else:
raise ConfigurationError("Invalid configuration in %s"
- % (self._provider.getConfigFileName(), ))
+ % (self._provider.getConfigFileName(),))
def reload(self):
configDict = ConfigDict(self._provider.loadConfig())
@@ -215,6 +235,7 @@
def reset(self):
self._data = ConfigDict(copy.deepcopy(self._provider.getDefaults()))
+ self._dirty = True
def _mergeData(oldData, newData):
for key, value in newData.iteritems():
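
The config.py changes above replace explicit invalidation with a dirty flag: __setattr__ marks the configuration dirty, the next attribute read calls update(), and update() runs the registered pre/post hooks while guarding against re-entry. A rough sketch of that pattern (LazyConfig and the sample hook are illustrative, not the real twistedcaldav Config class):

    class LazyConfig(object):
        """Sketch of the dirty-flag pattern; not the real twistedcaldav Config."""
        def __init__(self, defaults=None):
            self.__dict__["_data"] = dict(defaults or {})
            self.__dict__["_preUpdateHooks"] = []
            self.__dict__["_postUpdateHooks"] = []
            self.__dict__["_updating"] = False
            self.__dict__["_dirty"] = True

        def addPreUpdateHooks(self, hooks):
            self._preUpdateHooks.extend(hooks)

        def addPostUpdateHooks(self, hooks):
            self._postUpdateHooks.extend(hooks)

        def __setattr__(self, attr, value):
            # Writes just record the value and mark the configuration dirty.
            self._data[attr] = value
            self.__dict__["_dirty"] = True

        def __getattr__(self, attr):
            # Reads trigger a recomputation pass if anything changed.
            if self.__dict__["_dirty"]:
                self.update()
            try:
                return self.__dict__["_data"][attr]
            except KeyError:
                raise AttributeError(attr)

        def update(self, items=None):
            if self._updating:  # guard against hooks re-entering update()
                return
            self.__dict__["_updating"] = True
            items = dict(items or {})
            for hook in self._preUpdateHooks:
                hook(self._data, items)
            self._data.update(items)
            for hook in self._postUpdateHooks:
                hook(self._data)
            self.__dict__["_updating"] = False
            self.__dict__["_dirty"] = False

    config = LazyConfig({"Memcached": {"Port": 11211}})
    config.addPostUpdateHooks([
        lambda data: data.setdefault(
            "MemcachedURL", "memcached://127.0.0.1:%d/" % (data["Memcached"]["Port"],)),
    ])
    config.ResponseCacheTimeout = 30                # marks the configuration dirty
    assert config.MemcachedURL.endswith(":11211/")  # read runs update() and the hook

The re-entrancy guard matters because post-update hooks typically read (and may set) config attributes themselves.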
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/aggregate.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/aggregate.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/aggregate.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -69,6 +69,15 @@
self.realmName = realmName
self._recordTypes = recordTypes
+ # FIXME: This is a temporary workaround until new data store is in
+ # place. During the purging of deprovisioned users' data, we need
+ # to be able to look up records by guid and shortName. The purge
+ # tool sticks temporary fake records in here.
+ self._tmpRecords = {
+ "guids" : { },
+ "shortNames" : { },
+ }
+
def __repr__(self):
return "<%s (%s): %r>" % (self.__class__.__name__, self.realmName, self._recordTypes)
@@ -111,9 +120,25 @@
return records
def recordWithShortName(self, recordType, shortName):
+
+ # FIXME: These temporary records shouldn't be needed when we move
+ # to the new data store API. They're currently needed when purging
+ # deprovisioned users' data.
+ record = self._tmpRecords["shortNames"].get(shortName, None)
+ if record:
+ return record
+
return self._query("recordWithShortName", recordType, shortName)
def recordWithUID(self, uid):
+
+ # FIXME: These temporary records shouldn't be needed when we move
+ # to the new data store API. They're currently needed when purging
+ # deprovisioned users' data.
+ record = self._tmpRecords["guids"].get(uid, None)
+ if record:
+ return record
+
return self._queryAll("recordWithUID", uid)
def recordWithAuthID(self, authID):
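
The _tmpRecords workaround above lets the purge tool inject fake directory records that are consulted before the aggregated services are queried. A toy illustration of that lookup order (AggregateLookup and its record dicts are hypothetical stand-ins):

    class AggregateLookup(object):
        """Hypothetical stand-in showing the 'temporary overrides first' lookup order."""
        def __init__(self, services):
            self._services = services
            self._tmpRecords = {"guids": {}, "shortNames": {}}

        def addTemporaryRecord(self, record):
            # The purge tool would park a fake record here while it works.
            self._tmpRecords["guids"][record["guid"]] = record
            for name in record["shortNames"]:
                self._tmpRecords["shortNames"][name] = record

        def recordWithUID(self, uid):
            record = self._tmpRecords["guids"].get(uid)
            if record:
                return record
            for service in self._services:
                found = service.recordWithUID(uid)
                if found:
                    return found
            return None

    lookup = AggregateLookup(services=[])
    lookup.addTemporaryRecord(
        {"guid": "7423F94A-6B76-4A3A-815B-D52CFD77935D", "shortNames": ["usera"]})
    assert lookup.recordWithUID(
        "7423F94A-6B76-4A3A-815B-D52CFD77935D")["shortNames"] == ["usera"]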
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/appleopendirectory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/appleopendirectory.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/appleopendirectory.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -400,6 +400,15 @@
emailAddresses = recordEmailAddresses,
memberGUIDs = (),
)
+
+ # (Copied from below)
+ # Look up augment information
+ # TODO: this needs to be deferred but for now we hard code
+ # the deferred result because we know it is completing
+ # immediately.
+ d = augment.AugmentService.getAugmentRecord(record.guid)
+ d.addCallback(lambda x:record.addAugmentInformation(x))
+
yield record
except KeyError:
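
The augment lookup added above relies on AugmentService.getAugmentRecord() returning an already-fired Deferred, so the addCallback runs synchronously inside the generator. A small sketch of why that works, with stand-in classes in place of the real service and record types:

    from twisted.internet.defer import succeed

    class FakeAugmentService(object):
        """Stand-in for augment.AugmentService; returns an already-fired Deferred."""
        def getAugmentRecord(self, guid):
            return succeed({"guid": guid, "enabledForCalendaring": True})

    class FakeRecord(object):
        def __init__(self, guid):
            self.guid = guid
            self.augment = None

        def addAugmentInformation(self, augment):
            self.augment = augment

    record = FakeRecord("83479230-821E-11DE-B6B0-DBB02C6D659D")
    d = FakeAugmentService().getAugmentRecord(record.guid)
    # The Deferred has already fired, so the callback runs before addCallback returns,
    # which is what the "completing immediately" comment in the diff depends on.
    d.addCallback(lambda augment: record.addAugmentInformation(augment))
    assert record.augment is not None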
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/augment.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/augment.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/augment.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -263,6 +263,9 @@
# Add to first file in list
self._doAddToFile(self.xmlFiles[0], new_records)
+ # This is required to invalidate self.db
+ self.lastCached = 0
+
return succeed(None)
def _doAddToFile(self, xmlfile, records):
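
Setting lastCached to 0 above forces the next read to treat the in-memory copy as stale and reload from the XML file, regardless of the cache timeout. A sketch of that invalidation pattern (CachedXMLDB and its loader are illustrative, not the augment database API):

    import time

    class CachedXMLDB(object):
        """Illustrative cache with a timestamp; zeroing it forces the next read to reload."""
        cacheTimeout = 30  # seconds

        def __init__(self, loader):
            self._loader = loader
            self.lastCached = 0
            self.db = None

        def _getDB(self):
            now = time.time()
            if self.db is None or now - self.lastCached > self.cacheTimeout:
                self.db = self._loader()
                self.lastCached = now
            return self.db

        def addRecords(self, records):
            self._writeRecords(records)
            self.lastCached = 0  # invalidate so the next read sees the new records

        def _writeRecords(self, records):
            pass  # persistence elided in this sketch

    loads = []
    db = CachedXMLDB(loader=lambda: loads.append(1) or {"records": len(loads)})
    db._getDB()
    db.addRecords(["new record"])
    db._getDB()              # reloads even though the timeout has not expired
    assert len(loads) == 2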
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/cachingdirectory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/cachingdirectory.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/cachingdirectory.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -28,10 +28,10 @@
import time
-import memcacheclient
import base64
from twext.python.log import LoggingMixIn
+from twext.python.memcacheclient import ClientFactory, MemcacheError
from twistedcaldav.config import config
from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord, DirectoryError, UnknownRecordTypeError
@@ -160,7 +160,7 @@
def _getMemcacheClient(self, refresh=False):
if refresh or not hasattr(self, "memcacheClient"):
- self.memcacheClient = memcacheclient.ClientFactory.getClient(['%s:%s' %
+ self.memcacheClient = ClientFactory.getClient(['%s:%s' %
(config.Memcached.Pools.Default.BindAddress, config.Memcached.Pools.Default.Port)],
debug=0, pickleProtocol=2)
return self.memcacheClient
@@ -194,11 +194,11 @@
record = self._getMemcacheClient().get(key)
if record is not None and isinstance(record, DirectoryRecord):
record.service = self
- except memcacheclient.MemcacheError:
+ except MemcacheError:
self.log_error("Could not read from memcache, retrying")
try:
record = self._getMemcacheClient(refresh=True).get(key)
- except memcacheclient.MemcacheError:
+ except MemcacheError:
self.log_error("Could not read from memcache again, giving up")
del self.memcacheClient
raise DirectoryMemcacheError("Failed to read from memcache")
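
The cachingdirectory.py change swaps the memcacheclient import for twext.python.memcacheclient but keeps the read-retry behaviour: try the existing client, rebuild it once on MemcacheError, and give up with DirectoryMemcacheError on a second failure. A compact sketch of that pattern, with local exception classes and a helper standing in for the real module:

    class MemcacheError(Exception):
        """Local stand-in for twext.python.memcacheclient.MemcacheError."""

    class DirectoryMemcacheError(Exception):
        """Local stand-in for the directory's memcache failure exception."""

    def readWithRetry(getClient, key, logError):
        try:
            return getClient().get(key)
        except MemcacheError:
            logError("Could not read from memcache, retrying")
            try:
                # A fresh client is built in case the old connection went bad.
                return getClient(refresh=True).get(key)
            except MemcacheError:
                logError("Could not read from memcache again, giving up")
                raise DirectoryMemcacheError("Failed to read from memcache")

    class FlakyClient(object):
        def __init__(self, failures):
            self.failures = failures

        def get(self, key):
            if self.failures:
                self.failures.pop()
                raise MemcacheError("connection lost")
            return "record-for-%s" % (key,)

    clients = {"current": FlakyClient(failures=[1])}

    def getClient(refresh=False):
        if refresh:
            clients["current"] = FlakyClient(failures=[])
        return clients["current"]

    assert readWithRetry(getClient, "user:wsanchez",
                         logError=lambda msg: None) == "record-for-user:wsanchez"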
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/calendaruserproxy.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/calendaruserproxy.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/calendaruserproxy.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -192,22 +192,9 @@
principals.append(principal)
newUIDs.add(principal.principalUID())
- # Get the old set of UIDs
- oldUIDs = (yield self._index().getMembers(self.uid))
-
# Change membership
yield self.setGroupMemberSetPrincipals(principals)
-
- # Invalidate the primary principal's cache, and any principal's whose
- # membership status changed
- yield self.parent.cacheNotifier.changed()
-
- changedUIDs = newUIDs.symmetric_difference(oldUIDs)
- for uid in changedUIDs:
- principal = self.pcollection.principalForUID(uid)
- if principal:
- yield principal.cacheNotifier.changed()
-
+
returnValue(True)
def setGroupMemberSetPrincipals(self, principals):
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/calendaruserproxyloader.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/calendaruserproxyloader.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/calendaruserproxyloader.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -134,7 +134,5 @@
db = calendaruserproxy.ProxyDBService
for item in self.items:
guid, write_proxies, read_proxies = item
- for proxy in write_proxies:
- yield db.setGroupMembers("%s#%s" % (guid, "calendar-proxy-write"), (proxy,))
- for proxy in read_proxies:
- yield db.setGroupMembers("%s#%s" % (guid, "calendar-proxy-read"), (proxy,))
+ yield db.setGroupMembers("%s#%s" % (guid, "calendar-proxy-write"), write_proxies)
+ yield db.setGroupMembers("%s#%s" % (guid, "calendar-proxy-read"), read_proxies)
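
The loader now passes each proxy list to setGroupMembers() in a single call. Assuming setGroupMembers() replaces the stored member set for the group, the difference from the old per-proxy loop looks like this toy example (ProxyDB is illustrative):

    class ProxyDB(object):
        """Illustrative store; setGroupMembers() replaces the whole member set."""
        def __init__(self):
            self.members = {}

        def setGroupMembers(self, groupUID, memberUIDs):
            self.members[groupUID] = set(memberUIDs)

    db = ProxyDB()
    write_proxies = ["userb", "userc"]

    # Per-proxy calls: each call overwrites the previous membership.
    for proxy in write_proxies:
        db.setGroupMembers("usera#calendar-proxy-write", (proxy,))
    assert db.members["usera#calendar-proxy-write"] == set(["userc"])

    # One call with the full list keeps every proxy.
    db.setGroupMembers("usera#calendar-proxy-write", write_proxies)
    assert db.members["usera#calendar-proxy-write"] == set(["userb", "userc"])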
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/opendirectorybacker.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/opendirectorybacker.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/opendirectorybacker.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -453,11 +453,6 @@
self.directoryBackedAddressBook.fp.restat()
self.directoryBackedAddressBook.writeDeadProperty(newAddressBookCTag)
- if hasattr(self.directoryBackedAddressBook, 'cacheNotifier'):
- yield self.directoryBackedAddressBook.cacheNotifier.changed()
- #else:
- # self.log_info("%r does not have a cacheNotifier but the CTag changed"
- # % (self.directoryBackedAddressBook,))
finally:
self.log_debug("unlocking: \"%s\")" % self._tmpDirAddressBookLockPath)
yield tmpDirLock.release()
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/principal.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/principal.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/principal.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -54,7 +54,6 @@
from twistedcaldav.authkerb import NegotiateCredentials
from twistedcaldav.config import config
-from twistedcaldav.cache import DisabledCacheNotifier, PropfindCacheMixin
from twistedcaldav.directory import calendaruserproxy
from twistedcaldav.directory import augment
from twistedcaldav.directory.calendaruserproxy import CalendarUserProxyPrincipalResource
@@ -510,7 +509,7 @@
def principalCollections(self):
return self.parent.principalCollections()
-class DirectoryPrincipalResource (PropfindCacheMixin, PermissionsMixIn, DAVPrincipalResource, DAVFile):
+class DirectoryPrincipalResource (PermissionsMixIn, DAVPrincipalResource, DAVFile):
"""
Directory principal resource.
"""
@@ -521,8 +520,6 @@
(calendarserver_namespace, "email-address-set"),
)
- cacheNotifierFactory = DisabledCacheNotifier
-
def __init__(self, parent, record):
"""
@param parent: the parent of this resource.
@@ -530,8 +527,6 @@
"""
super(DirectoryPrincipalResource, self).__init__(NotFilePath(isdir=True))
- self.cacheNotifier = self.cacheNotifierFactory(self, cacheHandle="PrincipalToken")
-
if self.isCollection():
slash = "/"
else:
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/accounts.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/accounts.xml 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/accounts.xml 2010-04-07 19:27:31 UTC (rev 5439)
@@ -60,6 +60,27 @@
<name>No Calendar</name>
<email-address>nocalendar at example.com</email-address>
</user>
+ <user>
+ <uid>usera</uid>
+ <guid>7423F94A-6B76-4A3A-815B-D52CFD77935D</guid>
+ <password>a</password>
+ <name>a</name>
+ <email-address>a at example.com</email-address>
+ </user>
+ <user>
+ <uid>userb</uid>
+ <guid>8A985493-EE2C-4665-94CF-4DFEA3A89500</guid>
+ <password>b</password>
+ <name>b</name>
+ <email-address>b at example.com</email-address>
+ </user>
+ <user>
+ <uid>userc</uid>
+ <guid>9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD2</guid>
+ <password>c</password>
+ <name>c</name>
+ <email-address>c at example.com</email-address>
+ </user>
<user repeat="2">
<uid>user%02d</uid>
<guid>user%02d</guid>
@@ -159,7 +180,7 @@
<uid>mercury</uid>
<guid>mercury</guid>
<password>mercury</password>
- <name>Mecury Seven</name>
+ <name>Mercury Seven</name>
<email-address>mercury at example.com</email-address>
</location>
<location>
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/augments.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/augments.xml 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/augments.xml 2010-04-07 19:27:31 UTC (rev 5439)
@@ -139,4 +139,19 @@
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
</record>
+ <record>
+ <uid>7423F94A-6B76-4A3A-815B-D52CFD77935D</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ </record>
+ <record>
+ <uid>8A985493-EE2C-4665-94CF-4DFEA3A89500</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ </record>
+ <record>
+ <uid>9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD2</uid>
+ <enable>true</enable>
+ <enable-calendar>true</enable-calendar>
+ </record>
</augments>
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/proxies.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/proxies.xml 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/proxies.xml 2010-04-07 19:27:31 UTC (rev 5439)
@@ -52,4 +52,11 @@
<member>recursive2_coasts</member>
</read-only-proxies>
</record>
+ <record>
+ <guid>7423F94A-6B76-4A3A-815B-D52CFD77935D</guid>
+ <proxies>
+ <member>8A985493-EE2C-4665-94CF-4DFEA3A89500</member>
+ <member>9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD2</member>
+ </proxies>
+ </record>
</proxies>
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/resources/caldavd.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/resources/caldavd.plist 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/resources/caldavd.plist 2010-04-07 19:27:31 UTC (rev 5439)
@@ -719,11 +719,7 @@
</array>
</dict>
- <!-- Response Caching -->
- <key>ResponseCacheTimeout</key>
- <integer>30</integer> <!-- in minutes -->
-
<!--
Twisted
-->
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_modify.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_modify.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_modify.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -129,3 +129,25 @@
directory.createRecord("resources", guid="resource01", shortNames=("resource01",), uid="resource01")
self.assertRaises(DirectoryError, directory.createRecord, "resources", guid="resource01", shortNames=("resource01",), uid="resource01")
+
+ def test_missingShortNames(self):
+ directory = getDirectory()
+
+ directory.createRecord("resources", guid="resource01")
+
+ record = directory.recordWithUID("resource01")
+ self.assertEquals(record.shortNames[0], "resource01")
+
+ directory.updateRecord("resources", guid="resource01",
+ fullName="Resource #1")
+
+ record = directory.recordWithUID("resource01")
+ self.assertEquals(record.shortNames[0], "resource01")
+ self.assertEquals(record.fullName, "Resource #1")
+
+ def test_missingGUID(self):
+ directory = getDirectory()
+
+ record = directory.createRecord("resources")
+
+ self.assertEquals(record.shortNames[0], record.guid)
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_opendirectory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_opendirectory.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_opendirectory.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -248,15 +248,41 @@
def lookupMethod(obj, compound, casei, recordType, attributes, count=0):
if dsattributes.kDSStdRecordTypeUsers in recordType:
return [
- ('morgen', {'dsAttrTypeStandard:RecordType': 'dsRecTypeStandard:Users', 'dsAttrTypeStandard:AppleMetaNodeLocation': '/LDAPv3/127.0.0.1', 'dsAttrTypeStandard:RecordName': ['morgen', 'Morgen Sagen'], 'dsAttrTypeStandard:FirstName': 'Morgen', 'dsAttrTypeStandard:GeneratedUID': '83479230-821E-11DE-B6B0-DBB02C6D659D', 'dsAttrTypeStandard:LastName': 'Sagen', 'dsAttrTypeStandard:EMailAddress': 'morgen at example.com', 'dsAttrTypeStandard:RealName': 'Morgen Sagen'}),
- ('morehouse', {'dsAttrTypeStandard:RecordType': 'dsRecTypeStandard:Users', 'dsAttrTypeStandard:AppleMetaNodeLocation': '/LDAPv3/127.0.0.1', 'dsAttrTypeStandard:RecordName': ['morehouse', 'Joe Morehouse'], 'dsAttrTypeStandard:FirstName': 'Joe', 'dsAttrTypeStandard:GeneratedUID': '98342930-90DC-11DE-A842-A29601FB13E8', 'dsAttrTypeStandard:LastName': 'Morehouse', 'dsAttrTypeStandard:EMailAddress': 'morehouse at example.com', 'dsAttrTypeStandard:RealName': 'Joe Morehouse'}),
+ ('morgen',
+ {
+ 'dsAttrTypeStandard:RecordType': 'dsRecTypeStandard:Users',
+ 'dsAttrTypeStandard:AppleMetaNodeLocation': '/LDAPv3/127.0.0.1',
+ 'dsAttrTypeStandard:RecordName': ['morgen', 'Morgen Sagen'],
+ 'dsAttrTypeStandard:FirstName': 'Morgen',
+ 'dsAttrTypeStandard:GeneratedUID': '83479230-821E-11DE-B6B0-DBB02C6D659D',
+ 'dsAttrTypeStandard:LastName': 'Sagen',
+ 'dsAttrTypeStandard:EMailAddress': 'morgen at example.com',
+ 'dsAttrTypeStandard:RealName': 'Morgen Sagen'
+ }),
+ ('morehouse',
+ {
+ 'dsAttrTypeStandard:RecordType': 'dsRecTypeStandard:Users',
+ 'dsAttrTypeStandard:AppleMetaNodeLocation': '/LDAPv3/127.0.0.1',
+ 'dsAttrTypeStandard:RecordName': ['morehouse', 'Joe Morehouse'],
+ 'dsAttrTypeStandard:FirstName': 'Joe',
+ 'dsAttrTypeStandard:GeneratedUID': '98342930-90DC-11DE-A842-A29601FB13E8',
+ 'dsAttrTypeStandard:LastName': 'Morehouse',
+ 'dsAttrTypeStandard:EMailAddress': 'morehouse at example.com',
+ 'dsAttrTypeStandard:RealName': 'Joe Morehouse'
+ }),
]
else:
return []
- fields = [('fullName', 'mor', True, u'starts-with'), ('emailAddresses', 'mor', True, u'starts-with'), ('firstName', 'mor', True, u'starts-with'), ('lastName', 'mor', True, u'starts-with')]
+ fields = [
+ ('fullName', 'mor', True, u'starts-with'),
+ ('emailAddresses', 'mor', True, u'starts-with'),
+ ('firstName', 'mor', True, u'starts-with'),
+ ('lastName', 'mor', True, u'starts-with')
+ ]
- results = (yield self.service().recordsMatchingFields(fields, lookupMethod=lookupMethod))
+ results = (yield self.service().recordsMatchingFields(fields,
+ lookupMethod=lookupMethod))
results = list(results)
self.assertEquals(len(results), 2)
for record in results:
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_principal.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_principal.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_principal.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -34,8 +34,6 @@
from twistedcaldav.directory.principal import DirectoryPrincipalResource
from twistedcaldav.directory.principal import DirectoryCalendarPrincipalResource
-from twistedcaldav.cache import DisabledCacheNotifier
-
import twistedcaldav.test.util
@@ -297,15 +295,6 @@
# DirectoryPrincipalResource
##
- def test_cacheNotifier(self):
- """
- Each DirectoryPrincipalResource should have a cacheNotifier attribute
- that is an instance of XattrCacheChangeNotifier
- """
- for provisioningResource, recordType, recordResource, record in self._allRecords():
- self.failUnless(isinstance(recordResource.cacheNotifier,
- DisabledCacheNotifier))
-
def test_displayName(self):
"""
DirectoryPrincipalResource.displayName()
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_proxyprincipalmembers.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_proxyprincipalmembers.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/test/test_proxyprincipalmembers.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -22,7 +22,6 @@
from twistedcaldav.directory.test.test_xmlfile import xmlFile, augmentsFile,\
proxiesFile
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
-from twistedcaldav.directory.principal import DirectoryPrincipalResource
from twistedcaldav.directory.xmlaccountsparser import XMLAccountsParser
from twistedcaldav.directory.xmlfile import XMLDirectoryService
@@ -57,6 +56,10 @@
yield XMLCalendarUserProxyLoader(proxiesFile.path).updateProxyDB()
+ def tearDown(self):
+ """ Empty the proxy db between tests """
+ return calendaruserproxy.ProxyDBService.clean()
+
def _getPrincipalByShortName(self, type, name):
provisioningResource = self.principalRootResources[self.directoryService.__class__.__name__]
return provisioningResource.principalForShortName(type, name)
@@ -131,6 +134,19 @@
proxies = sorted([principal.displayName() for principal in proxies])
self.assertEquals(proxies, sorted(expectedProxies))
+ @inlineCallbacks
+ def test_multipleProxyAssignmentsAtOnce(self):
+ yield self._proxyForTest(
+ DirectoryService.recordType_users, "userb",
+ ('a',),
+ True
+ )
+ yield self._proxyForTest(
+ DirectoryService.recordType_users, "userc",
+ ('a',),
+ True
+ )
+
def test_groupMembersRegular(self):
"""
DirectoryPrincipalResource.expandedGroupMembers()
@@ -286,42 +302,12 @@
set(["5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1",
"8B4288F6-CC82-491D-8EF9-642EF4F3E7D0"]))
- @inlineCallbacks
- def test_setGroupMemberSetNotifiesPrincipalCaches(self):
- class StubCacheNotifier(object):
- changedCount = 0
- def changed(self):
- self.changedCount += 1
- return succeed(None)
- user = self._getPrincipalByShortName(self.directoryService.recordType_users, "cdaboo")
-
- proxyGroup = user.getChild("calendar-proxy-write")
-
- notifier = StubCacheNotifier()
-
- oldCacheNotifier = DirectoryPrincipalResource.cacheNotifierFactory
-
- try:
- DirectoryPrincipalResource.cacheNotifierFactory = (lambda _1, _2, **kwargs: notifier)
-
- self.assertEquals(notifier.changedCount, 0)
-
- yield proxyGroup.setGroupMemberSet(
- davxml.GroupMemberSet(
- davxml.HRef.fromString(
- "/XMLDirectoryService/__uids__/5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1/")),
- None)
-
- self.assertEquals(notifier.changedCount, 1)
- finally:
- DirectoryPrincipalResource.cacheNotifierFactory = oldCacheNotifier
-
def test_proxyFor(self):
return self._proxyForTest(
DirectoryService.recordType_users, "wsanchez",
- ("Mecury Seven", "Gemini Twelve", "Apollo Eleven", "Orion", ),
+ ("Mercury Seven", "Gemini Twelve", "Apollo Eleven", "Orion", ),
True
)
@@ -336,7 +322,7 @@
yield self._proxyForTest(
DirectoryService.recordType_users, "wsanchez",
- ("Mecury Seven", "Gemini Twelve", "Apollo Eleven", "Orion", ),
+ ("Mercury Seven", "Gemini Twelve", "Apollo Eleven", "Orion", ),
True
)
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/xmlfile.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/xmlfile.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/directory/xmlfile.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -327,6 +327,9 @@
if guid is None:
guid = str(uuid4())
+ if not shortNames:
+ shortNames = (guid,)
+
# Make sure latest XML records are read in
self._lastCheck = 0
accounts = self._accounts()
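
With the xmlfile.py change above, a record created without short names falls back to using its GUID as the only short name, which is what the new test_missingShortNames and test_missingGUID tests check. A tiny sketch of that defaulting (normalizeIdentifiers is a hypothetical helper, not part of the directory API):

    from uuid import uuid4

    def normalizeIdentifiers(guid=None, shortNames=()):
        """Hypothetical helper mirroring the defaulting added to createRecord()."""
        if guid is None:
            guid = str(uuid4())
        if not shortNames:
            shortNames = (guid,)  # no short names supplied: fall back to the GUID
        return guid, shortNames

    guid, shortNames = normalizeIdentifiers()
    assert shortNames[0] == guid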
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/extensions.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/extensions.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/extensions.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -20,7 +20,6 @@
"""
__all__ = [
- "SudoSACLMixin",
"DAVResource",
"DAVPrincipalResource",
"DAVFile",
@@ -29,14 +28,15 @@
"CachingPropertyStore",
]
-import cPickle as pickle
import urllib
import cgi
import time
-from twisted.internet.defer import succeed, DeferredList, inlineCallbacks, returnValue
-from twisted.internet.defer import maybeDeferred
+from twisted.internet.defer import succeed, DeferredList
+from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.cred.error import LoginFailed, UnauthorizedLogin
+
+import twext.web2.server
from twext.web2 import responsecode
from twext.web2.auth.wrapper import UnauthorizedResponse
from twext.web2.http import HTTPError, Response, RedirectResponse
@@ -52,12 +52,12 @@
from twext.web2.dav.static import DAVFile as SuperDAVFile
from twext.web2.dav.resource import DAVResource as SuperDAVResource
from twext.web2.dav.resource import DAVPrincipalResource as SuperDAVPrincipalResource
-from twext.web2.dav.util import joinURL
from twext.web2.dav.method import prop_common
from twext.web2.dav.method.report import max_number_of_matches
from twext.python.log import Logger, LoggingMixIn
+import twistedcaldav
from twistedcaldav import customxml
from twistedcaldav.customxml import calendarserver_namespace
from twistedcaldav.util import Alternator, printTracebacks
@@ -68,7 +68,18 @@
log = Logger()
-class SudoSACLMixin (object):
+if twistedcaldav.__version__:
+ twext.web2.server.VERSION = "CalendarServer/%s %s" % (
+ twistedcaldav.__version__.replace(" ", ""),
+ twext.web2.server.VERSION,
+ )
+else:
+ twext.web2.server.VERSION = "CalendarServer/? %s" % (
+ twext.web2.server.VERSION,
+ )
+
+
+class SudoersMixin (object):
"""
Mixin class to let DAVResource, and DAVFile subclasses know about
sudoer principals and how to find their AuthID.
@@ -173,7 +184,7 @@
Check for sudo users before regular users.
"""
if type(creds) is str:
- return super(SudoSACLMixin, self).findPrincipalForAuthID(creds)
+ return super(SudoersMixin, self).findPrincipalForAuthID(creds)
for collection in self.principalCollections():
principal = collection.principalForShortName(
@@ -256,29 +267,10 @@
raise HTTPError(responsecode.FORBIDDEN)
else:
# No proxy - do default behavior
- result = (yield super(SudoSACLMixin, self).authorizationPrincipal(request, authID, authnPrincipal))
+ result = (yield super(SudoersMixin, self).authorizationPrincipal(request, authID, authnPrincipal))
returnValue(result)
-def updateCacheTokenOnCallback(f):
- def wrapper(self, *args, **kwargs):
- if hasattr(self, "cacheNotifier"):
- def updateToken(response):
- d = self.cacheNotifier.changed()
- d.addCallback(lambda _: response)
- return d
- d = maybeDeferred(f, self, *args, **kwargs)
-
- if hasattr(self, "cacheNotifier"):
- d.addCallback(updateToken)
-
- return d
- else:
- return f(self, *args, **kwargs)
-
- return wrapper
-
-
class DirectoryPrincipalPropertySearchMixIn(object):
@inlineCallbacks
@@ -449,205 +441,13 @@
returnValue(MultiStatusResponse(responses))
-class DAVResource (DirectoryPrincipalPropertySearchMixIn, SudoSACLMixin, SuperDAVResource, LoggingMixIn):
+class DAVResource (DirectoryPrincipalPropertySearchMixIn, SudoersMixin, SuperDAVResource, LoggingMixIn):
"""
Extended L{twext.web2.dav.resource.DAVResource} implementation.
"""
- def renderHTTP(self, request):
- log.info("%s %s %s" % (request.method, urllib.unquote(request.uri), "HTTP/%s.%s" % request.clientproto))
- return super(DAVResource, self).renderHTTP(request)
-
- @updateCacheTokenOnCallback
- def http_PROPPATCH(self, request):
- return super(DAVResource, self).http_PROPPATCH(request)
-
-
- @updateCacheTokenOnCallback
- def http_DELETE(self, request):
- return super(DAVResource, self).http_DELETE(request)
-
-
- @updateCacheTokenOnCallback
- def http_ACL(self, request):
- return super(DAVResource, self).http_ACL(request)
-
-
http_REPORT = http_REPORT
- @inlineCallbacks
- def findChildrenFaster(self, depth, request, okcallback, badcallback, names, privileges, inherited_aces):
- """
- See L{IDAVResource.findChildren}.
-
- This implementation works for C{depth} values of C{"0"}, C{"1"},
- and C{"infinity"}. As long as C{self.listChildren} is implemented
-
- @param depth: a C{str} for the depth: "0", "1" and "infinity" only allowed.
- @param request: the L{Request} for the current request in progress
- @param okcallback: a callback function used on all resources that pass the privilege check,
- or C{None}
- @param badcallback: a callback function used on all resources that fail the privilege check,
- or C{None}
- @param names: a C{list} of C{str}'s containing the names of the child resources to lookup. If
- empty or C{None} all children will be examined, otherwise only the ones in the list.
- @param privileges: a list of privileges to check.
- @param inherited_aces: the list of parent ACEs that are inherited by all children.
- """
- assert depth in ("0", "1", "infinity"), "Invalid depth: %s" % (depth,)
-
- if depth == "0" or not self.isCollection():
- returnValue(None)
-
- # First find all depth 1 children
- #children = []
- #yield self.findChildren("1", request, lambda x, y: children.append((x, y)), privileges=None, inherited_aces=None)
-
- children = []
- basepath = request.urlForResource(self)
- childnames = list(self.listChildren())
- for childname in childnames:
- if names and childname not in names:
- continue
- childpath = joinURL(basepath, childname)
- child = (yield request.locateChildResource(self, childname))
- if child is None:
- children.append((None, childpath + "/"))
- else:
- if child.isCollection():
- children.append((child, childpath + "/"))
- else:
- children.append((child, childpath))
-
- # Generate (acl,supported_privs) map
- aclmap = {}
- for resource, url in children:
- acl = (yield resource.accessControlList(request, inheritance=False, inherited_aces=inherited_aces))
- supportedPrivs = (yield resource.supportedPrivileges(request))
- aclmap.setdefault((pickle.dumps(acl), supportedPrivs), (acl, supportedPrivs, []))[2].append((resource, url))
-
- # Now determine whether each ace satisfies privileges
- #print aclmap
- allowed_collections = []
- for items in aclmap.itervalues():
- checked = (yield self.checkACLPrivilege(request, items[0], items[1], privileges, inherited_aces))
- if checked:
- for resource, url in items[2]:
- if okcallback:
- okcallback(resource, url)
- if resource.isCollection():
- allowed_collections.append((resource, url))
- else:
- if badcallback:
- for resource, url in items[2]:
- badcallback(resource, url)
-
- # TODO: Depth: infinity support
- if depth == "infinity":
- for collection, url in allowed_collections:
- collection_inherited_aces = (yield collection.inheritedACEsforChildren(request))
- yield collection.findChildrenFaster(depth, request, okcallback, badcallback, names, privileges, inherited_aces=collection_inherited_aces)
-
- returnValue(None)
-
- @inlineCallbacks
- def checkACLPrivilege(self, request, acl, privyset, privileges, inherited_aces):
-
- if acl is None:
- returnValue(False)
-
- principal = self.currentPrincipal(request)
-
- # Other principal types don't make sense as actors.
- assert principal.children[0].name in ("unauthenticated", "href"), \
- "Principal is not an actor: %r" % (principal,)
-
- acl = self.fullAccessControlList(acl, inherited_aces)
-
- pending = list(privileges)
- denied = []
-
- for ace in acl.children:
- for privilege in tuple(pending):
- if not self.matchPrivilege(davxml.Privilege(privilege), ace.privileges, privyset):
- continue
-
- match = (yield self.matchPrincipal(principal, ace.principal, request))
-
- if match:
- if ace.invert:
- continue
- else:
- if not ace.invert:
- continue
-
- pending.remove(privilege)
-
- if not ace.allow:
- denied.append(privilege)
-
- returnValue(len(denied) + len(pending) == 0)
-
- def fullAccessControlList(self, acl, inherited_aces):
- """
- See L{IDAVResource.accessControlList}.
-
- This implementation looks up the ACL in the private property
- C{(L{twisted_private_namespace}, "acl")}.
- If no ACL has been stored for this resource, it returns the value
- returned by C{defaultAccessControlList}.
- If access is disabled it will return C{None}.
- """
- #
- # Inheritance is problematic. Here is what we do:
- #
- # 1. A private element <Twisted:inheritable> is defined for use inside
- # of a <DAV:ace>. This private element is removed when the ACE is
- # exposed via WebDAV.
- #
- # 2. When checking ACLs with inheritance resolution, the server must
- # examine all parent resources of the current one looking for any
- # <Twisted:inheritable> elements.
- #
- # If those are defined, the relevant ace is applied to the ACL on the
- # current resource.
- #
-
- # Dynamically update privileges for those ace's that are inherited.
- if acl:
- aces = list(acl.children)
- else:
- aces = []
-
- aces.extend(inherited_aces)
-
- acl = davxml.ACL(*aces)
-
- return acl
-
- @inlineCallbacks
- def matchPrincipal(self, principal1, principal2, request):
- """
- Implementation of DAVResource.matchPrincipal that caches the principal match
- for the duration of a request. This avoids having to do repeated group membership
- tests when privileges on multiple resources are determined.
- """
-
- if not hasattr(request, "matchPrincipalCache"):
- request.matchPrincipalCache = {}
-
- # The interesting part of a principal is it's one child
- principals = (principal1, principal2)
- cache_key = tuple([str(p.children[0]) for p in principals])
-
- match = request.matchPrincipalCache.get(cache_key, None)
- if match is None:
- match = (yield super(DAVResource, self).matchPrincipal(principal1, principal2, request))
- request.matchPrincipalCache[cache_key] = match
-
- returnValue(match)
-
-
class DAVPrincipalResource (DirectoryPrincipalPropertySearchMixIn, SuperDAVPrincipalResource, LoggingMixIn):
"""
Extended L{twext.web2.dav.static.DAVFile} implementation.
@@ -659,10 +459,6 @@
(calendarserver_namespace, "record-type"),
)
- def renderHTTP(self, request):
- log.info("%s %s %s" % (request.method, urllib.unquote(request.uri), "HTTP/%s.%s" % request.clientproto))
- return super(DAVPrincipalResource, self).renderHTTP(request)
-
http_REPORT = http_REPORT
@inlineCallbacks
@@ -728,7 +524,7 @@
return succeed(davxml.ResourceType(davxml.Principal()))
-class DAVFile (SudoSACLMixin, SuperDAVFile, LoggingMixIn):
+class DAVFile (SudoersMixin, SuperDAVFile, LoggingMixIn):
"""
Extended L{twext.web2.dav.static.DAVFile} implementation.
"""
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/mail.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/mail.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/mail.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -768,7 +768,7 @@
- def outbound(self, originator, recipient, calendar, language='en'):
+ def outbound(self, originator, recipient, calendar, language='en', send=True):
# create token, send email
component = calendar.masterComponent()
@@ -804,15 +804,15 @@
if method != "REPLY":
# Invites and cancellations:
- # Reuse or generate a token based on originator, recipient, and
+ # Reuse or generate a token based on originator, toAddr, and
# event uid
- token = self.db.getToken(originator, recipient, icaluid)
+ token = self.db.getToken(originator, toAddr, icaluid)
if token is None:
- token = self.db.createToken(originator, recipient, icaluid)
- self.log_debug("Mail gateway created token %s for %s (originator), %s (recipient) and %s (icaluid)" % (token, originator, recipient, icaluid))
+ token = self.db.createToken(originator, toAddr, icaluid)
+ self.log_debug("Mail gateway created token %s for %s (originator), %s (recipient) and %s (icaluid)" % (token, originator, toAddr, icaluid))
inviteState = "new"
else:
- self.log_debug("Mail gateway reusing token %s for %s (originator), %s (recipient) and %s (icaluid)" % (token, originator, recipient, icaluid))
+ self.log_debug("Mail gateway reusing token %s for %s (originator), %s (recipient) and %s (icaluid)" % (token, originator, toAddr, icaluid))
inviteState = "update"
fullServerAddress = settings['Address']
@@ -863,34 +863,38 @@
orgCN, attendees, formattedFrom, addressWithToken, recipient,
language=language)
- self.log_debug("Sending: %s" % (message,))
- def _success(result, msgId, fromAddr, toAddr):
- self.log_info("Mail gateway sent message %s from %s to %s" %
- (msgId, fromAddr, toAddr))
- return True
+ if send:
+ self.log_debug("Sending: %s" % (message,))
+ def _success(result, msgId, fromAddr, toAddr):
+ self.log_info("Mail gateway sent message %s from %s to %s" %
+ (msgId, fromAddr, toAddr))
+ return True
- def _failure(failure, msgId, fromAddr, toAddr):
- self.log_error("Mail gateway failed to send message %s from %s to %s (Reason: %s)" %
- (msgId, fromAddr, toAddr, failure.getErrorMessage()))
- return False
+ def _failure(failure, msgId, fromAddr, toAddr):
+ self.log_error("Mail gateway failed to send message %s from %s to %s (Reason: %s)" %
+ (msgId, fromAddr, toAddr, failure.getErrorMessage()))
+ return False
- deferred = defer.Deferred()
+ deferred = defer.Deferred()
- if settings["UseSSL"]:
- contextFactory = ssl.ClientContextFactory()
- else:
- contextFactory = None
+ if settings["UseSSL"]:
+ contextFactory = ssl.ClientContextFactory()
+ else:
+ contextFactory = None
- factory = ESMTPSenderFactory(settings['Username'], settings['Password'],
- fromAddr, toAddr, StringIO(str(message)), deferred,
- contextFactory=contextFactory,
- requireAuthentication=False,
- requireTransportSecurity=settings["UseSSL"])
+ factory = ESMTPSenderFactory(settings['Username'], settings['Password'],
+ fromAddr, toAddr, StringIO(str(message)), deferred,
+ contextFactory=contextFactory,
+ requireAuthentication=False,
+ requireTransportSecurity=settings["UseSSL"])
- reactor.connectTCP(settings['Server'], settings['Port'], factory)
- deferred.addCallback(_success, msgId, fromAddr, toAddr)
- deferred.addErrback(_failure, msgId, fromAddr, toAddr)
- return deferred
+ reactor.connectTCP(settings['Server'], settings['Port'], factory)
+ deferred.addCallback(_success, msgId, fromAddr, toAddr)
+ deferred.addErrback(_failure, msgId, fromAddr, toAddr)
+ return deferred
+ else:
+ return succeed((inviteState, calendar, orgEmail, orgCN, attendees,
+ formattedFrom, recipient, addressWithToken))
def getIconPath(self, details, canceled, language='en'):
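
The mail.py hunk keys invitation tokens on (originator, toAddr, icaluid) and reuses an existing token to decide between the "new" and "update" invite states. A small sketch of that reuse logic with a toy token store (TokenDB is illustrative, not the real SQL-backed database):

    import uuid

    class TokenDB(object):
        """Toy stand-in for the mail gateway token database."""
        def __init__(self):
            self._tokens = {}

        def getToken(self, originator, recipient, icaluid):
            return self._tokens.get((originator, recipient, icaluid))

        def createToken(self, originator, recipient, icaluid):
            token = str(uuid.uuid4())
            self._tokens[(originator, recipient, icaluid)] = token
            return token

    db = TokenDB()
    key = ("organizer at example.com", "attendee at example.com", "uid-1")

    token = db.getToken(*key)
    if token is None:
        token = db.createToken(*key)   # first message for this trio
        inviteState = "new"
    else:
        inviteState = "update"         # later messages reuse the same token
    assert inviteState == "new"
    assert db.getToken(*key) == token  # a follow-up would be treated as an update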
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/memcacheprops.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/memcacheprops.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/memcacheprops.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -32,10 +32,9 @@
except ImportError:
from md5 import new as md5
-from memcacheclient import ClientFactory as MemcacheClientFactory, MemcacheError, TokenMismatchError
-
from twext.python.log import LoggingMixIn, Logger
-
+from twext.python.memcacheclient import ClientFactory
+from twext.python.memcacheclient import MemcacheError, TokenMismatchError
from twext.python.filepath import CachingFilePath as FilePath
from twext.web2 import responsecode
from twext.web2.http import HTTPError, StatusResponse
@@ -61,7 +60,7 @@
log.info("Instantiating memcache connection for MemcachePropertyCollection")
- MemcachePropertyCollection._memcacheClient = MemcacheClientFactory.getClient([
+ MemcachePropertyCollection._memcacheClient = ClientFactory.getClient([
"%s:%s" % (config.Memcached.Pools.Default.BindAddress, config.Memcached.Pools.Default.Port)
],
debug=0,
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/method/report_common.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/method/report_common.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/method/report_common.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -482,6 +482,8 @@
# Short-cut - if an fbtype exists we can use that
if type == "VEVENT" and aggregated_resources[key][0][3] != '?':
+
+ matchedResource = False
# Look at each instance
for float, start, end, fbtype in aggregated_resources[key]:
@@ -520,7 +522,14 @@
# Double check for overlap
if clipped:
+ matchedResource = True
fbinfo[fbtype_index_mapper.get(fbtype, 0)].append(clipped)
+
+ if matchedResource:
+ # Check size of results is within limit
+ matchtotal += 1
+ if matchtotal > max_number_of_matches:
+ raise NumberOfMatchesWithinLimits(max_number_of_matches)
else:
calendar = (yield calresource.iCalendarForUser(request, name))
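
The report_common.py change counts a calendar resource toward the free-busy match total once per resource rather than once per instance, and raises NumberOfMatchesWithinLimits when the total exceeds the configured maximum. A simplified sketch of that accounting (the helper and its inputs are illustrative; the real code works on clipped free-busy periods per fbtype):

    class NumberOfMatchesWithinLimits(Exception):
        """Raised when a free-busy query touches more resources than allowed."""
        def __init__(self, limit):
            self.limit = limit

    def accumulateFreeBusy(resources, maxMatches):
        fbinfo = []
        matchtotal = 0
        for periods in resources:          # one entry per calendar resource
            matchedResource = False
            for period in periods:         # a resource may contribute many periods
                fbinfo.append(period)
                matchedResource = True
            if matchedResource:
                matchtotal += 1            # counted once per resource, not per period
                if matchtotal > maxMatches:
                    raise NumberOfMatchesWithinLimits(maxMatches)
        return fbinfo, matchtotal

    periods = [
        [("20100407T090000Z", "20100407T100000Z"),
         ("20100407T130000Z", "20100407T140000Z")],
        [],                                # a resource that contributes nothing
        [("20100407T150000Z", "20100407T160000Z")],
    ]
    info, total = accumulateFreeBusy(periods, maxMatches=50)
    assert total == 2                      # the empty resource is not counted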
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/notify.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/notify.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/notify.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -29,7 +29,6 @@
The icalserver tap creates a NotificationClient object at startup;
it deals with passing along notifications to the notification server.
-These notifications originate from cache.py:MemcacheChangeNotifier.changed().
"""
# TODO: add CalDAVTester test for examining new xmpp-uri property
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/resource.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/resource.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -40,7 +40,7 @@
from twext.web2.dav.http import ErrorResponse
from twisted.internet import reactor
-from twisted.internet.defer import Deferred, maybeDeferred, succeed
+from twisted.internet.defer import Deferred, succeed
from twisted.internet.defer import inlineCallbacks, returnValue
from twext.web2 import responsecode
from twext.web2.dav import davxml
@@ -52,11 +52,8 @@
from twext.web2.dav.util import joinURL, parentForURL, unimplemented, normalizeURL
from twext.web2.http import HTTPError, RedirectResponse, StatusResponse, Response
from twext.web2.http_headers import MimeType
-from twext.web2.iweb import IResponse
from twext.web2.stream import MemoryStream
-import twext.web2.server
-import twistedcaldav
from twistedcaldav import caldavxml, customxml
from twistedcaldav import carddavxml
from twistedcaldav.carddavxml import carddav_namespace
@@ -75,11 +72,6 @@
from twistedcaldav.vcard import Component as vComponent
-if twistedcaldav.__version__:
- serverVersion = twext.web2.server.VERSION + " TwistedCardDAV/" + twistedcaldav.__version__
-else:
- serverVersion = twext.web2.server.VERSION + " TwistedCardDAV/?"
-
##
# Sharing Conts
##
@@ -101,42 +93,13 @@
shareAcceptStatesByXML["DELETED"] = customxml.InviteStatusDeleted()
class CalDAVComplianceMixIn(object):
-
def davComplianceClasses(self):
- if config.Scheduling.CalDAV.OldDraftCompatibility:
- extra_compliance = caldavxml.caldav_full_compliance
- else:
- extra_compliance = caldavxml.caldav_implicit_compliance
- if config.EnableProxyPrincipals:
- extra_compliance += customxml.calendarserver_proxy_compliance
- if config.EnablePrivateEvents:
- extra_compliance += customxml.calendarserver_private_events_compliance
- if config.Scheduling.CalDAV.get("EnablePrivateComments", True):
- extra_compliance += customxml.calendarserver_private_comments_compliance
- extra_compliance += customxml.calendarserver_principal_property_search_compliance
- if config.EnableCardDAV:
- extra_compliance += carddavxml.carddav_compliance
- if config.Sharing.Enabled:
- extra_compliance += customxml.calendarserver_sharing_compliance
- return tuple(super(CalDAVComplianceMixIn, self).davComplianceClasses()) + extra_compliance
+ return (
+ tuple(super(CalDAVComplianceMixIn, self).davComplianceClasses())
+ + config.CalDAVComplianceClasses
+ )
-def updateCacheTokenOnCallback(f):
- def fun(self, *args, **kwargs):
- def _updateToken(response):
- return self.cacheNotifier.changed().addCallback(
- lambda _: response)
-
- d = maybeDeferred(f, self, *args, **kwargs)
-
- if hasattr(self, 'cacheNotifier'):
- d.addCallback(_updateToken)
-
- return d
-
- return fun
-
-
class CalDAVResource (CalDAVComplianceMixIn, SharedCollectionMixin, DAVResource, LoggingMixIn):
"""
CalDAV resource.
@@ -191,32 +154,7 @@
return super(CalDAVResource, self).render(request)
- def renderHTTP(self, request):
- response = maybeDeferred(super(CalDAVResource, self).renderHTTP, request)
- def setHeaders(response):
- response = IResponse(response)
- response.headers.setHeader("server", serverVersion)
-
- return response
-
- response.addCallback(setHeaders)
-
- return response
-
- @updateCacheTokenOnCallback
- def http_PROPPATCH(self, request):
- return super(CalDAVResource, self).http_PROPPATCH(request)
-
- @updateCacheTokenOnCallback
- def http_DELETE(self, request):
- return super(CalDAVResource, self).http_DELETE(request)
-
- @updateCacheTokenOnCallback
- def http_ACL(self, request):
- return super(CalDAVResource, self).http_ACL(request)
-
-
##
# WebDAV
##
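
CalDAVComplianceMixIn.davComplianceClasses() now appends a precomputed config.CalDAVComplianceClasses tuple instead of rebuilding the list from individual flags on every call. A rough sketch of deriving such a tuple once from configuration flags (the function, the flag handling, and the compliance strings shown are illustrative; the real tuple is assembled elsewhere from caldavxml/customxml constants):

    def compileComplianceClasses(configData):
        """Illustrative derivation of a precomputed compliance tuple."""
        compliance = ["calendar-access", "calendar-schedule"]
        if configData.get("EnableProxyPrincipals"):
            compliance.append("calendar-proxy")
        if configData.get("EnablePrivateEvents"):
            compliance.append("calendarserver-private-events")
        if configData.get("Sharing", {}).get("Enabled"):
            compliance.append("calendarserver-sharing")
        configData["CalDAVComplianceClasses"] = tuple(compliance)

    settings = {"EnableProxyPrincipals": True, "Sharing": {"Enabled": True}}
    compileComplianceClasses(settings)
    # A resource's davComplianceClasses() can now just append this precomputed tuple.
    assert "calendarserver-sharing" in settings["CalDAVComplianceClasses"]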
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/sharing.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/sharing.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/sharing.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -30,7 +30,6 @@
from twistedcaldav import customxml, caldavxml
from twistedcaldav.config import config
from twistedcaldav.customxml import SharedCalendar
-from twistedcaldav.extensions import updateCacheTokenOnCallback
from twistedcaldav.sql import AbstractSQLDatabase, db_prefix
from uuid import uuid4
from vobject.icalendar import dateTimeToString, utc
@@ -907,7 +906,6 @@
# Add to collections
yield notifications.addNotification(request, notificationUID, xmltype, xmldata)
- @updateCacheTokenOnCallback
def xmlPOSTNoAuth(self, encoding, request):
def _handleErrorResponse(error):
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/static.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/static.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/static.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -99,7 +99,6 @@
from twistedcaldav.sharing import SharedHomeMixin
from twistedcaldav.timezoneservice import TimezoneServiceResource
from twistedcaldav.vcardindex import AddressBookIndex
-from twistedcaldav.cache import DisabledCacheNotifier, PropfindCacheMixin
from twistedcaldav.notify import getPubSubConfiguration, getPubSubXMPPURI
from twistedcaldav.notify import getPubSubHeartbeatURI, getPubSubPath
from twistedcaldav.notify import ClientNotifier, getNodeCacher
@@ -559,12 +558,6 @@
log.debug("%r does not have a clientNotifier but the CTag changed"
% (self,))
- if hasattr(self, 'cacheNotifier'):
- return self.cacheNotifier.changed()
- else:
- log.debug("%r does not have a cacheNotifier but the CTag changed"
- % (self,))
-
return succeed(True)
##
@@ -910,12 +903,10 @@
def url(self):
return joinURL(self.parent.url(), self.record.uid)
-class CalendarHomeFile (PropfindCacheMixin, AutoProvisioningFileMixIn, SharedHomeMixin, DirectoryCalendarHomeResource, CalDAVFile):
+class CalendarHomeFile (AutoProvisioningFileMixIn, SharedHomeMixin, DirectoryCalendarHomeResource, CalDAVFile):
"""
Calendar home collection resource.
"""
- cacheNotifierFactory = DisabledCacheNotifier
-
liveProperties = CalDAVFile.liveProperties + (
(customxml.calendarserver_namespace, "xmpp-uri"),
(customxml.calendarserver_namespace, "xmpp-heartbeat-uri"),
@@ -926,7 +917,6 @@
"""
@param path: the path to the file which will back the resource.
"""
- self.cacheNotifier = self.cacheNotifierFactory(self)
self.clientNotifier = ClientNotifier(self)
CalDAVFile.__init__(self, path)
DirectoryCalendarHomeResource.__init__(self, parent, record)
@@ -963,7 +953,6 @@
if cls is not None:
child = cls(self.fp.child(name).path, self)
- child.cacheNotifier = self.cacheNotifier
child.clientNotifier = self.clientNotifier
return child
return self.createSimilarFile(self.fp.child(name).path)
@@ -973,7 +962,6 @@
return self
else:
similar = CalDAVFile(path, principalCollections=self.principalCollections())
- similar.cacheNotifier = self.cacheNotifier
similar.clientNotifier = self.clientNotifier
return similar
@@ -1479,12 +1467,10 @@
def createSimilarFile(self, path):
raise HTTPError(responsecode.NOT_FOUND)
-class AddressBookHomeFile (PropfindCacheMixin, AutoProvisioningFileMixIn, DirectoryAddressBookHomeResource, CalDAVFile):
+class AddressBookHomeFile (AutoProvisioningFileMixIn, DirectoryAddressBookHomeResource, CalDAVFile):
"""
Address book home collection resource.
"""
- cacheNotifierFactory = DisabledCacheNotifier
-
liveProperties = CalDAVFile.liveProperties + (
(customxml.calendarserver_namespace, "xmpp-uri"),
)
@@ -1493,7 +1479,6 @@
"""
@param path: the path to the file which will back the resource.
"""
- self.cacheNotifier = self.cacheNotifierFactory(self)
self.clientNotifier = ClientNotifier(self)
CalDAVFile.__init__(self, path)
DirectoryAddressBookHomeResource.__init__(self, parent, record)
@@ -1514,7 +1499,6 @@
if cls is not None:
child = cls(self.fp.child(name).path, self)
- child.cacheNotifier = self.cacheNotifier
child.clientNotifier = self.clientNotifier
return child
@@ -1525,7 +1509,6 @@
return self
else:
similar = CalDAVFile(path, principalCollections=self.principalCollections())
- similar.cacheNotifier = self.cacheNotifier
similar.clientNotifier = self.clientNotifier
return similar
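
With the PropfindCacheMixin and DisabledCacheNotifier plumbing removed in static.py, the home resources only propagate their clientNotifier to children and to similar files. A tiny sketch of that remaining propagation pattern, with stand-in class names rather than the server's CalDAVFile hierarchy:

class ClientNotifier(object):
    # Stand-in for twistedcaldav.notify.ClientNotifier.
    def __init__(self, resource):
        self.resource = resource

class HomeFile(object):
    def __init__(self, path):
        self.path = path
        self.clientNotifier = ClientNotifier(self)

    def createSimilarFile(self, path):
        similar = HomeFile(path)
        # Children reuse the home's notifier instead of creating their own.
        similar.clientNotifier = self.clientNotifier
        return similar

home = HomeFile("/calendars/__uids__/home1/")
child = home.createSimilarFile(home.path + "calendar/")
assert child.clientNotifier is home.clientNotifier
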
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/stdconfig.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/stdconfig.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -26,6 +26,7 @@
from twext.python.log import Logger, InvalidLogLevelError
from twext.python.log import clearLogLevels, setLogLevelForNamespace
+from twistedcaldav import caldavxml, customxml, carddavxml
from twistedcaldav.config import ConfigProvider, ConfigurationError
from twistedcaldav.config import config, _mergeData, fullServerPath
from twistedcaldav.partitions import partitions
@@ -34,6 +35,7 @@
log = Logger()
+
DEFAULT_CONFIG_FILE = "/etc/caldavd/caldavd.plist"
DEFAULT_CARDDAV_CONFIG_FILE = "/etc/carddavd/carddavd.plist"
@@ -123,11 +125,11 @@
# default. For example, it may be the address of a load balancer or
# proxy which forwards connections to the server.
#
- "ServerHostName": "", # Network host name.
- "HTTPPort": 0, # HTTP port (0 to disable HTTP)
- "SSLPort" : 0, # SSL port (0 to disable HTTPS)
+ "ServerHostName": "", # Network host name.
+ "HTTPPort": 0, # HTTP port (0 to disable HTTP)
+ "SSLPort" : 0, # SSL port (0 to disable HTTPS)
"RedirectHTTPToHTTPS" : False, # If True, all nonSSL requests redirected to an SSL Port
- "SSLMethod" : "SSLv3_METHOD", # SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD
+ "SSLMethod" : "SSLv3_METHOD", # SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD
#
# Network address configuration information
@@ -137,14 +139,18 @@
"BindAddresses": [], # List of IP addresses to bind to [empty = all]
"BindHTTPPorts": [], # List of port numbers to bind to for HTTP [empty = same as "Port"]
"BindSSLPorts" : [], # List of port numbers to bind to for SSL [empty = same as "SSLPort"]
- "InheritFDs": [], # File descriptors to inherit for HTTP requests (empty = don't inherit)
+ "InheritFDs" : [], # File descriptors to inherit for HTTP requests (empty = don't inherit)
"InheritSSLFDs": [], # File descriptors to inherit for HTTPS requests (empty = don't inherit)
+ "MetaFD": 0, # Inherited file descriptor to call recvmsg() on to recive sockets (none = don't inherit)
+ "UseMetaFD": True, # Use a 'meta' FD, i.e. an FD to transmit other
+ # FDs to slave processes.
+
#
# Types of service provided
#
"EnableCalDAV" : True, # Enable CalDAV service
- "EnableCardDAV" : False, # Enable CardDAV service
+ "EnableCardDAV" : True, # Enable CardDAV service
# XXX CardDAV
"DirectoryAddressBook": {
@@ -269,6 +275,9 @@
#
"AccessLogFile" : "access.log", # Apache-style access log
"ErrorLogFile" : "error.log", # Server activity log
+ "ErrorLogEnabled" : True, # True = use log file, False = stdout
+ "ErrorLogRotateMB" : 10, # Rotate error log after so many megabytes
+ "ErrorLogMaxRotatedFiles" : 5, # Retain this many error log files
"PIDFile" : "caldavd.pid",
"RotateAccessLog" : False,
"EnableExtendedAccessLog": True,
@@ -451,10 +460,10 @@
# Partitioning
#
"Partitioning" : {
- "Enabled": False, # Partitioning enabled or not
- "ServerPartitionID": "", # Unique ID for this server's partition instance.
+ "Enabled": False, # Partitioning enabled or not
+ "ServerPartitionID": "", # Unique ID for this server's partition instance.
"PartitionConfigFile": "partitions.plist", # File path for partition information
- "MaxClients": 5, # Pool size for connections to each partition
+ "MaxClients": 5, # Pool size for connections to each partition
},
#
@@ -545,7 +554,6 @@
},
"EnableKeepAlive": True,
- "ResponseCacheTimeout": 30, # Minutes
"Includes": [], # Other plists to parse after this one
}
@@ -824,10 +832,6 @@
raise ConfigurationError("Invalid log level: %s" % (e.level))
def _updateNotifications(configDict):
- #
- # Notifications
- #
-
# Reloading not supported -- requires process running as root
if getattr(configDict, "_reloading", False):
return
@@ -919,10 +923,6 @@
CalendarPrincipalResource.enableSharing(configDict.Sharing.Enabled)
def _updatePartitions(configDict):
- #
- # Partitions
- #
-
if configDict.Partitioning.Enabled:
partitions.setSelfPartition(configDict.Partitioning.ServerPartitionID)
partitions.setMaxClients(configDict.Partitioning.MaxClients)
@@ -931,6 +931,29 @@
else:
partitions.clear()
+def _updateCompliance(configDict):
+ if configDict.Scheduling.CalDAV.OldDraftCompatibility:
+ compliance = caldavxml.caldav_full_compliance
+ else:
+ compliance = caldavxml.caldav_implicit_compliance
+
+ if configDict.EnableProxyPrincipals:
+ compliance += customxml.calendarserver_proxy_compliance
+ if configDict.EnablePrivateEvents:
+ compliance += customxml.calendarserver_private_events_compliance
+ if configDict.Scheduling.CalDAV.EnablePrivateComments:
+ compliance += customxml.calendarserver_private_comments_compliance
+ if configDict.EnableCardDAV:
+ compliance += carddavxml.carddav_compliance
+
+ compliance += customxml.calendarserver_principal_property_search_compliance
+
+ if config.Sharing.Enabled:
+ compliance += customxml.calendarserver_sharing_compliance
+
+ configDict.CalDAVComplianceClasses = compliance
+
+
PRE_UPDATE_HOOKS = (
_preUpdateDirectoryService,
_preUpdateDirectoryAddressBookBackingDirectoryService,
@@ -949,6 +972,7 @@
_updateScheduling,
_updateSharing,
_updatePartitions,
+ _updateCompliance,
)
def _cleanup(configDict, defaultDict):
@@ -993,5 +1017,5 @@
return cleanDict
config.setProvider(PListConfigProvider(DEFAULT_CONFIG))
-config.addPreUpdateHook(PRE_UPDATE_HOOKS)
-config.addPostUpdateHook(POST_UPDATE_HOOKS)
+config.addPreUpdateHooks(PRE_UPDATE_HOOKS)
+config.addPostUpdateHooks(POST_UPDATE_HOOKS)
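
The renamed addPreUpdateHooks/addPostUpdateHooks registration, together with the new _updateCompliance hook, means the compliance list is recomputed whenever the configuration is loaded or reloaded rather than on every request. A standalone sketch of that hook pattern, using a toy config object in place of the server's ConfigProvider (names and flags here are simplified assumptions):

class Config(dict):
    """Toy stand-in for the server's config object."""
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__

    def __init__(self, defaults):
        super(Config, self).__init__(defaults)
        self._hooks = []

    def addPostUpdateHooks(self, hooks):
        self._hooks.extend(hooks)

    def update(self, changes=None):
        if changes:
            dict.update(self, changes)
        # Every (re)load re-runs the registered hooks.
        for hook in self._hooks:
            hook(self)

def _updateCompliance(configDict):
    compliance = ["calendar-access"]
    if configDict.EnableCardDAV:
        compliance.append("addressbook")
    if configDict.Sharing:
        compliance.append("calendarserver-sharing")
    configDict.CalDAVComplianceClasses = tuple(compliance)

config = Config({"EnableCardDAV": True, "Sharing": False})
config.addPostUpdateHooks([_updateCompliance])
config.update()
print(config.CalDAVComplianceClasses)  # ('calendar-access', 'addressbook')
config.update({"Sharing": True})
print(config.CalDAVComplianceClasses)  # (..., 'calendarserver-sharing')
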
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_cache.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_cache.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_cache.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,479 +0,0 @@
-##
-# Copyright (c) 2008-2010 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from new import instancemethod
-import hashlib
-import cPickle
-
-from twisted.internet.defer import succeed, maybeDeferred
-
-from twext.web2.dav import davxml
-from twext.web2.dav.util import allDataFromStream
-from twext.web2.stream import MemoryStream
-from twext.web2.http_headers import Headers
-
-from twistedcaldav.cache import MemcacheResponseCache
-from twistedcaldav.cache import MemcacheChangeNotifier
-from twistedcaldav.cache import PropfindCacheMixin
-
-from twistedcaldav.test.util import InMemoryMemcacheProtocol
-from twistedcaldav.test.util import TestCase
-
-
-def _newCacheToken(self):
- called = getattr(self, '_called', 0)
-
- token = 'token%d' % (called,)
- setattr(self, '_called', called + 1)
- return token
-
-
-
-class StubRequest(object):
- resources = {}
-
- def __init__(self, method, uri, authnUser, depth='1', body=None):
- self.method = method
- self.uri = uri
- self.authnUser = davxml.Principal(davxml.HRef.fromString(authnUser))
- self.headers = Headers({'depth': depth})
-
- if body is None:
- body = "foobar"
-
- self.body = body
- self.stream = MemoryStream(body)
-
-
- def locateResource(self, uri):
- assert uri[0] == '/', "URI path didn't begin with '/': %s" % (uri,)
- return succeed(self.resources.get(uri))
-
-
-
-class StubResponse(object):
- def __init__(self, code, headers, body):
- self.code = code
- self.headers = Headers(headers)
- self.body = body
- self.stream = MemoryStream(body)
-
-
-
-class StubURLResource(object):
- def __init__(self, url, record=None):
- self._url = url
-
- if record is not None:
- self.record = record
-
- def url(self):
- return self._url
-
-
-
-class MemCacheChangeNotifierTests(TestCase):
- def setUp(self):
- TestCase.setUp(self)
- self.memcache = InMemoryMemcacheProtocol()
- self.ccn = MemcacheChangeNotifier(
- StubURLResource(':memory:'),
- cachePool=self.memcache)
-
- self.ccn._newCacheToken = instancemethod(_newCacheToken,
- self.ccn,
- MemcacheChangeNotifier)
-
- def assertToken(self, expectedToken):
- token = self.memcache._cache['cacheToken::memory:'][1]
- self.assertEquals(token, expectedToken)
-
-
- def test_cacheTokenPropertyIsProvisioned(self):
- d = self.ccn.changed()
- d.addCallback(lambda _: self.assertToken('token0'))
- return d
-
-
- def test_changedChangesToken(self):
- d = self.ccn.changed()
- d.addCallback(lambda _: self.ccn.changed())
- d.addCallback(lambda _: self.assertToken('token1'))
- return d
-
-
- def tearDown(self):
- for call in self.memcache._timeouts.itervalues():
- call.cancel()
- MemcacheChangeNotifier._memcacheProtocol = None
-
-
-
-class BaseCacheTestMixin(object):
- def setUp(self):
- StubRequest.resources = {
- '/calendars/__uids__/cdaboo/': StubURLResource(
- '/calendars/__uids__/cdaboo/'),
- '/calendars/users/cdaboo/': StubURLResource(
- '/calendars/__uids__/cdaboo/'),
- '/principals/__uids__/cdaboo/': StubURLResource(
- '/principals/__uids__/cdaboo/', record='directoryToken0'),
- '/calendars/__uids__/dreid/': StubURLResource(
- '/calendars/__uids__/dreid/'),
- '/principals/__uids__/dreid/': StubURLResource(
- '/principals/__uids__/dreid/', record='directoryToken0')}
-
-
- def tearDown(self):
- StubRequest.resources = {}
-
-
- def assertResponse(self, response, expected):
- self.assertNotEquals(response, None, "Got None instead of a response.")
- self.assertEquals(response.code, expected[0])
- self.assertEquals(set(response.headers.getAllRawHeaders()),
- set(expected[1].getAllRawHeaders()))
-
- d = allDataFromStream(response.stream)
- d.addCallback(self.assertEquals, expected[2])
- return d
-
-
- def test_getResponseForRequestMultiHomedRequestURI(self):
- request = StubRequest(
- 'PROPFIND',
- '/calendars/users/cdaboo/',
- '/principals/__uids__/cdaboo/')
-
- d = self.rc.getResponseForRequest(request)
-
- d.addCallback(self.assertEquals, None)
- return d
-
-
- def test_getResponseForRequestURINotFound(self):
- request = StubRequest(
- 'PROPFIND',
- '/calendars/__uids__/wsanchez/',
- '/calendars/__uids__/dreid/')
-
- d = self.rc.getResponseForRequest(request)
- d.addCallback(self.assertEquals, None)
- return d
-
-
- def test_getResponseForRequestMultiHomedPrincipalURI(self):
- request = StubRequest(
- 'PROPFIND',
- '/calendars/__uids__/cdaboo/',
- '/principals/users/cdaboo/')
-
- d = self.rc.getResponseForRequest(request)
-
- d.addCallback(self.assertEquals, None)
- return d
-
-
- def test_getResponseForRequestNotInCache(self):
- d = self.rc.getResponseForRequest(StubRequest(
- 'PROPFIND',
- '/calendars/__uids__/dreid/',
- '/principals/__uids__/dreid/'))
-
- d.addCallback(self.assertEquals, None)
- return d
-
-
- def test_getResponseForRequestInCache(self):
- d = self.rc.getResponseForRequest(StubRequest(
- 'PROPFIND',
- '/calendars/__uids__/cdaboo/',
- '/principals/__uids__/cdaboo/'))
-
- d.addCallback(self.assertResponse, self.expected_response)
- return d
-
-
- def test_getResponseForRequestPrincipalTokenChanged(self):
- self.tokens['/principals/__uids__/cdaboo/'] = 'principalToken1'
-
- d = self.rc.getResponseForRequest(StubRequest(
- 'PROPFIND',
- '/calendars/__uids__/cdaboo/',
- '/principals/__uids__/cdaboo/'))
-
- d.addCallback(self.assertEquals, None)
- return d
-
-
- def test_getResponseForRequestUriTokenChanged(self):
- self.tokens['/calendars/__uids__/cdaboo/'] = 'uriToken1'
-
- d = self.rc.getResponseForRequest(StubRequest(
- 'PROPFIND',
- '/calendars/__uids__/cdaboo/',
- '/principals/__uids__/cdaboo/'))
-
- d.addCallback(self.assertEquals, None)
- return d
-
-
- def test_getResponseForDepthZero(self):
- d = self.rc.getResponseForRequest(StubRequest(
- 'PROPFIND',
- '/calendars/__uids__/cdaboo/',
- '/principals/__uids__/cdaboo/',
- depth='0'))
-
- d.addCallback(self.assertEquals, None)
- return d
-
-
- def test_getResponseForBody(self):
- d = self.rc.getResponseForRequest(StubRequest(
- 'PROPFIND',
- '/calendars/__uids__/cdaboo/',
- '/principals/__uids__/cdaboo/',
- body='bazbax'))
-
- d.addCallback(self.assertEquals, None)
- return d
-
-
- def test_getResponseForUnauthenticatedRequest(self):
- d = self.rc.getResponseForRequest(StubRequest(
- 'PROPFIND',
- '/calendars/__uids__/cdaboo/',
- '{DAV:}unauthenticated',
- body='bazbax'))
-
- d.addCallback(self.assertEquals, None)
- return d
-
-
- def test_cacheUnauthenticatedResponse(self):
- expected_response = StubResponse(401, {}, "foobar")
-
- d = self.rc.cacheResponseForRequest(
- StubRequest('PROPFIND',
- '/calendars/__uids__/cdaboo/',
- '{DAV:}unauthenticated'),
- expected_response)
-
- d.addCallback(self.assertResponse,
- (expected_response.code,
- expected_response.headers,
- expected_response.body))
-
- return d
-
-
- def test_cacheResponseForRequest(self):
- expected_response = StubResponse(200, {}, "Foobar")
-
- def _assertResponse(ign):
- d1 = self.rc.getResponseForRequest(StubRequest(
- 'PROPFIND',
- '/principals/__uids__/dreid/',
- '/principals/__uids__/dreid/'))
-
-
- d1.addCallback(self.assertResponse,
- (expected_response.code,
- expected_response.headers,
- expected_response.body))
- return d1
-
-
- d = self.rc.cacheResponseForRequest(
- StubRequest('PROPFIND',
- '/principals/__uids__/dreid/',
- '/principals/__uids__/dreid/'),
- expected_response)
-
- d.addCallback(_assertResponse)
- return d
-
-
- def test_recordHashChangeInvalidatesCache(self):
- StubRequest.resources[
- '/principals/__uids__/cdaboo/'].record = 'directoryToken1'
-
- d = self.rc.getResponseForRequest(
- StubRequest(
- 'PROPFIND',
- '/calendars/__uids__/cdaboo/',
- '/principals/__uids__/cdaboo/'))
-
- d.addCallback(self.assertEquals, None)
- return d
-
-
-
-class MemcacheResponseCacheTests(BaseCacheTestMixin, TestCase):
- def setUp(self):
- super(MemcacheResponseCacheTests, self).setUp()
-
- memcacheStub = InMemoryMemcacheProtocol()
- self.rc = MemcacheResponseCache(None, cachePool=memcacheStub)
- self.rc.logger.setLevel('debug')
- self.tokens = {}
-
- self.tokens['/calendars/__uids__/cdaboo/'] = 'uriToken0'
- self.tokens['/principals/__uids__/cdaboo/'] = 'principalToken0'
- self.tokens['/principals/__uids__/dreid/'] = 'principalTokenX'
-
- def _getToken(uri, cachePoolHandle=None):
- return succeed(self.tokens.get(uri))
-
- self.rc._tokenForURI = _getToken
-
- self.expected_response = (200, Headers({}), "Foo")
-
- expected_key = hashlib.md5(':'.join([str(t) for t in (
- 'PROPFIND',
- '/principals/__uids__/cdaboo/',
- '/calendars/__uids__/cdaboo/',
- '1',
- hash('foobar'),
- )])).hexdigest()
-
- memcacheStub._cache[expected_key] = (
- 0, #flags
- cPickle.dumps((
- 'principalToken0',
- hash('directoryToken0'),
- 'uriToken0',
- (self.expected_response[0],
- dict(list(self.expected_response[1].getAllRawHeaders())),
- self.expected_response[2]))))
-
- self.memcacheStub = memcacheStub
-
- def tearDown(self):
- for call in self.memcacheStub._timeouts.itervalues():
- call.cancel()
-
- def test_givenURIsForKeys(self):
- expected_response = (200, Headers({}), "Foobarbaz")
-
- _key = (
- 'PROPFIND',
- '/principals/__uids__/cdaboo/',
- '/calendars/users/cdaboo/',
- '1',
- hash('foobar'),
- )
-
- expected_key = hashlib.md5(':'.join([str(t) for t in _key])).hexdigest()
-
- self.memcacheStub._cache[expected_key] = (
- 0, #flags
- cPickle.dumps((
- 'principalToken0',
- hash('directoryToken0'),
- 'uriToken0',
- (expected_response[0],
- dict(list(expected_response[1].getAllRawHeaders())),
- expected_response[2]))))
-
- d = self.rc.getResponseForRequest(
- StubRequest('PROPFIND',
- '/calendars/users/cdaboo/',
- '/principals/__uids__/cdaboo/'))
-
- d.addCallback(self.assertResponse, expected_response)
- return d
-
-
-
-class StubResponseCacheResource(object):
- def __init__(self):
- self.cache = {}
- self.responseCache = self
-
-
- def getResponseForRequest(self, request):
- if request in self.cache:
- return self.cache[request]
-
-
- def cacheResponseForRequest(self, request, response):
- self.cache[request] = response
- return response
-
-
-
-class TestRenderMixin(object):
- davHeaders = ('foo',)
-
- def renderHTTP(self, request):
- self.response.headers.setHeader('dav', self.davHeaders)
-
- return self.response
-
-
-
-class TestCachingResource(PropfindCacheMixin, TestRenderMixin):
- def __init__(self, response):
- self.response = response
-
-
-
-class PropfindCacheMixinTests(TestCase):
- """
- Test the PropfindCacheMixin
- """
- def setUp(self):
- TestCase.setUp(self)
- self.resource = TestCachingResource(StubResponse(200, {}, "foobar"))
- self.responseCache = StubResponseCacheResource()
-
- def test_DAVHeaderCached(self):
- """
- Test that the DAV header set in renderHTTP is cached.
- """
- def _checkCache(response):
- self.assertEquals(response.headers.getHeader('dav'),
- ('foo',))
- self.assertEquals(
- self.responseCache.cache[request].headers.getHeader('dav'),
- ('foo',))
-
- request = StubRequest('PROPFIND', '/', '/')
- request.resources['/'] = self.responseCache
-
- d = maybeDeferred(self.resource.renderHTTP, request)
- d.addCallback(_checkCache)
-
- return d
-
-
- def test_onlyCachePropfind(self):
- """
- Test that we only cache the result of a propfind request.
- """
- def _checkCache(response):
- self.assertEquals(self.responseCache.getResponseForRequest(request),
- None)
-
- request = StubRequest('GET', '/', '/')
- request.resources['/'] = self.responseCache
-
- d = maybeDeferred(self.resource.renderHTTP, request)
- d.addCallback(_checkCache)
-
- return d
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_config.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_config.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_config.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -59,12 +59,19 @@
def tearDown(self):
config.setDefaults(DEFAULT_CONFIG)
config.reset()
- config.update(None) # Make sure the update hooks get called
def testDefaults(self):
for key, value in DEFAULT_CONFIG.iteritems():
- self.assertEquals(getattr(config, key), value)
+ if key in ("ServerHostName",):
+ # Value is calculated and may vary
+ continue
+ self.assertEquals(
+ getattr(config, key), value,
+ "config[%r] == %r, expected %r"
+ % (key, getattr(config, key), value)
+ )
+
def testLoadConfig(self):
self.assertEquals(config.ResponseCompression, True)
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_index.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_index.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_index.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -84,9 +84,8 @@
old_timeout = config.UIDReservationTimeOut
config.UIDReservationTimeOut = 1
- def _finally(result):
+ def _finally():
config.UIDReservationTimeOut = old_timeout
- return result
d = self.db.isReservedUID(uid)
d.addCallback(self.assertFalse)
@@ -96,7 +95,7 @@
d.addCallback(lambda _: deferLater(reactor, 2, lambda: None))
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertFalse)
- d.addBoth(_finally)
+ self.addCleanup(_finally)
return d
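
The test_index.py change swaps a manual d.addBoth(_finally) restore for trial's addCleanup, which runs after the test and its Deferred complete regardless of outcome and keeps the callback chain free of bookkeeping. A minimal sketch of the same idiom, with a throwaway config object standing in for the one patched here:

from twisted.trial import unittest

config = type("Config", (), {"UIDReservationTimeOut": 30})()

class ReservationTimeoutTests(unittest.TestCase):
    def test_reservation_expires(self):
        old_timeout = config.UIDReservationTimeOut
        config.UIDReservationTimeOut = 1

        def _finally():
            # Runs after the test (and any Deferred it returns) finishes,
            # whether it passed or failed.
            config.UIDReservationTimeOut = old_timeout

        self.addCleanup(_finally)
        self.assertEqual(config.UIDReservationTimeOut, 1)
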
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_mail.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_mail.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_mail.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -16,6 +16,10 @@
from twistedcaldav.mail import *
from twistedcaldav.test.util import TestCase
+from twistedcaldav.ical import Component
+from twistedcaldav.config import config
+
+from twisted.internet.defer import inlineCallbacks
import email
import os
@@ -107,6 +111,139 @@
self.assertEquals(organizer, "mailto:user01 at example.com")
+ @inlineCallbacks
+ def test_outbound(self):
+ """
+ Make sure outbound( ) stores tokens properly so they can be looked up
+ """
+
+ config.Scheduling.iMIP.Sending.Address = "server at example.com"
+
+ data = (
+ # Initial invite
+ (
+ """BEGIN:VCALENDAR
+VERSION:2.0
+METHOD:REQUEST
+BEGIN:VEVENT
+UID:CFDD5E46-4F74-478A-9311-B3FF905449C3
+DTSTART:20100325T154500Z
+DTEND:20100325T164500Z
+ATTENDEE;CN=The Attendee;CUTYPE=INDIVIDUAL;PARTSTAT=NEEDS-ACTION;RSVP=TRUE:mailto:attendee at example.com
+ATTENDEE;CN=The Organizer;CUTYPE=INDIVIDUAL;EMAIL=organizer at example.com;PARTSTAT=ACCEPTED:urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A
+ORGANIZER;CN=The Organizer;EMAIL=organizer at example.com:urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A
+SUMMARY:testing outbound( )
+END:VEVENT
+END:VCALENDAR
+""",
+ "CFDD5E46-4F74-478A-9311-B3FF905449C3",
+ "mailto:organizer at example.com",
+ "mailto:attendee at example.com",
+ "new",
+ "organizer at example.com",
+ "The Organizer",
+ [
+ (u'The Attendee', u'attendee at example.com'),
+ (u'The Organizer', None)
+ ],
+ "The Organizer <organizer at example.com>",
+ "attendee at example.com",
+ ),
+
+ # Update
+ (
+ """BEGIN:VCALENDAR
+VERSION:2.0
+METHOD:REQUEST
+BEGIN:VEVENT
+UID:CFDD5E46-4F74-478A-9311-B3FF905449C3
+DTSTART:20100325T154500Z
+DTEND:20100325T164500Z
+ATTENDEE;CN=The Attendee;CUTYPE=INDIVIDUAL;PARTSTAT=NEEDS-ACTION;RSVP=TRUE:mailto:attendee at example.com
+ATTENDEE;CN=The Organizer;CUTYPE=INDIVIDUAL;EMAIL=organizer at example.com;PARTSTAT=ACCEPTED:urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A
+ORGANIZER;CN=The Organizer;EMAIL=organizer at example.com:urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A
+SUMMARY:testing outbound( ) *update*
+END:VEVENT
+END:VCALENDAR
+""",
+ "CFDD5E46-4F74-478A-9311-B3FF905449C3",
+ "mailto:organizer at example.com",
+ "mailto:attendee at example.com",
+ "update",
+ "organizer at example.com",
+ "The Organizer",
+ [
+ (u'The Attendee', u'attendee at example.com'),
+ (u'The Organizer', None)
+ ],
+ "The Organizer <organizer at example.com>",
+ "attendee at example.com",
+ ),
+
+ # Reply
+ (
+ """BEGIN:VCALENDAR
+VERSION:2.0
+METHOD:REPLY
+BEGIN:VEVENT
+UID:DFDD5E46-4F74-478A-9311-B3FF905449C4
+DTSTART:20100325T154500Z
+DTEND:20100325T164500Z
+ATTENDEE;CN=The Attendee;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailto:attendee at example.com
+ORGANIZER;CN=The Organizer;EMAIL=organizer at example.com:mailto:organizer at example.com
+SUMMARY:testing outbound( ) *reply*
+END:VEVENT
+END:VCALENDAR
+""",
+ None,
+ "mailto:attendee at example.com",
+ "mailto:organizer at example.com",
+ "reply",
+ "organizer at example.com",
+ "The Organizer",
+ [
+ (u'The Attendee', u'attendee at example.com'),
+ ],
+ "attendee at example.com",
+ "organizer at example.com",
+ ),
+
+ )
+ for (inputCalendar, UID, inputOriginator, inputRecipient, inviteState,
+ outputOrganizerEmail, outputOrganizerName, outputAttendeeList,
+ outputFrom, outputRecipient) in data:
+
+ (actualInviteState, actualCalendar, actualOrganizerEmail,
+ actualOrganizerName, actualAttendeeList, actualFrom,
+ actualRecipient, actualReplyTo) = (yield self.handler.outbound(
+ inputOriginator,
+ inputRecipient,
+ Component.fromString(inputCalendar.replace("\n", "\r\n")),
+ send=False)
+ )
+
+ self.assertEquals(actualInviteState, inviteState)
+ self.assertEquals(actualOrganizerEmail, outputOrganizerEmail)
+ self.assertEquals(actualOrganizerName, outputOrganizerName)
+ self.assertEquals(actualAttendeeList, outputAttendeeList)
+ self.assertEquals(actualFrom, outputFrom)
+ self.assertEquals(actualRecipient, outputRecipient)
+
+ if UID: # The organizer is local, and server is sending to remote
+ # attendee
+
+ token = self.handler.db.getToken(inputOriginator,
+ inputRecipient, UID)
+ self.assertNotEquals(token, None)
+ self.assertEquals(actualReplyTo,
+ "server+%s at example.com" % (token,))
+
+ else: # Reply only -- the attendee is local, and server is sending
+ # reply to remote organizer
+
+ self.assertEquals(actualReplyTo, actualFrom)
+
+
class MailGatewayTokensDatabaseTests(TestCase):
def setUp(self):
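
The new test_outbound test relies on @inlineCallbacks, so each yield waits for the handler's Deferred and the assertions read sequentially over a table of input/expected tuples. A stripped-down sketch of that testing style, with a fake asynchronous handler standing in for the real mail handler:

from twisted.internet.defer import inlineCallbacks, succeed
from twisted.trial import unittest

class FakeHandler(object):
    def outbound(self, originator, recipient, calendar, send=False):
        # Pretend to process the invite and return the computed state.
        return succeed(("new", originator, recipient))

class OutboundTests(unittest.TestCase):
    @inlineCallbacks
    def test_outbound(self):
        handler = FakeHandler()
        for originator, recipient, expected_state in [
            ("mailto:organizer@example.com", "attendee@example.com", "new"),
        ]:
            state, actual_originator, actual_recipient = (
                yield handler.outbound(originator, recipient, calendar=None)
            )
            self.assertEquals(state, expected_state)
            self.assertEquals(actual_originator, originator)
            self.assertEquals(actual_recipient, recipient)
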
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_static.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_static.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/test_static.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,67 +0,0 @@
-##
-# Copyright (c) 2008 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from twistedcaldav.static import CalendarHomeFile, CalDAVFile
-from twistedcaldav.cache import DisabledCacheNotifier
-from twistedcaldav.test.util import StubCacheChangeNotifier
-from twistedcaldav.test.util import TestCase
-
-class StubParentResource(object):
- def principalCollections(self):
- return set([])
-
-
-class CalendarHomeFileTests(TestCase):
- def setUp(self):
- TestCase.setUp(self)
- self.calendarHome = CalendarHomeFile(self.mktemp(),
- StubParentResource(),
- object())
-
-
- def test_hasCacheNotifier(self):
- self.failUnless(isinstance(self.calendarHome.cacheNotifier,
- DisabledCacheNotifier))
-
-
- def test_childrenHaveCacheNotifier(self):
- child = self.calendarHome.createSimilarFile('/fake/path')
- self.assertEquals(child.cacheNotifier, self.calendarHome.cacheNotifier)
-
-
-class CalDAVFileTests(TestCase):
- def setUp(self):
- TestCase.setUp(self)
- self.caldavFile = CalDAVFile(self.mktemp())
- self.caldavFile.fp.createDirectory()
- self.caldavFile.cacheNotifier = StubCacheChangeNotifier()
- self.assertEquals(self.caldavFile.cacheNotifier.changedCount, 0)
- self.caldavFile.isCollection = (lambda: True)
-
-
- def test_updateCTagNotifiesCache(self):
- d = self.caldavFile.updateCTag()
- d.addCallback(
- lambda _:
- self.assertEquals(self.caldavFile.cacheNotifier.changedCount, 1)
- )
- return d
-
-
- def test_updateCTagDoesntFailWithoutACacheNotifier(self):
- del self.caldavFile.cacheNotifier
- d = self.caldavFile.updateCTag()
- return d
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/util.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/twistedcaldav/test/util.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -27,20 +27,19 @@
import xattr
from twisted.python.failure import Failure
+from twisted.internet.base import DelayedCall
from twisted.internet.defer import succeed, fail
-from twext.web2.http import HTTPError, StatusResponse
from twisted.internet.error import ProcessDone
from twisted.internet.protocol import ProcessProtocol
+from twext.python.memcacheclient import ClientFactory
+import twext.web2.dav.test.util
+from twext.web2.http import HTTPError, StatusResponse
+
from twistedcaldav import memcacher
from twistedcaldav.config import config
-from twistedcaldav.stdconfig import _updateDataStore
from twistedcaldav.static import CalDAVFile
-import memcacheclient
-import twext.web2.dav.test.util
-
-from twisted.internet.base import DelayedCall
DelayedCall.debug = True
def _todo(f, why):
@@ -62,7 +61,6 @@
os.mkdir(serverroot)
config.ServerRoot = serverroot
config.ConfigRoot = "config"
- _updateDataStore(config)
if not os.path.exists(config.DataRoot):
os.makedirs(config.DataRoot)
@@ -73,7 +71,7 @@
config.Memcached.Pools.Default.ClientEnabled = False
config.Memcached.Pools.Default.ServerEnabled = False
- memcacheclient.ClientFactory.allowTestCache = True
+ ClientFactory.allowTestCache = True
memcacher.Memcacher.allowTestCache = True
def createHierarchy(self, structure, root=None):
@@ -121,16 +119,32 @@
actual.remove(childName)
if childName.startswith("*"):
+ if "/" in childName:
+ childName, matching = childName.split("/")
+ else:
+ matching = False
ext = childName.split(".")[1]
found = False
for actualFile in actual:
if actualFile.endswith(ext):
- actual.remove(actualFile)
- found = True
- break
+ matches = True
+ if matching:
+ matches = False
+ # We want to target only the wildcard file containing
+ # the matching string
+ actualPath = os.path.join(parent, actualFile)
+ with open(actualPath) as child:
+ contents = child.read()
+ if matching in contents:
+ matches = True
+
+ if matches:
+ actual.remove(actualFile)
+ found = True
+ break
if found:
- continue
-
+ # continue
+ childName = actualFile
childPath = os.path.join(parent, childName)
@@ -140,9 +154,18 @@
if childStructure.has_key("@contents"):
# This is a file
- if childStructure["@contents"] is None:
+ expectedContents = childStructure["@contents"]
+ if expectedContents is None:
# We don't care about the contents
pass
+ elif isinstance(expectedContents, tuple):
+ with open(childPath) as child:
+ contents = child.read()
+ for term in expectedContents:
+ if term not in contents:
+ print "Contents mismatch:", childPath
+ print "Expecting match:\n%s\n\nActual:\n%s\n" % (term, contents)
+ return False
else:
with open(childPath) as child:
contents = child.read()
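
The util.py changes extend the hierarchy verifier in two ways: a wildcard child name may now carry a "/substring" qualifier so only the wildcard file whose contents contain that substring is matched, and "@contents" may be a tuple of terms that must all appear in the file. A sketch of a structure description exercising both features; the dictionary keys follow the format these helpers consume, while the file names and terms are purely illustrative.

structure = {
    "calendars": {                 # a directory: no "@contents" key
        "*.ics/uid1": {            # match the .ics file whose body contains "uid1"
            "@contents": (
                "BEGIN:VCALENDAR", # every term in the tuple must appear
                "UID:uid1",
                "END:VCALENDAR",
            ),
        },
        "index.db": {
            "@contents": None,     # file must exist; contents are ignored
        },
    },
}
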
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/file.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/file.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/file.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -38,7 +38,8 @@
from txdav.propertystore.xattr import PropertyStore
-from txcaldav.icalendarstore import ICalendarHome, ICalendar, ICalendarObject
+from txcaldav.icalendarstore import ICalendarStore, ICalendarHome
+from txcaldav.icalendarstore import ICalendar, ICalendarObject
from txcaldav.icalendarstore import CalendarNameNotAllowedError
from txcaldav.icalendarstore import CalendarObjectNameNotAllowedError
from txcaldav.icalendarstore import CalendarAlreadyExistsError
@@ -54,7 +55,7 @@
class CalendarStore(LoggingMixIn):
- # FIXME: Do we need an interface?
+ implements(ICalendarStore)
calendarHomeClass = property(lambda _: CalendarHome)
@@ -67,25 +68,29 @@
self.path = path
if not path.isdir():
- # FIXME: If we add a CalendarStore interface, this should
- # be CalendarStoreNotFoundError.
+ # FIXME: Add CalendarStoreNotFoundError?
raise NotFoundError("No such calendar store")
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.path.path)
- def calendarHomeWithUID(self, uid):
+ def calendarHomeWithUID(self, uid, create=False):
if uid.startswith("."):
return None
- childPath = self.path.child(uid)
+ assert len(uid) >= 4
- if childPath.isdir():
- return CalendarHome(childPath, self)
- else:
- return None
+ childPath = self.path.child(uid[0:2]).child(uid[2:4]).child(uid)
+ if not childPath.isdir():
+ if create:
+ childPath.makedirs()
+ else:
+ return None
+ return CalendarHome(childPath, self)
+
+
class CalendarHome(LoggingMixIn):
implements(ICalendarHome)
@@ -370,6 +375,7 @@
self._properties = PropertyStore(self.path)
return self._properties
+
class Index (object):
#
# OK, here's where we get ugly.
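
calendarHomeWithUID now fans calendar homes out into two levels of prefix directories (the first two characters of the UID, then the next two) and can create the home on demand, which is why the test fixtures below live under calendar_store/ho/me/home1/. A minimal sketch of just the path scheme, using FilePath the way the store does; the helper name is illustrative.

from twisted.python.filepath import FilePath

def homePathForUID(storePath, uid, create=False):
    # Fan out by the first two and next two characters of the UID,
    # e.g. "home1" -> <store>/ho/me/home1 (matching the fixtures below).
    assert len(uid) >= 4
    childPath = storePath.child(uid[0:2]).child(uid[2:4]).child(uid)
    if not childPath.isdir():
        if create:
            childPath.makedirs()   # creates the prefix directories too
        else:
            return None
    return childPath

store = FilePath("/tmp/calendar_store")
home = homePathForUID(store, "home1", create=True)
print(home.path)  # /tmp/calendar_store/ho/me/home1
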
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/1.ics
===================================================================
--- CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/1.ics 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/1.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,28 +0,0 @@
-BEGIN:VCALENDAR
-VERSION:2.0
-PRODID:-//Apple Inc.//iCal 4.0.1//EN
-CALSCALE:GREGORIAN
-BEGIN:VEVENT
-ATTENDEE;CN="Wilfredo Sanchez";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailt
- o:wsanchez at apple.com
-ATTENDEE;CN="Cyrus Daboo";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailto:cda
- boo at apple.com
-DTEND;TZID=US/Pacific:20090324T124500
-TRANSP:OPAQUE
-ORGANIZER;CN="Wilfredo Sanchez":mailto:wsanchez at apple.com
-UID:uid1
-DTSTAMP:20090326T145447Z
-LOCATION:Wilfredo's Office
-SEQUENCE:2
-X-APPLE-EWS-BUSYSTATUS:BUSY
-SUMMARY:CalDAV protocol updates
-DTSTART;TZID=US/Pacific:20090324T121500
-CREATED:20090326T145440Z
-BEGIN:VALARM
-X-WR-ALARMUID:DB39AB67-449C-441C-89D2-D740B5F41A73
-TRIGGER;VALUE=DATE-TIME:20090324T180009Z
-ATTACH;VALUE=URI:Basso
-ACTION:AUDIO
-END:VALARM
-END:VEVENT
-END:VCALENDAR
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/1.ics (from rev 5438, CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/1.ics)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/1.ics (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/1.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,28 @@
+BEGIN:VCALENDAR
+VERSION:2.0
+PRODID:-//Apple Inc.//iCal 4.0.1//EN
+CALSCALE:GREGORIAN
+BEGIN:VEVENT
+ATTENDEE;CN="Wilfredo Sanchez";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailt
+ o:wsanchez at apple.com
+ATTENDEE;CN="Cyrus Daboo";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailto:cda
+ boo at apple.com
+DTEND;TZID=US/Pacific:20090324T124500
+TRANSP:OPAQUE
+ORGANIZER;CN="Wilfredo Sanchez":mailto:wsanchez at apple.com
+UID:uid1
+DTSTAMP:20090326T145447Z
+LOCATION:Wilfredo's Office
+SEQUENCE:2
+X-APPLE-EWS-BUSYSTATUS:BUSY
+SUMMARY:CalDAV protocol updates
+DTSTART;TZID=US/Pacific:20090324T121500
+CREATED:20090326T145440Z
+BEGIN:VALARM
+X-WR-ALARMUID:DB39AB67-449C-441C-89D2-D740B5F41A73
+TRIGGER;VALUE=DATE-TIME:20090324T180009Z
+ATTACH;VALUE=URI:Basso
+ACTION:AUDIO
+END:VALARM
+END:VEVENT
+END:VCALENDAR
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/2.ics
===================================================================
--- CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/2.ics 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/2.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,48 +0,0 @@
-BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
-BEGIN:VTIMEZONE
-TZID:US/Eastern
-LAST-MODIFIED:20040110T032845Z
-BEGIN:STANDARD
-DTSTART:20001026T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZNAME:EST
-TZOFFSETFROM:-0400
-TZOFFSETTO:-0500
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:20000404T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZNAME:EDT
-TZOFFSETFROM:-0500
-TZOFFSETTO:-0400
-END:DAYLIGHT
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:uid2
-DTSTART;TZID=US/Eastern:20060102T140000
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=DAILY;COUNT=5
-SUMMARY:event 6-%ctr
-END:VEVENT
-BEGIN:VEVENT
-UID:uid2
-RECURRENCE-ID;TZID=US/Eastern:20060104T140000
-DTSTART;TZID=US/Eastern:20060104T160000
-DURATION:PT1H
-CREATED:20060102T190000Z
-DESCRIPTION:Some notes
-DTSTAMP:20051222T210507Z
-SUMMARY:event 6-%ctr changed
-BEGIN:VALARM
-ACTION:AUDIO
-TRIGGER;RELATED=START:-PT10M
-X-MULBERRY-ALARM-STATUS:PENDING
-X-MULBERRY-SPEAK-TEXT:
-END:VALARM
-END:VEVENT
-END:VCALENDAR
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/2.ics (from rev 5438, CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/2.ics)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/2.ics (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/2.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,48 @@
+BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
+BEGIN:VTIMEZONE
+TZID:US/Eastern
+LAST-MODIFIED:20040110T032845Z
+BEGIN:STANDARD
+DTSTART:20001026T020000
+RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
+TZNAME:EST
+TZOFFSETFROM:-0400
+TZOFFSETTO:-0500
+END:STANDARD
+BEGIN:DAYLIGHT
+DTSTART:20000404T020000
+RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
+TZNAME:EDT
+TZOFFSETFROM:-0500
+TZOFFSETTO:-0400
+END:DAYLIGHT
+END:VTIMEZONE
+BEGIN:VEVENT
+UID:uid2
+DTSTART;TZID=US/Eastern:20060102T140000
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=DAILY;COUNT=5
+SUMMARY:event 6-%ctr
+END:VEVENT
+BEGIN:VEVENT
+UID:uid2
+RECURRENCE-ID;TZID=US/Eastern:20060104T140000
+DTSTART;TZID=US/Eastern:20060104T160000
+DURATION:PT1H
+CREATED:20060102T190000Z
+DESCRIPTION:Some notes
+DTSTAMP:20051222T210507Z
+SUMMARY:event 6-%ctr changed
+BEGIN:VALARM
+ACTION:AUDIO
+TRIGGER;RELATED=START:-PT10M
+X-MULBERRY-ALARM-STATUS:PENDING
+X-MULBERRY-SPEAK-TEXT:
+END:VALARM
+END:VEVENT
+END:VCALENDAR
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/3.ics
===================================================================
--- CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/3.ics 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/3.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,33 +0,0 @@
-BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
-BEGIN:VTIMEZONE
-TZID:US/Pacific
-LAST-MODIFIED:20040110T032845Z
-BEGIN:STANDARD
-DTSTART:20001026T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZNAME:PST
-TZOFFSETFROM:-0700
-TZOFFSETTO:-0800
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:20000404T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZNAME:PDT
-TZOFFSETFROM:-0800
-TZOFFSETTO:-0700
-END:DAYLIGHT
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:uid3
-DTSTART;TZID=US/Pacific:20060101T130000
-DURATION:PT1H
-CREATED:20060101T210000Z
-DTSTAMP:20051222T210146Z
-LAST-MODIFIED:20051222T210203Z
-SEQUENCE:1
-SUMMARY:event 3-%ctr
-END:VEVENT
-END:VCALENDAR
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/3.ics (from rev 5438, CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/3.ics)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/3.ics (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_1/3.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,33 @@
+BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
+BEGIN:VTIMEZONE
+TZID:US/Pacific
+LAST-MODIFIED:20040110T032845Z
+BEGIN:STANDARD
+DTSTART:20001026T020000
+RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
+TZNAME:PST
+TZOFFSETFROM:-0700
+TZOFFSETTO:-0800
+END:STANDARD
+BEGIN:DAYLIGHT
+DTSTART:20000404T020000
+RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
+TZNAME:PDT
+TZOFFSETFROM:-0800
+TZOFFSETTO:-0700
+END:DAYLIGHT
+END:VTIMEZONE
+BEGIN:VEVENT
+UID:uid3
+DTSTART;TZID=US/Pacific:20060101T130000
+DURATION:PT1H
+CREATED:20060101T210000Z
+DTSTAMP:20051222T210146Z
+LAST-MODIFIED:20051222T210203Z
+SEQUENCE:1
+SUMMARY:event 3-%ctr
+END:VEVENT
+END:VCALENDAR
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/24204e8682b99527cbda64d7423acda7.ics
===================================================================
--- CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/24204e8682b99527cbda64d7423acda7.ics 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/24204e8682b99527cbda64d7423acda7.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,32 +0,0 @@
-BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
-BEGIN:VTIMEZONE
-TZID:US/Mountain
-LAST-MODIFIED:20040110T032845Z
-BEGIN:STANDARD
-DTSTART:20001026T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZNAME:MST
-TZOFFSETFROM:-0600
-TZOFFSETTO:-0700
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:20000404T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZNAME:MDT
-TZOFFSETFROM:-0700
-TZOFFSETTO:-0600
-END:DAYLIGHT
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:9A6519F71822CD45840C3440-%ctr at ninevah.local
-DTSTART;TZID=US/Mountain:20060101T110000
-DURATION:PT1H
-CREATED:20060101T160000Z
-DESCRIPTION:Some notes
-DTSTAMP:20051222T210052Z
-SUMMARY:event 2-%ctr
-END:VEVENT
-END:VCALENDAR
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/24204e8682b99527cbda64d7423acda7.ics (from rev 5438, CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/24204e8682b99527cbda64d7423acda7.ics)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/24204e8682b99527cbda64d7423acda7.ics (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/24204e8682b99527cbda64d7423acda7.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,32 @@
+BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
+BEGIN:VTIMEZONE
+TZID:US/Mountain
+LAST-MODIFIED:20040110T032845Z
+BEGIN:STANDARD
+DTSTART:20001026T020000
+RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
+TZNAME:MST
+TZOFFSETFROM:-0600
+TZOFFSETTO:-0700
+END:STANDARD
+BEGIN:DAYLIGHT
+DTSTART:20000404T020000
+RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
+TZNAME:MDT
+TZOFFSETFROM:-0700
+TZOFFSETTO:-0600
+END:DAYLIGHT
+END:VTIMEZONE
+BEGIN:VEVENT
+UID:9A6519F71822CD45840C3440-%ctr at ninevah.local
+DTSTART;TZID=US/Mountain:20060101T110000
+DURATION:PT1H
+CREATED:20060101T160000Z
+DESCRIPTION:Some notes
+DTSTAMP:20051222T210052Z
+SUMMARY:event 2-%ctr
+END:VEVENT
+END:VCALENDAR
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/61038c41bd02ae5daf9f7fe9d54199fd.ics
===================================================================
--- CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/61038c41bd02ae5daf9f7fe9d54199fd.ics 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/61038c41bd02ae5daf9f7fe9d54199fd.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,31 +0,0 @@
-BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
-BEGIN:VTIMEZONE
-TZID:US/Eastern
-LAST-MODIFIED:20040110T032845Z
-BEGIN:STANDARD
-DTSTART:20001026T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZNAME:EST
-TZOFFSETFROM:-0400
-TZOFFSETTO:-0500
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:20000404T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZNAME:EDT
-TZOFFSETFROM:-0500
-TZOFFSETTO:-0400
-END:DAYLIGHT
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:54E181BC7CCC373042B28842-%ctr at ninevah.local
-DTSTART;TZID=US/Eastern:20060101T100000
-DURATION:PT1H
-CREATED:20060101T150000Z
-DTSTAMP:20051222T205953Z
-SUMMARY:event 1-%ctr
-END:VEVENT
-END:VCALENDAR
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/61038c41bd02ae5daf9f7fe9d54199fd.ics (from rev 5438, CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/61038c41bd02ae5daf9f7fe9d54199fd.ics)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/61038c41bd02ae5daf9f7fe9d54199fd.ics (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/61038c41bd02ae5daf9f7fe9d54199fd.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,31 @@
+BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
+BEGIN:VTIMEZONE
+TZID:US/Eastern
+LAST-MODIFIED:20040110T032845Z
+BEGIN:STANDARD
+DTSTART:20001026T020000
+RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
+TZNAME:EST
+TZOFFSETFROM:-0400
+TZOFFSETTO:-0500
+END:STANDARD
+BEGIN:DAYLIGHT
+DTSTART:20000404T020000
+RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
+TZNAME:EDT
+TZOFFSETFROM:-0500
+TZOFFSETTO:-0400
+END:DAYLIGHT
+END:VTIMEZONE
+BEGIN:VEVENT
+UID:54E181BC7CCC373042B28842-%ctr at ninevah.local
+DTSTART;TZID=US/Eastern:20060101T100000
+DURATION:PT1H
+CREATED:20060101T150000Z
+DTSTAMP:20051222T205953Z
+SUMMARY:event 1-%ctr
+END:VEVENT
+END:VCALENDAR
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/84be58ced1f1bb34057e1bd7e602c9c8.ics
===================================================================
--- CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/84be58ced1f1bb34057e1bd7e602c9c8.ics 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/84be58ced1f1bb34057e1bd7e602c9c8.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,31 +0,0 @@
-BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
-BEGIN:VTIMEZONE
-TZID:US/Eastern
-LAST-MODIFIED:20040110T032845Z
-BEGIN:STANDARD
-DTSTART:20001026T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZNAME:EST
-TZOFFSETFROM:-0400
-TZOFFSETTO:-0500
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:20000404T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZNAME:EDT
-TZOFFSETFROM:-0500
-TZOFFSETTO:-0400
-END:DAYLIGHT
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:54E181BC7CCC373042B28842-8-%ctr at ninevah.local
-DTSTART;TZID=US/Eastern:20060107T100000
-DURATION:PT1H
-CREATED:20060101T150000Z
-DTSTAMP:20051222T205953Z
-SUMMARY:event 8-%ctr
-END:VEVENT
-END:VCALENDAR
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/84be58ced1f1bb34057e1bd7e602c9c8.ics (from rev 5438, CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/84be58ced1f1bb34057e1bd7e602c9c8.ics)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/84be58ced1f1bb34057e1bd7e602c9c8.ics (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/84be58ced1f1bb34057e1bd7e602c9c8.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,31 @@
+BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
+BEGIN:VTIMEZONE
+TZID:US/Eastern
+LAST-MODIFIED:20040110T032845Z
+BEGIN:STANDARD
+DTSTART:20001026T020000
+RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
+TZNAME:EST
+TZOFFSETFROM:-0400
+TZOFFSETTO:-0500
+END:STANDARD
+BEGIN:DAYLIGHT
+DTSTART:20000404T020000
+RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
+TZNAME:EDT
+TZOFFSETFROM:-0500
+TZOFFSETTO:-0400
+END:DAYLIGHT
+END:VTIMEZONE
+BEGIN:VEVENT
+UID:54E181BC7CCC373042B28842-8-%ctr at ninevah.local
+DTSTART;TZID=US/Eastern:20060107T100000
+DURATION:PT1H
+CREATED:20060101T150000Z
+DTSTAMP:20051222T205953Z
+SUMMARY:event 8-%ctr
+END:VEVENT
+END:VCALENDAR
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/acc1015b7dc300c1b5665f6833960994.ics
===================================================================
--- CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/acc1015b7dc300c1b5665f6833960994.ics 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/acc1015b7dc300c1b5665f6833960994.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,31 +0,0 @@
-BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
-BEGIN:VTIMEZONE
-TZID:US/Eastern
-LAST-MODIFIED:20040110T032845Z
-BEGIN:STANDARD
-DTSTART:20001026T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZNAME:EST
-TZOFFSETFROM:-0400
-TZOFFSETTO:-0500
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:20000404T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZNAME:EDT
-TZOFFSETFROM:-0500
-TZOFFSETTO:-0400
-END:DAYLIGHT
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:54E181BC7CCC373042B28842-9-%ctr at ninevah.local
-DTSTART;TZID=US/Eastern:20060107T103000
-DURATION:PT1H
-CREATED:20060101T150000Z
-DTSTAMP:20051222T205953Z
-SUMMARY:event 9-%ctr
-END:VEVENT
-END:VCALENDAR
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/acc1015b7dc300c1b5665f6833960994.ics (from rev 5438, CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/acc1015b7dc300c1b5665f6833960994.ics)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/acc1015b7dc300c1b5665f6833960994.ics (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/acc1015b7dc300c1b5665f6833960994.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,31 @@
+BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
+BEGIN:VTIMEZONE
+TZID:US/Eastern
+LAST-MODIFIED:20040110T032845Z
+BEGIN:STANDARD
+DTSTART:20001026T020000
+RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
+TZNAME:EST
+TZOFFSETFROM:-0400
+TZOFFSETTO:-0500
+END:STANDARD
+BEGIN:DAYLIGHT
+DTSTART:20000404T020000
+RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
+TZNAME:EDT
+TZOFFSETFROM:-0500
+TZOFFSETTO:-0400
+END:DAYLIGHT
+END:VTIMEZONE
+BEGIN:VEVENT
+UID:54E181BC7CCC373042B28842-9-%ctr at ninevah.local
+DTSTART;TZID=US/Eastern:20060107T103000
+DURATION:PT1H
+CREATED:20060101T150000Z
+DTSTAMP:20051222T205953Z
+SUMMARY:event 9-%ctr
+END:VEVENT
+END:VCALENDAR
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b0d5785f275c064117ffd1fc20f4ed40.ics
===================================================================
--- CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b0d5785f275c064117ffd1fc20f4ed40.ics 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b0d5785f275c064117ffd1fc20f4ed40.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,39 +0,0 @@
-BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
-BEGIN:VTIMEZONE
-TZID:US/Eastern
-LAST-MODIFIED:20040110T032845Z
-BEGIN:STANDARD
-DTSTART:20001026T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZNAME:EST
-TZOFFSETFROM:-0400
-TZOFFSETTO:-0500
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:20000404T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZNAME:EDT
-TZOFFSETFROM:-0500
-TZOFFSETTO:-0400
-END:DAYLIGHT
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:A3217B429B4D2FF2DC2EEE66-%ctr at ninevah.local
-DTSTART;TZID=US/Eastern:20060101T180000
-DURATION:PT1H
-CREATED:20060101T230000Z
-DTSTAMP:20051222T210310Z
-SUMMARY:event 4-%ctr
-BEGIN:VALARM
-ACTION:AUDIO
-DURATION:PT10M
-REPEAT:5
-TRIGGER;RELATED=START:-PT1H
-X-MULBERRY-ALARM-STATUS:PENDING
-X-MULBERRY-SPEAK-TEXT:
-END:VALARM
-END:VEVENT
-END:VCALENDAR
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b0d5785f275c064117ffd1fc20f4ed40.ics (from rev 5438, CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b0d5785f275c064117ffd1fc20f4ed40.ics)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b0d5785f275c064117ffd1fc20f4ed40.ics (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b0d5785f275c064117ffd1fc20f4ed40.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,39 @@
+BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
+BEGIN:VTIMEZONE
+TZID:US/Eastern
+LAST-MODIFIED:20040110T032845Z
+BEGIN:STANDARD
+DTSTART:20001026T020000
+RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
+TZNAME:EST
+TZOFFSETFROM:-0400
+TZOFFSETTO:-0500
+END:STANDARD
+BEGIN:DAYLIGHT
+DTSTART:20000404T020000
+RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
+TZNAME:EDT
+TZOFFSETFROM:-0500
+TZOFFSETTO:-0400
+END:DAYLIGHT
+END:VTIMEZONE
+BEGIN:VEVENT
+UID:A3217B429B4D2FF2DC2EEE66-%ctr at ninevah.local
+DTSTART;TZID=US/Eastern:20060101T180000
+DURATION:PT1H
+CREATED:20060101T230000Z
+DTSTAMP:20051222T210310Z
+SUMMARY:event 4-%ctr
+BEGIN:VALARM
+ACTION:AUDIO
+DURATION:PT10M
+REPEAT:5
+TRIGGER;RELATED=START:-PT1H
+X-MULBERRY-ALARM-STATUS:PENDING
+X-MULBERRY-SPEAK-TEXT:
+END:VALARM
+END:VEVENT
+END:VCALENDAR
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b495c5dd5aa53392078eb43b1f906a80.ics
===================================================================
--- CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b495c5dd5aa53392078eb43b1f906a80.ics 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b495c5dd5aa53392078eb43b1f906a80.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,38 +0,0 @@
-BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
-BEGIN:VTIMEZONE
-TZID:US/Eastern
-LAST-MODIFIED:20040110T032845Z
-BEGIN:STANDARD
-DTSTART:20001026T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZNAME:EST
-TZOFFSETFROM:-0400
-TZOFFSETTO:-0500
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:20000404T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZNAME:EDT
-TZOFFSETFROM:-0500
-TZOFFSETTO:-0400
-END:DAYLIGHT
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:945113826375CBB89184DC36-%ctr at ninevah.local
-DTSTART;TZID=US/Eastern:20060102T100000
-DURATION:PT1H
-CREATED:20060102T150000Z
-DTSTAMP:20051222T210412Z
-RRULE:FREQ=DAILY;COUNT=5
-SUMMARY:event 5-%ctr
-BEGIN:VALARM
-ACTION:AUDIO
-TRIGGER;RELATED=START:-PT10M
-X-MULBERRY-ALARM-STATUS:PENDING
-X-MULBERRY-SPEAK-TEXT:
-END:VALARM
-END:VEVENT
-END:VCALENDAR
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b495c5dd5aa53392078eb43b1f906a80.ics (from rev 5438, CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b495c5dd5aa53392078eb43b1f906a80.ics)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b495c5dd5aa53392078eb43b1f906a80.ics (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b495c5dd5aa53392078eb43b1f906a80.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,38 @@
+BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
+BEGIN:VTIMEZONE
+TZID:US/Eastern
+LAST-MODIFIED:20040110T032845Z
+BEGIN:STANDARD
+DTSTART:20001026T020000
+RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
+TZNAME:EST
+TZOFFSETFROM:-0400
+TZOFFSETTO:-0500
+END:STANDARD
+BEGIN:DAYLIGHT
+DTSTART:20000404T020000
+RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
+TZNAME:EDT
+TZOFFSETFROM:-0500
+TZOFFSETTO:-0400
+END:DAYLIGHT
+END:VTIMEZONE
+BEGIN:VEVENT
+UID:945113826375CBB89184DC36-%ctr at ninevah.local
+DTSTART;TZID=US/Eastern:20060102T100000
+DURATION:PT1H
+CREATED:20060102T150000Z
+DTSTAMP:20051222T210412Z
+RRULE:FREQ=DAILY;COUNT=5
+SUMMARY:event 5-%ctr
+BEGIN:VALARM
+ACTION:AUDIO
+TRIGGER;RELATED=START:-PT10M
+X-MULBERRY-ALARM-STATUS:PENDING
+X-MULBERRY-SPEAK-TEXT:
+END:VALARM
+END:VEVENT
+END:VCALENDAR
Deleted: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b88dd50941e4a31520ee396fd7894c96.ics
===================================================================
--- CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b88dd50941e4a31520ee396fd7894c96.ics 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b88dd50941e4a31520ee396fd7894c96.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -1,31 +0,0 @@
-BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
-BEGIN:VTIMEZONE
-TZID:US/Eastern
-LAST-MODIFIED:20040110T032845Z
-BEGIN:STANDARD
-DTSTART:20001026T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZNAME:EST
-TZOFFSETFROM:-0400
-TZOFFSETTO:-0500
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:20000404T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZNAME:EDT
-TZOFFSETFROM:-0500
-TZOFFSETTO:-0400
-END:DAYLIGHT
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:54E181BC7CCC373042B28842-10-%ctr at ninevah.local
-DTSTART;TZID=US/Eastern:20060108T100000
-DURATION:PT1H
-CREATED:20060101T150000Z
-DTSTAMP:20051222T205953Z
-SUMMARY:event 10-%ctr
-END:VEVENT
-END:VCALENDAR
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b88dd50941e4a31520ee396fd7894c96.ics (from rev 5438, CalendarServer/trunk/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b88dd50941e4a31520ee396fd7894c96.ics)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b88dd50941e4a31520ee396fd7894c96.ics (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/calendar_store/ho/me/home1/calendar_2/b88dd50941e4a31520ee396fd7894c96.ics 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,31 @@
+BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//Cyrusoft International\, Inc.//Mulberry v4.0//EN
+BEGIN:VTIMEZONE
+TZID:US/Eastern
+LAST-MODIFIED:20040110T032845Z
+BEGIN:STANDARD
+DTSTART:20001026T020000
+RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
+TZNAME:EST
+TZOFFSETFROM:-0400
+TZOFFSETTO:-0500
+END:STANDARD
+BEGIN:DAYLIGHT
+DTSTART:20000404T020000
+RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
+TZNAME:EDT
+TZOFFSETFROM:-0500
+TZOFFSETTO:-0400
+END:DAYLIGHT
+END:VTIMEZONE
+BEGIN:VEVENT
+UID:54E181BC7CCC373042B28842-10-%ctr at ninevah.local
+DTSTART;TZID=US/Eastern:20060108T100000
+DURATION:PT1H
+CREATED:20060101T150000Z
+DTSTAMP:20051222T205953Z
+SUMMARY:event 10-%ctr
+END:VEVENT
+END:VCALENDAR
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/test_file.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/test_file.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/calendarstore/test/test_file.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -27,7 +27,8 @@
from txdav.idav import IPropertyStore
-from txcaldav.icalendarstore import ICalendarHome, ICalendar, ICalendarObject
+from txcaldav.icalendarstore import ICalendarStore, ICalendarHome
+from txcaldav.icalendarstore import ICalendar, ICalendarObject
from txcaldav.icalendarstore import CalendarNameNotAllowedError
from txcaldav.icalendarstore import CalendarObjectNameNotAllowedError
from txcaldav.icalendarstore import CalendarAlreadyExistsError
@@ -152,29 +153,31 @@
storePath.copyTo(calendarPath)
test.calendarStore = CalendarStore(calendarPath)
+ assert test.calendarStore is not None, "No calendar store?"
def setUpHome1(test):
setUpCalendarStore(test)
test.home1 = test.calendarStore.calendarHomeWithUID("home1")
+ assert test.home1 is not None, "No calendar home?"
def setUpCalendar1(test):
setUpHome1(test)
test.calendar1 = test.home1.calendarWithName("calendar_1")
+ assert test.calendar1 is not None, "No calendar?"
class CalendarStoreTest(unittest.TestCase):
def setUp(self):
setUpCalendarStore(self)
- # FIXME: If we define an interface
- #def test_interface(self):
- # """
- # Interface is completed and conforming.
- # """
- # try:
- # verifyObject(ICalendarStore, self.calendarstore)
- # except BrokenMethodImplementation, e:
- # self.fail(e)
+ def test_interface(self):
+ """
+ Interface is completed and conforming.
+ """
+ try:
+ verifyObject(ICalendarStore, self.calendarStore)
+ except BrokenMethodImplementation, e:
+ self.fail(e)
def test_init(self):
"""
@@ -202,6 +205,26 @@
None
)
+ def test_calendarHomeWithUID_create(self):
+ """
+ Create missing calendar home.
+ """
+ calendarHome = self.calendarStore.calendarHomeWithUID(
+ "xyzzy",
+ create=True
+ )
+
+ self.failUnless(isinstance(calendarHome, CalendarHome))
+ self.failUnless(calendarHome.path.isdir())
+
+ def test_calendarHomeWithUID_create_exists(self):
+ """
+ Create a calendar home that already exists.
+ """
+ calendarHome = self.calendarStore.calendarHomeWithUID("home1")
+
+ self.failUnless(isinstance(calendarHome, CalendarHome))
+
def test_calendarHomeWithUID_dot(self):
"""
Filenames starting with "." are reserved by this
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/icalendarstore.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/icalendarstore.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/icalendarstore.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -39,6 +39,7 @@
"InternalDataStoreError",
# Classes
+ "ICalendarStore",
"ICalendarHome",
"ICalendar",
"ICalendarObject",
@@ -125,6 +126,23 @@
# Interfaces
#
+class ICalendarStore(Interface):
+ """
+ Calendar store
+ """
+ def calendarHomeWithUID(uid, create=False):
+ """
+ Retrieve the calendar home for the principal with the given
+ C{uid}.
+
+ If C{create} is true, create the calendar home if it doesn't
+ already exist.
+
+ @return: an L{ICalendarHome} or C{None} if no such calendar
+ home exists.
+ """
+
+
class ICalendarHome(Interface):
"""
Calendar home
@@ -134,6 +152,14 @@
includes both calendars owned by the principal as well as
calendars that have been shared with and accepted by the principal.
"""
+ # FIXME: We need a principal interface somewhere, possibly part of
+ # an idirectory rework. IDirectoryRecord may be close...
+ #def owner():
+ # """
+ # Retrieve the owner principal for this calendar home.
+ # @return: a ???
+ # """
+
def uid():
"""
Retrieve the unique identifier for this calendar home.
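The new ICalendarStore interface above is deliberately small: a store hands out calendar homes by owner UID and can provision a missing home on demand. A minimal usage sketch, assuming the file-backed CalendarStore lives in txcaldav.calendarstore.file and using a hypothetical store directory:

    from twext.python.filepath import FilePath
    from txcaldav.calendarstore.file import CalendarStore  # assumed module path

    # Open a store rooted at an existing on-disk directory (path hypothetical).
    store = CalendarStore(FilePath("/tmp/calendar_store"))

    # Without create=True a missing UID yields None; an existing home comes
    # back as an ICalendarHome provider.
    maybeHome = store.calendarHomeWithUID("home1")

    # With create=True the home is provisioned on demand.
    newHome = store.calendarHomeWithUID("xyzzy", create=True)
    assert newHome is not None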
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/resource.py (from rev 5438, CalendarServer/trunk/txcaldav/resource.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/resource.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txcaldav/resource.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,151 @@
+##
+# Copyright (c) 2010 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+CalDAV resources.
+"""
+
+__all__ = [
+ "CalDAVResource",
+ "CalendarHomeResource",
+ "CalendarCollectionResource",
+ "CalendarObjectResource",
+ "ScheduleInboxResource",
+ "ScheduleOutboxResource",
+]
+
+
+import urllib
+
+from twext.python.log import LoggingMixIn
+from twext.web2.http_headers import MimeType
+from twext.web2.http import RedirectResponse, Response
+from twext.web2.stream import MemoryStream
+
+from twext.web2.dav.davxml import dav_namespace
+
+from twistedcaldav import caldavxml
+from twistedcaldav import config
+from twistedcaldav.caldavxml import caldav_namespace
+from twistedcaldav.extensions import DAVResource
+# Assumed: allowedComponents (used by supportedCalendarComponentSet below)
+# is not defined in this file; twistedcaldav.ical is its likely home.
+from twistedcaldav.ical import allowedComponents
+
+
+class CalDAVResource(DAVResource, LoggingMixIn):
+ """
+ CalDAV resource.
+ """
+ def davComplianceClasses(self):
+ return (
+ tuple(super(CalDAVResource, self).davComplianceClasses())
+ + config.CalDAVComplianceClasses
+ )
+
+ supportedCalendarComponentSet = caldavxml.SupportedCalendarComponentSet(
+ *[caldavxml.CalendarComponent(name=item) for item in allowedComponents]
+ )
+
+
+class CalendarHomeResource(CalDAVResource):
+ """
+ Calendar home resource.
+
+ This resource is backed by an L{ICalendarHome} implementation.
+ """
+
+
+class CalendarCollectionResource(CalDAVResource):
+ """
+ Calendar collection resource.
+
+ This resource is backed by an L{ICalendar} implementation.
+ """
+ #
+ # HTTP
+ #
+
+ def render(self, request):
+ if config.EnableMonolithicCalendars:
+ #
+ # Send listing instead of iCalendar data to HTML agents
+ # This is mostly useful for debugging...
+ #
+ # FIXME: Add a self-link to the dirlist with a query string so
+ # users can still download the actual iCalendar data?
+ #
+ # FIXME: Are there better ways to detect this than hacking in
+ # user agents?
+ #
+ # FIXME: In the meantime, make this a configurable regex list?
+ #
+ agent = request.headers.getHeader("user-agent")
+ if agent is not None and (
+ agent.startswith("Mozilla/") and agent.find("Gecko") != -1
+ ):
+ renderAsHTML = True
+ else:
+ renderAsHTML = False
+ else:
+ renderAsHTML = True
+
+ if not renderAsHTML:
+ # Render a monolithic iCalendar file
+ if request.path[-1] != "/":
+ # Redirect to include trailing '/' in URI
+ return RedirectResponse(request.unparseURL(path=urllib.quote(urllib.unquote(request.path), safe=':/')+'/'))
+
+ def _defer(data):
+ response = Response()
+ response.stream = MemoryStream(str(data))
+ response.headers.setHeader("content-type", MimeType.fromString("text/calendar"))
+ return response
+
+ d = self.iCalendarRolledup(request)
+ d.addCallback(_defer)
+ return d
+
+ return super(CalDAVResource, self).render(request)
+
+ #
+ # WebDAV
+ #
+
+ liveProperties = DAVResource.liveProperties + (
+ (dav_namespace, "owner"), # Private Events needs this but it is also OK to return empty
+ (caldav_namespace, "supported-calendar-component-set"),
+ (caldav_namespace, "supported-calendar-data" ),
+ )
+
+
+
+
+class CalendarObjectResource(CalDAVResource):
+ """
+ Calendar object resource.
+
+ This resource is backed by an L{ICalendarObject} implementation.
+ """
+
+
+class ScheduleInboxResource(CalDAVResource):
+ """
+ Schedule inbox resource.
+
+ This resource is backed by an XXXXXXX implementation.
+ """
+
+
+class ScheduleOutboxResource(CalDAVResource):
+ """
+ Schedule outbox resource.
+
+ This resource is backed by an XXXXXXX implementation.
+ """
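CalendarCollectionResource.render() above decides between an HTML listing and a rolled-up text/calendar response by sniffing for a Gecko User-Agent; one of its FIXMEs asks whether this should become a configurable regex list. A minimal sketch of that idea, with a hypothetical pattern list rather than an existing config key:

    import re

    def shouldRenderAsHTML(agent, patterns):
        """True if the User-Agent matches any of the configured patterns."""
        if agent is None:
            return False
        return any(re.search(pattern, agent) for pattern in patterns)

    # A single pattern reproduces the hard-coded check in render() above:
    # shouldRenderAsHTML(agent, [r"^Mozilla/.*Gecko"])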
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/idav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/idav.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/idav.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -20,6 +20,7 @@
__all__ = [
"PropertyStoreError",
+ "PropertyChangeNotAllowedError",
"IPropertyName",
"IPropertyStore",
]
@@ -29,12 +30,28 @@
from zope.interface.common.mapping import IMapping
+#
+# Exceptions
+#
+
class PropertyStoreError(RuntimeError):
"""
Property store error.
"""
+class PropertyChangeNotAllowedError(PropertyStoreError):
+ """
+ Property cannot be edited.
+ """
+ def __init__(self, message, keys):
+ PropertyStoreError.__init__(self, message)
+ self.keys = keys
+
+#
+# Interfaces
+#
+
class IPropertyName(Interface):
"""
Property name.
@@ -58,12 +75,12 @@
are not persisted until C{flush()} is called, and can be undone
using C{abort()}.
"""
- def flush(self):
+ def flush():
"""
Write out any pending changes.
"""
- def abort(self):
+ def abort():
"""
Abort any pending changes.
"""
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/base.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/base.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/base.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -20,6 +20,7 @@
__all__ = [
"AbstractPropertyStore",
+ "PropertyName",
]
from zope.interface import implements
@@ -42,7 +43,7 @@
if (index is -1 or not len(sname) > index or not sname[0] == "{"):
raise TypeError("Invalid sname: %r" % (sname,))
- return (sname[1:index], sname[index+1:])
+ return PropertyName(sname[1:index], sname[index+1:])
def __init__(self, namespace, name):
self.namespace = namespace
@@ -167,3 +168,10 @@
def update(other=None):
# FIXME
raise NotImplementedError()
+
+
+# FIXME: Actually, we should replace this with calls to IPropertyName()
+def validKey(key):
+ # Used by implementations to verify that keys are valid
+ if not isinstance(key, PropertyName):
+ raise TypeError("Not a PropertyName: %r" % (key,))
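With the change above, PropertyName.fromString() returns a PropertyName rather than a bare (namespace, name) tuple, and validKey() gives property-store implementations a shared type check for keys. A small sketch using the names from the tests in this change:

    from txdav.propertystore.base import PropertyName, validKey

    name = PropertyName.fromString("{http://calendarserver.org/}bleargh")
    assert name.namespace == "http://calendarserver.org/"
    assert name.name == "bleargh"

    validKey(name)        # passes silently: the key is a PropertyName
    # validKey("bleargh") # would raise TypeError: not a PropertyName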
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/none.py (from rev 5438, CalendarServer/trunk/txdav/propertystore/none.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/none.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/none.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,89 @@
+##
+# Copyright (c) 2010 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Property store with no storage.
+"""
+
+from __future__ import absolute_import
+
+__all__ = [
+ "PropertyStore",
+]
+
+from txdav.propertystore.base import AbstractPropertyStore, validKey
+from txdav.idav import PropertyChangeNotAllowedError
+
+
+class PropertyStore(AbstractPropertyStore):
+ """
+ Property store with no storage.
+ """
+ def __init__(self):
+ self.modified = {}
+
+ def __str__(self):
+ return "<%s>" % (self.__class__.__name__,)
+
+ #
+ # Accessors
+ #
+
+ def __delitem__(self, key):
+ validKey(key)
+
+ if key in self.modified:
+ del self.modified[key]
+ else:
+ raise KeyError(key)
+
+ def __getitem__(self, key):
+ validKey(key)
+
+ if key in self.modified:
+ return self.modified[key]
+ else:
+ raise KeyError(key)
+
+ def __contains__(self, key):
+ validKey(key)
+
+ return key in self.modified
+
+ def __setitem__(self, key, value):
+ validKey(key)
+
+ self.modified[key] = value
+
+ def __iter__(self):
+ return (k for k in self.modified)
+
+ def __len__(self):
+ return len(self.modified)
+
+ #
+ # I/O
+ #
+
+ def flush(self):
+ if self.modified:
+ raise PropertyChangeNotAllowedError(
+ "None property store cannot flush changes.",
+ keys = self.modified.keys()
+ )
+
+ def abort(self):
+ self.modified.clear()
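The storage-less PropertyStore above holds everything in its in-memory modified dict: reads and writes behave like a dict, abort() drops pending changes, and flush() raises the new PropertyChangeNotAllowedError with the keys it could not persist. A short sketch, reusing the davxml-based property values from the tests in this change:

    from twext.web2.dav import davxml

    from txdav.idav import PropertyChangeNotAllowedError
    from txdav.propertystore.base import PropertyName
    from txdav.propertystore.none import PropertyStore

    store = PropertyStore()
    key = PropertyName("http://calendarserver.org/ns/test/", "example")
    store[key] = davxml.ResponseDescription("Hello, World!")

    try:
        store.flush()
    except PropertyChangeNotAllowedError, e:
        assert key in e.keys    # the keys that could not be persisted
        store.abort()           # drop the pending change instead

    assert len(store) == 0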
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/base.py (from rev 5438, CalendarServer/trunk/txdav/propertystore/test/base.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/base.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/base.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,153 @@
+##
+# Copyright (c) 2010 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Generic property store tests.
+"""
+
+__all__ = [
+ "PropertyStoreTest",
+ "propertyName",
+ "propertyValue",
+]
+
+
+from zope.interface.verify import verifyObject, BrokenMethodImplementation
+
+from twisted.trial import unittest
+
+from twext.web2.dav import davxml
+
+from txdav.idav import IPropertyStore
+from txdav.propertystore.base import PropertyName
+
+
+class PropertyStoreTest(unittest.TestCase):
+ # Subclass must define self.propertyStore in setUp().
+
+ def test_interface(self):
+ try:
+ verifyObject(IPropertyStore, self.propertyStore)
+ except BrokenMethodImplementation, e:
+ self.fail(e)
+
+ def test_set_get_contains(self):
+ store = self.propertyStore
+
+ name = propertyName("test")
+ value = propertyValue("Hello, World!")
+
+ store[name] = value
+ self.assertEquals(store.get(name, None), value)
+ self.failUnless(name in store)
+
+ def test_delete_get_contains(self):
+ store = self.propertyStore
+
+ name = propertyName("test")
+ value = propertyValue("Hello, World!")
+
+ store[name] = value
+ del store[name]
+ self.assertEquals(store.get(name, None), None)
+ self.failIf(name in store)
+
+ def test_iteration(self):
+ store = self.propertyStore
+
+ value = propertyValue("Hello, World!")
+
+ names = set(propertyName(str(i)) for i in (1,2,3,4))
+
+ for name in names:
+ store[name] = value
+
+ self.assertEquals(set(store.keys()), names)
+ self.assertEquals(len(store), len(names))
+
+ def test_delete_none(self):
+ def doDelete():
+ del self.propertyStore[propertyName("xyzzy")]
+
+ self.assertRaises(KeyError, doDelete)
+
+ def test_keyInPropertyName(self):
+ store = self.propertyStore
+
+ def doGet():
+ store["xyzzy"]
+
+ def doSet():
+ store["xyzzy"] = propertyValue("Hello, World!")
+
+ def doDelete():
+ del store["xyzzy"]
+
+ def doContains():
+ "xyzzy" in store
+
+ self.assertRaises(TypeError, doGet)
+ self.assertRaises(TypeError, doSet)
+ self.assertRaises(TypeError, doDelete)
+ self.assertRaises(TypeError, doContains)
+
+ def test_flush(self):
+ store = self.propertyStore
+
+ name = propertyName("test")
+ value = propertyValue("Hello, World!")
+
+ #
+ # Set value flushes correctly
+ #
+ store[name] = value
+
+ store.flush()
+ store.abort()
+
+ self.assertEquals(store.get(name, None), value)
+ self.assertEquals(len(store), 1)
+
+ #
+ # Deleted value flushes correctly
+ #
+ del store[name]
+
+ store.flush()
+ store.abort()
+
+ self.assertEquals(store.get(name, None), None)
+ self.assertEquals(len(store), 0)
+
+ def test_abort(self):
+ store = self.propertyStore
+
+ name = propertyName("test")
+ value = propertyValue("Hello, World!")
+
+ store[name] = value
+
+ store.abort()
+
+ self.assertEquals(store.get(name, None), None)
+ self.assertEquals(len(store), 0)
+
+
+def propertyName(name):
+ return PropertyName("http://calendarserver.org/ns/test/", name)
+
+def propertyValue(value):
+ return davxml.ResponseDescription(value)
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_base.py (from rev 5438, CalendarServer/trunk/txdav/propertystore/test/test_base.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_base.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_base.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,55 @@
+##
+# Copyright (c) 2010 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Property store tests.
+"""
+
+from zope.interface.verify import verifyObject, BrokenMethodImplementation
+
+from twisted.trial import unittest
+
+from txdav.idav import IPropertyName
+from txdav.propertystore.base import PropertyName
+
+
+class PropertyNameTest(unittest.TestCase):
+ def test_interface(self):
+ name = PropertyName("http://calendarserver.org/", "bleargh")
+ try:
+ verifyObject(IPropertyName, name)
+ except BrokenMethodImplementation, e:
+ self.fail(e)
+
+ def test_init(self):
+ name = PropertyName("http://calendarserver.org/", "bleargh")
+
+ self.assertEquals(name.namespace, "http://calendarserver.org/")
+ self.assertEquals(name.name, "bleargh")
+
+ def test_fromString(self):
+ name = PropertyName.fromString("{http://calendarserver.org/}bleargh")
+
+ self.assertEquals(name.namespace, "http://calendarserver.org/")
+ self.assertEquals(name.name, "bleargh")
+
+ def test_toString(self):
+ name = PropertyName("http://calendarserver.org/", "bleargh")
+
+ self.assertEquals(
+ name.toString(),
+ "{http://calendarserver.org/}bleargh"
+ )
Copied: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_none.py (from rev 5438, CalendarServer/trunk/txdav/propertystore/test/test_none.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_none.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_none.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -0,0 +1,57 @@
+##
+# Copyright (c) 2010 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+Property store tests.
+"""
+
+from txdav.idav import PropertyChangeNotAllowedError
+from txdav.propertystore.none import PropertyStore
+from txdav.propertystore.test.base import propertyName, propertyValue
+
+from txdav.propertystore.test import base
+
+
+class PropertyStoreTest(base.PropertyStoreTest):
+ def setUp(self):
+ self.propertyStore = PropertyStore()
+
+ def test_flush(self):
+ store = self.propertyStore
+
+ # Flushing no changes is ok
+ store.flush()
+
+ name = propertyName("test")
+ value = propertyValue("Hello, World!")
+
+ store[name] = value
+
+ # Flushing changes isn't allowed
+ self.assertRaises(PropertyChangeNotAllowedError, store.flush)
+
+ # Changes are still here
+ self.assertEquals(store.get(name, None), value)
+
+ # Flushing no changes is ok
+ del store[name]
+ store.flush()
+
+ self.assertEquals(store.get(name, None), None)
+
+ def test_abort(self):
+ super(PropertyStoreTest, self).test_abort()
+ self.assertEquals(self.propertyStore.modified, {})
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_xattr.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_xattr.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/test/test_xattr.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -18,35 +18,45 @@
Property store tests.
"""
-from zope.interface.verify import verifyObject, BrokenMethodImplementation
+from twext.python.filepath import FilePath
-#from twext.python.filepath import CachingFilePath as FilePath
-from twisted.trial import unittest
+from txdav.propertystore.base import PropertyName
+from txdav.propertystore.test.base import propertyName
-from txdav.idav import IPropertyStore
-from txdav.propertystore.xattr import PropertyStore
+from txdav.propertystore.test import base
+try:
+ from txdav.propertystore.xattr import PropertyStore
+ from xattr import xattr
+except ImportError, e:
+ PropertyStore = None
+ importErrorMessage = str(e)
-class PropertyStoreTest(unittest.TestCase):
- def test_interface(self):
- raise NotImplementedError()
- store = PropertyStore()
+class PropertyStoreTest(base.PropertyStoreTest):
+ def setUp(self):
+ tempDir = FilePath(self.mktemp())
+ tempDir.makedirs()
+ tempFile = tempDir.child("test")
+ tempFile.touch()
+ self.propertyStore = PropertyStore(tempFile)
- try:
- verifyObject(IPropertyStore, store)
- except BrokenMethodImplementation, e:
- self.fail(e)
- test_interface.todo = "Unimplemented"
-
def test_init(self):
- raise NotImplementedError()
- test_init.todo = "Unimplemented"
+ store = self.propertyStore
+ self.failUnless(isinstance(store.attrs, xattr))
+ self.assertEquals(store.removed, set())
+ self.assertEquals(store.modified, {})
- def test_flush(self):
- raise NotImplementedError()
- test_flush.todo = "Unimplemented"
+ def test_abort(self):
+ super(PropertyStoreTest, self).test_abort()
+ store = self.propertyStore
+ self.assertEquals(store.removed, set())
+ self.assertEquals(store.modified, {})
- def test_abort(self):
- raise NotImplementedError()
- test_abort.todo = "Unimplemented"
+
+if PropertyStore is None:
+ PropertyStoreTest.skip = importErrorMessage
+
+
+def propertyName(name):
+ return PropertyName("http://calendarserver.org/ns/test/", name)
Modified: CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/xattr.py
===================================================================
--- CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/xattr.py 2010-04-07 15:13:45 UTC (rev 5438)
+++ CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/propertystore/xattr.py 2010-04-07 19:27:31 UTC (rev 5439)
@@ -29,15 +29,11 @@
import urllib
from zlib import compress, decompress, error as ZlibError
from cPickle import UnpicklingError, loads as unpickle
+from xattr import xattr
-import xattr
-
-if getattr(xattr, "xattr", None) is None:
- raise ImportError("wrong xattr package imported")
-
from twext.web2.dav.davxml import WebDAVDocument
-from txdav.propertystore.base import AbstractPropertyStore, PropertyName
+from txdav.propertystore.base import AbstractPropertyStore, PropertyName, validKey
from txdav.idav import PropertyStoreError
@@ -88,7 +84,7 @@
def __init__(self, path):
self.path = path
- self.attrs = xattr.xattr(path.path)
+ self.attrs = xattr(path.path)
self.removed = set()
self.modified = {}
@@ -100,11 +96,18 @@
#
def __delitem__(self, key):
+ validKey(key)
+
if key in self.modified:
del self.modified[key]
+ elif self._encodeKey(key) not in self.attrs:
+ raise KeyError(key)
+
self.removed.add(key)
def __getitem__(self, key):
+ validKey(key)
+
if key in self.modified:
return self.modified[key]
@@ -157,6 +160,8 @@
return doc.root_element
def __contains__(self, key):
+ validKey(key)
+
if key in self.modified:
return True
if key in self.removed:
@@ -164,6 +169,8 @@
return self._encodeKey(key) in self.attrs
def __setitem__(self, key, value):
+ validKey(key)
+
if key in self.removed:
self.removed.remove(key)
self.modified[key] = value
@@ -182,7 +189,11 @@
yield key
def __len__(self):
- return len(self.attrs)
+ keys = (
+ set(self.attrs.keys()) |
+ set(self._encodeKey(key) for key in self.modified)
+ )
+ return len(keys)
#
# I/O
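The revised __len__() in the xattr-backed store counts pending in-memory modifications alongside the extended attributes already written to disk, so the reported size reflects the store's logical contents before flush(). A sketch, assuming a filesystem path with xattr support (the path itself is hypothetical):

    from twext.python.filepath import FilePath

    from txdav.propertystore.test.base import propertyName, propertyValue
    from txdav.propertystore.xattr import PropertyStore

    path = FilePath("/tmp/xattr-prop-test")   # hypothetical xattr-capable file
    path.touch()

    store = PropertyStore(path)
    before = len(store)

    store[propertyName("pending")] = propertyValue("not yet flushed")
    assert len(store) == before + 1           # pending key counted before flush()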