[CalendarServer-changes] [9937] CalendarServer/branches/users/cdaboo/ischedule-dkim

source_changes at macosforge.org source_changes at macosforge.org
Mon Oct 15 12:23:10 PDT 2012


Revision: 9937
          http://trac.calendarserver.org//changeset/9937
Author:   cdaboo at apple.com
Date:     2012-10-15 12:23:10 -0700 (Mon, 15 Oct 2012)
Log Message:
-----------
Merge from trunk.

Modified Paths:
--------------
    CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/caldav.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/test/test_util.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/util.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/contrib/performance/loadtest/config.plist
    CalendarServer/branches/users/cdaboo/ischedule-dkim/contrib/performance/loadtest/ical.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/support/Makefile.Apple
    CalendarServer/branches/users/cdaboo/ischedule-dkim/support/version.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/twext/web2/dav/resource.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/twext/web2/stream.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/icaldiff.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/implicit.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/itip.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/stdconfig.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/storebridge.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/base/propertystore/base.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/file.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/sql.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/test/common.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/util.py
    CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/common/datastore/sql.py

Property Changed:
----------------
    CalendarServer/branches/users/cdaboo/ischedule-dkim/


Property changes on: CalendarServer/branches/users/cdaboo/ischedule-dkim
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:9747-9883
   + /CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:9747-9936

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/caldav.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/caldav.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -75,7 +75,7 @@
 from twistedcaldav.stdconfig import DEFAULT_CONFIG, DEFAULT_CONFIG_FILE
 from twistedcaldav.upgrade import UpgradeFileSystemFormatService, PostDBImportService
 
-from calendarserver.tap.util import pgServiceFromConfig, getDBPool
+from calendarserver.tap.util import pgServiceFromConfig, getDBPool, MemoryLimitService
 
 from twext.enterprise.ienterprise import POSTGRES_DIALECT
 from twext.enterprise.ienterprise import ORACLE_DIALECT
@@ -1207,6 +1207,11 @@
         s.processMonitor = monitor
         monitor.setServiceParent(s)
 
+        if config.MemoryLimiter.Enabled:
+            memoryLimiter = MemoryLimitService(monitor, config.MemoryLimiter.Seconds,
+                config.MemoryLimiter.Bytes, config.MemoryLimiter.ResidentOnly)
+            memoryLimiter.setServiceParent(s)
+
         for name, pool in config.Memcached.Pools.items():
             if pool.ServerEnabled:
                 self.log_info(

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/test/test_util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/test/test_util.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/test/test_util.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -14,10 +14,11 @@
 # limitations under the License.
 ##
 
-from calendarserver.tap.util import computeProcessCount, directoryFromConfig
+from calendarserver.tap.util import computeProcessCount, directoryFromConfig, MemoryLimitService
 from twistedcaldav.test.util import TestCase
 from twistedcaldav.config import config
 from twistedcaldav.directory.augment import AugmentXMLDB
+from twisted.internet.task import Clock
 
 class ProcessCountTestCase(TestCase):
 
@@ -50,6 +51,8 @@
                 computeProcessCount(min, perCPU, perGB, cpuCount=cpu, memSize=mem)
             )
 
+
+
 class UtilTestCase(TestCase):
 
     def test_directoryFromConfig(self):
@@ -62,3 +65,78 @@
             # augmentService set to AugmentXMLDB
             if hasattr(service, "augmentService"):
                 self.assertTrue(isinstance(service.augmentService, AugmentXMLDB))
+
+
+
+# Stub classes for MemoryLimitServiceTestCase
+
+class StubProtocol(object):
+    def __init__(self, transport):
+        self.transport = transport
+
+
+
+class StubProcess(object):
+    def __init__(self, pid):
+        self.pid = pid
+
+
+
+class StubProcessMonitor(object):
+    def __init__(self, processes, protocols):
+        self.processes = processes
+        self.protocols = protocols
+        self.history = []
+
+
+    def stopProcess(self, name):
+        self.history.append(name)
+
+
+
+class MemoryLimitServiceTestCase(TestCase):
+
+    def test_checkMemory(self):
+        """
+        Set up stub objects to verify MemoryLimitService.checkMemory( )
+        only stops the processes whose memory usage exceeds the configured
+        limit, and skips memcached
+        """
+        data = {
+            # PID : (name, resident memory-in-bytes, virtual memory-in-bytes)
+            101 : ("process #1", 10, 1010),
+            102 : ("process #2", 30, 1030),
+            103 : ("process #3", 50, 1050),
+            99  : ("memcached-Default", 10, 1010),
+        }
+
+        processes = []
+        protocols = {}
+        for pid, (name, _ignore_resident, _ignore_virtual) in data.iteritems():
+            protocols[name] = StubProtocol(StubProcess(pid))
+            processes.append(name)
+        processMonitor = StubProcessMonitor(processes, protocols)
+        clock = Clock()
+        service = MemoryLimitService(processMonitor, 10, 15, True, reactor=clock)
+
+        # For testing, use a stub implementation of memory-usage lookup
+        def testMemoryForPID(pid, residentOnly):
+            return data[pid][1 if residentOnly else 2]
+        service._memoryForPID = testMemoryForPID
+
+        # After 5 seconds, nothing should have happened, since the interval is 10 seconds
+        service.startService()
+        clock.advance(5)
+        self.assertEquals(processMonitor.history, [])
+
+        # After 7 more seconds, processes 2 and 3 should have been stopped since their
+        # memory usage exceeds 10 bytes
+        clock.advance(7)
+        self.assertEquals(processMonitor.history, ['process #2', 'process #3'])
+
+        # Now switch to looking at virtual memory, in which case all 3 processes
+        # should be stopped
+        service._residentOnly = False
+        processMonitor.history = []
+        clock.advance(10)
+        self.assertEquals(processMonitor.history, ['process #1', 'process #2', 'process #3'])

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/util.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/calendarserver/tap/util.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -23,12 +23,14 @@
     "getRootResource",
     "getDBPool",
     "FakeRequest",
+    "MemoryLimitService",
 ]
 
 import errno
 import os
 from time import sleep
 from socket import fromfd, AF_UNIX, SOCK_STREAM, socketpair
+import psutil
 
 from twext.python.filepath import CachingFilePath as FilePath
 from twext.python.log import Logger
@@ -37,6 +39,7 @@
 from twext.web2.http_headers import Headers
 from twext.web2.static import File as FileResource
 
+from twisted.application.service import Service
 from twisted.cred.portal import Portal
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet import reactor as _reactor
@@ -69,6 +72,7 @@
 from twext.enterprise.ienterprise import ORACLE_DIALECT
 from twext.enterprise.adbapi2 import ConnectionPool, ConnectionPoolConnection
 
+
 try:
     from twistedcaldav.authkerb import NegotiateCredentialFactory
     NegotiateCredentialFactory  # pacify pyflakes
@@ -839,3 +843,96 @@
 
     def addResponseFilter(self, *args, **kwds):
         pass
+
+
+
+def memoryForPID(pid, residentOnly=True):
+    """
+    Return the amount of memory in use for the given process.  If residentOnly is True,
+        then RSS is returned; if False, then virtual memory is returned.
+    @param pid: process id
+    @type pid: C{int}
+    @param residentOnly: Whether only resident memory should be included
+    @type residentOnly: C{boolean}
+    @return: Memory used by process in bytes
+    @rtype: C{int}
+    """
+    memoryInfo = psutil.Process(pid).get_memory_info()
+    return memoryInfo.rss if residentOnly else memoryInfo.vms
+
+
+
+class MemoryLimitService(Service, object):
+    """
+    A service which when paired with a DelayedStartupProcessMonitor will periodically
+    examine the memory usage of the monitored processes and stop any which exceed
+    a configured limit.  Memcached processes are ignored.
+    """
+
+    def __init__(self, processMonitor, intervalSeconds, limitBytes, residentOnly, reactor=None):
+        """
+        @param processMonitor: the DelayedStartupProcessMonitor
+        @param intervalSeconds: how often to check
+        @type intervalSeconds: C{int}
+        @param limitBytes: any monitored process over this limit is stopped
+        @type limitBytes: C{int}
+        @param residentOnly: whether only resident memory should be included
+        @type residentOnly: C{boolean}
+        @param reactor: for testing
+        """
+        self._processMonitor = processMonitor
+        self._seconds = intervalSeconds
+        self._bytes = limitBytes
+        self._residentOnly = residentOnly
+        self._delayedCall = None
+        if reactor is None:
+            from twisted.internet import reactor
+        self._reactor = reactor
+
+        # Unit tests can swap out _memoryForPID
+        self._memoryForPID = memoryForPID
+
+
+    def startService(self):
+        """
+        Start scheduling the memory checks
+        """
+        super(MemoryLimitService, self).startService()
+        self._delayedCall = self._reactor.callLater(self._seconds, self.checkMemory)
+
+
+    def stopService(self):
+        """
+        Stop checking memory
+        """
+        super(MemoryLimitService, self).stopService()
+        if self._delayedCall is not None and self._delayedCall.active():
+            self._delayedCall.cancel()
+            self._delayedCall = None
+
+
+    def checkMemory(self):
+        """
+        Stop any processes monitored by our paired processMonitor whose resident
+        memory exceeds our configured limitBytes.  Reschedule intervalSeconds in
+        the future.
+        """
+        try:
+            for name in self._processMonitor.processes:
+                if name.startswith("memcached"):
+                    continue
+                proto = self._processMonitor.protocols.get(name, None)
+                if proto is not None:
+                    proc = proto.transport
+                    pid = proc.pid
+                    try:
+                        memory = self._memoryForPID(pid, self._residentOnly)
+                    except Exception, e:
+                        log.error("Unable to determine memory usage of PID: %d (%s)" % (pid, e))
+                        continue
+                    if memory > self._bytes:
+                        log.warn("Killing large process: %s PID:%d %s:%d" %
+                            (name, pid, "Resident" if self._residentOnly else "Virtual", memory))
+                        self._processMonitor.stopProcess(name)
+        finally:
+            self._delayedCall = self._reactor.callLater(self._seconds, self.checkMemory)

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/contrib/performance/loadtest/config.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/contrib/performance/loadtest/config.plist	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/contrib/performance/loadtest/config.plist	2012-10-15 19:23:10 UTC (rev 9937)
@@ -257,55 +257,6 @@
 						</dict>
 					</dict>
 
-					<!-- This profile invites new attendees to existing events.
-					     This profile should no longer be used - use RealisticInviter instead. -->
-					<dict>
-						<key>class</key>
-						<string>contrib.performance.loadtest.profiles.Inviter</string>
-
-						<key>params</key>
-						<dict>
-							<key>enabled</key>
-							<false/>
-
-							<!-- Define the frequency at which new invitations will be sent out. -->
-							<key>sendInvitationDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.NormalDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- mu gives the mean of the normal distribution (in seconds). -->
-									<key>mu</key>
-									<integer>60</integer>
-
-									<!-- and sigma gives its standard deviation. -->
-									<key>sigma</key>
-									<integer>5</integer>
-								</dict>
-							</dict>
-
-							<!-- Define the distribution of who will be invited to an event. Each
-								set of credentials loaded by the load tester has an index; samples from this
-								distribution will be added to that index to arrive at the index of some other
-								credentials, which will be the target of the invitation. -->
-							<key>inviteeDistanceDistribution</key>
-							<dict>
-								<key>type</key>
-								<string>contrib.performance.stats.UniformIntegerDistribution</string>
-								<key>params</key>
-								<dict>
-									<!-- The minimum value (inclusive) of the uniform distribution. -->
-									<key>min</key>
-									<integer>-100</integer>
-									<!-- The maximum value (exclusive) of the uniform distribution. -->
-									<key>max</key>
-									<integer>101</integer>
-								</dict>
-							</dict>
-						</dict>
-					</dict>
-
 					<!-- This profile invites some number of new attendees to new events. -->
 					<dict>
 						<key>class</key>
@@ -352,10 +303,10 @@
 								<dict>
 									<!-- The minimum value (inclusive) of the uniform distribution. -->
 									<key>min</key>
-									<integer>-100</integer>
+									<integer>0</integer>
 									<!-- The maximum value (exclusive) of the uniform distribution. -->
 									<key>max</key>
-									<integer>101</integer>
+									<integer>99</integer>
 								</dict>
 							</dict>
 
@@ -385,7 +336,7 @@
 									<integer>6</integer>
 									<!-- maximum -->
 									<key>maximum</key>
-									<real>100</real>
+									<real>60</real>
 								</dict>
 							</dict>
 
@@ -520,7 +471,7 @@
 						<key>params</key>
 						<dict>
 							<key>enabled</key>
-							<true/>
+							<false/>
 
 							<!-- Define the interval (in seconds) at which this profile will use
 								its client to create a new task. -->

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/contrib/performance/loadtest/ical.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/contrib/performance/loadtest/ical.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/contrib/performance/loadtest/ical.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -771,7 +771,10 @@
                         if caldavxml.supported_calendar_component_set in nodes:
                             for comp in nodes[caldavxml.supported_calendar_component_set].getchildren():
                                 componentTypes.add(comp.get("name").upper())
-                    
+
+                    if textProps.get(davxml.displayname, None) == "tasks":
+                        # Until we can fix caldavxml.supported_calendar_component_set
+                        break                    
                     changeTag = davxml.sync_token if self.supportSync else csxml.getctag
                     calendars.append(Calendar(
                             nodeType.tag,

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/support/Makefile.Apple
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/support/Makefile.Apple	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/support/Makefile.Apple	2012-10-15 19:23:10 UTC (rev 9937)
@@ -59,16 +59,17 @@
 PyGreSQL-4.0::          $(BuildDirectory)/PyGreSQL-4.0
 sqlparse-0.1.2::        $(BuildDirectory)/sqlparse-0.1.2
 setproctitle-1.1.6::	$(BuildDirectory)/setproctitle-1.1.6
+psutil-0.6.1::		$(BuildDirectory)/psutil-0.6.1
 $(Project)::            $(BuildDirectory)/$(Project)
 
-build:: PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 setproctitle-1.1.6 $(Project)
+build:: PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 setproctitle-1.1.6 psutil-0.6.1 $(Project)
 
 setup:
 	$(_v) ./run -g
 
-prep:: setup CalDAVTester.tgz PyKerberos.tgz pycalendar.tgz PyGreSQL-4.0.tgz sqlparse-0.1.2.tgz setproctitle-1.1.6.tgz
+prep:: setup CalDAVTester.tgz PyKerberos.tgz pycalendar.tgz PyGreSQL-4.0.tgz sqlparse-0.1.2.tgz setproctitle-1.1.6.tgz psutil-0.6.1.tgz
 
-PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 setproctitle-1.1.6 $(Project)::
+PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 setproctitle-1.1.6 psutil-0.6.1 $(Project)::
 	@echo "Building $@..."
 	$(_v) cd $(BuildDirectory)/$@ && $(Environment) $(PYTHON) setup.py build
 
@@ -79,6 +80,7 @@
 	$(_v) cd $(BuildDirectory)/PyGreSQL-4.0       && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
 	$(_v) cd $(BuildDirectory)/sqlparse-0.1.2     && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
 	$(_v) cd $(BuildDirectory)/setproctitle-1.1.6 && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
+	$(_v) cd $(BuildDirectory)/psutil-0.6.1       && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
 	$(_v) for so in $$(find "$(DSTROOT)$(PY_HOME)/lib" -type f -name '*.so'); do $(STRIP) -Sx "$${so}"; done 
 	$(_v) $(INSTALL_DIRECTORY) "$(DSTROOT)$(SIPP)$(ETCDIR)$(CALDAVDSUBDIR)"
 	$(_v) $(INSTALL_FILE) "$(Sources)/conf/caldavd-apple.plist" "$(DSTROOT)$(SIPP)$(ETCDIR)$(CALDAVDSUBDIR)/caldavd.plist"

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/support/version.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/support/version.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/support/version.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -44,7 +44,7 @@
             continue
 
         if branch == "trunk":
-            base_version = "trunk"
+            base_version += "-trunk"
         elif branch.endswith("-dev"):
             base_version += "-dev"
 

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/twext/web2/dav/resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/twext/web2/dav/resource.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/twext/web2/dav/resource.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -2095,7 +2095,11 @@
             returnValue(None)
         while (url != "/"):
             url = parentForURL(url)
+            if url is None:
+                break
             parent = (yield request.locateResource(url))
+            if parent is None:
+                break
             if parent.hasQuotaRoot(request):
                 returnValue(parent)
 

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/twext/web2/stream.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/twext/web2/stream.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/twext/web2/stream.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -763,7 +763,8 @@
             return
         
         if isinstance(data, Deferred):
-            self.deferred = data.addCallbacks(self._doWrite, self.stopProducing)
+            self.deferred = data
+            self.deferred.addCallbacks(self._doWrite, self.stopProducing)
         else:
             self._doWrite(data)
 

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/icaldiff.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/icaldiff.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/icaldiff.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -335,7 +335,7 @@
                     if self._attendeeDecline(overridden):
                         changeCausesReply = True
                         changedRids.append(rid.getText() if rid else "")
-                        
+
                     # When a master component is present we keep the missing override in place but mark it as hidden.
                     # When no master is present we remove the override,
                     if exdatesnew is not None:
@@ -430,7 +430,7 @@
                     if self._attendeeDecline(overridden):
                         changeCausesReply = True
                         changedRids.append(decline.getText() if decline else "")
-                        
+
                     # When a master component is present we keep the missing override in place but mark it as hidden.
                     # When no master is present we remove the override,
                     if exdatesnew is not None:

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/implicit.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/implicit.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/implicit.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -1033,7 +1033,7 @@
                 if not changeAllowed:
                     if self.calendar.hasPropertyValueInAllComponents(Property("STATUS", "CANCELLED")):
                         log.debug("Attendee '%s' is creating CANCELLED event for mismatched UID: '%s' - removing entire event" % (self.attendee, self.uid,))
-                        self.return_status = ImplicitScheduler.STATUS_ORPHANED_CANCELLED_EVENT
+                        self.return_status = ImplicitScheduler.STATUS_ORPHANED_EVENT
                         returnValue(None)
                     else:
                         log.error("Attendee '%s' is not allowed to make an unauthorized change to an organized event: UID:%s" % (self.attendeePrincipal, self.uid,))
@@ -1043,6 +1043,13 @@
                             "Attendee changes are not allowed",
                         ))
 
+                # Check that the return calendar actually has any components left - this can happen if a cancelled
+                # component is removed and replaced by another cancelled or invalid one
+                if self.calendar.mainType() is None:
+                    log.debug("Attendee '%s' is replacing CANCELLED event: '%s' - removing entire event" % (self.attendee, self.uid,))
+                    self.return_status = ImplicitScheduler.STATUS_ORPHANED_EVENT
+                    returnValue(None)
+
                 if not doITipReply:
                     log.debug("Implicit - attendee '%s' is updating UID: '%s' but change is not significant" % (self.attendee, self.uid))
                     returnValue(None)

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/itip.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/itip.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/scheduling/itip.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -256,7 +256,7 @@
                     # Existing component is cancelled.
                     overridden.replaceProperty(Property("STATUS", "CANCELLED"))
                     newseq = component.propertyValue("SEQUENCE")
-                    overridden.replacePropertyInAllComponents(Property("SEQUENCE", newseq))
+                    overridden.replaceProperty(Property("SEQUENCE", newseq))
 
             elif calendar_master:
                 # We are trying to CANCEL a non-overridden instance.

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/stdconfig.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/stdconfig.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -491,6 +491,15 @@
         },
     },
 
+    # How large a spawned process is allowed to get before it's stopped
+    "MemoryLimiter" : {
+        "Enabled" : True,
+        "Seconds" : 60, # How often to check memory sizes (in seconds)
+        "Bytes"   : 2 * 1024 * 1024 * 1024, # Memory limit (RSS in bytes)
+        "ResidentOnly" : True,  # True: only take into account resident memory;
+                                # False: include virtual memory
+    },
+
     #
     # Service ACLs
     #

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/storebridge.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/storebridge.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/twistedcaldav/storebridge.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -1634,6 +1634,8 @@
 
         stream = ProducerStream()
         class StreamProtocol(Protocol):
+            def connectionMade(self):
+                stream.registerProducer(self.transport, False)
             def dataReceived(self, data):
                 stream.write(data)
             def connectionLost(self, reason):
@@ -1643,7 +1645,6 @@
         except IOError, e:
             log.error("Unable to read attachment: %s, due to: %s" % (self, e,))
             raise HTTPError(responsecode.NOT_FOUND)
-            
         return Response(OK, {"content-type":self.contentType()}, stream)
 
 

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/base/propertystore/base.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/base/propertystore/base.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/base/propertystore/base.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -26,7 +26,7 @@
 from twext.python.log import LoggingMixIn
 from txdav.xml import element as davxml
 from txdav.xml.base import encodeXMLName
-from twext.web2.dav.resource import TwistedGETContentMD5,\
+from twext.web2.dav.resource import TwistedGETContentMD5, \
     TwistedQuotaRootProperty
 
 from txdav.idav import IPropertyStore, IPropertyName
@@ -48,12 +48,14 @@
         if (index is -1 or not len(sname) > index or not sname[0] == "{"):
             raise TypeError("Invalid sname: %r" % (sname,))
 
-        return PropertyName(sname[1:index], sname[index+1:])
+        return PropertyName(sname[1:index], sname[index + 1:])
 
+
     @staticmethod
     def fromElement(element):
         return PropertyName(element.namespace, element.name)
 
+
     def __init__(self, namespace, name):
         self.namespace = namespace
         self.name = name
@@ -89,10 +91,12 @@
             self.toString(),
         )
 
+
     def toString(self):
         return encodeXMLName(self.namespace, self.name)
 
 
+
 class AbstractPropertyStore(LoggingMixIn, DictMixin):
     """
     Base property store.
@@ -121,7 +125,7 @@
         @param shareeUser: the per user uid or None if the same as defaultUser
         @type shareeUser: C{str}
         """
-        
+
         assert(defaultUser is not None or shareeUser is not None)
         self._defaultUser = shareeUser if defaultUser is None else defaultUser
         self._perUser = defaultUser if shareeUser is None else shareeUser
@@ -132,13 +136,20 @@
     def __str__(self):
         return "<%s>" % (self.__class__.__name__)
 
+
+    def _setDefaultUserUID(self, uid):
+        self._defaultUser = uid
+
+
     def _setPerUserUID(self, uid):
         self._perUser = uid
 
+
     def setSpecialProperties(self, shadowableKeys, globalKeys):
         self._shadowableKeys.update(shadowableKeys)
         self._globalKeys.update(globalKeys)
 
+
     #
     # Subclasses must override these
     #
@@ -146,30 +157,37 @@
     def _getitem_uid(self, key, uid):
         raise NotImplementedError()
 
+
     def _setitem_uid(self, key, value, uid):
         raise NotImplementedError()
 
+
     def _delitem_uid(self, key, uid):
         raise NotImplementedError()
 
+
     def _keys_uid(self, uid):
         raise NotImplementedError()
-    
+
+
     def _removeResource(self):
         raise NotImplementedError()
 
+
     def flush(self):
         raise NotImplementedError()
 
+
     def abort(self):
         raise NotImplementedError()
 
+
     #
     # Required UserDict implementations
     #
 
     def __getitem__(self, key):
-        # Handle per-user behavior 
+        # Handle per-user behavior
         if self.isShadowableProperty(key):
             try:
                 result = self._getitem_uid(key, self._perUser)
@@ -181,22 +199,25 @@
         else:
             return self._getitem_uid(key, self._perUser)
 
+
     def __setitem__(self, key, value):
-        # Handle per-user behavior 
+        # Handle per-user behavior
         if self.isGlobalProperty(key):
             return self._setitem_uid(key, value, self._defaultUser)
         else:
             return self._setitem_uid(key, value, self._perUser)
 
+
     def __delitem__(self, key):
-        # Handle per-user behavior 
+        # Handle per-user behavior
         if self.isGlobalProperty(key):
             self._delitem_uid(key, self._defaultUser)
         else:
             self._delitem_uid(key, self._perUser)
 
+
     def keys(self):
-        
+
         userkeys = list(self._keys_uid(self._perUser))
         if self._defaultUser != self._perUser:
             defaultkeys = self._keys_uid(self._defaultUser)
@@ -205,6 +226,7 @@
                     userkeys.append(key)
         return tuple(userkeys)
 
+
     def update(self, other):
         # FIXME: direct tests.
         # FIXME: support positional signature (although since strings aren't
@@ -212,20 +234,25 @@
         for key in other:
             self[key] = other[key]
 
+
     # Per-user property handling
     def isShadowableProperty(self, key):
         return key in self._shadowableKeys
-    
+
+
     def isGlobalProperty(self, key):
         return key in self._globalKeys
 
+
     def copyAllProperties(self, other):
         """
         Copy all the properties from another store into this one. This needs to be done
         independently of the UID. Each underlying store will need to implement this.
         """
         pass
-        
+
+
+
 # FIXME: Actually, we should replace this with calls to IPropertyName()
 def validKey(key):
     # Used by implementations to verify that keys are valid

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/file.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/file.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/file.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -34,8 +34,6 @@
 
 from twisted.internet.defer import inlineCallbacks, returnValue, succeed, fail
 
-from twisted.python.failure import Failure
-
 from twext.python.vcomponent import VComponent
 from txdav.xml import element as davxml
 from txdav.xml.rfc2518 import ResourceType, GETContentType
@@ -56,7 +54,7 @@
     IndexSchedule as OldInboxIndex
 from txdav.caldav.datastore.util import (
     validateCalendarComponent, dropboxIDFromCalendarObject, CalendarObjectBase,
-    StorageTransportBase
+    StorageTransportBase, AttachmentRetrievalTransport
 )
 
 from txdav.common.datastore.file import (
@@ -737,6 +735,7 @@
     def write(self, data):
         # FIXME: multiple chunks
         self._file.write(data)
+        return super(AttachmentStorageTransport, self).write(data)
 
 
     def loseConnection(self):
@@ -801,12 +800,7 @@
 
 
     def retrieve(self, protocol):
-        # FIXME: makeConnection
-        # FIXME: actually stream
-        # FIMXE: connectionLost
-        protocol.dataReceived(self._path.getContent())
-        # FIXME: ConnectionDone, not NotImplementedError
-        protocol.connectionLost(Failure(NotImplementedError()))
+        return AttachmentRetrievalTransport(self._path).start(protocol)
 
 
     @property

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/sql.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/sql.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -32,9 +32,7 @@
 from twext.python.filepath import CachingFilePath
 
 from twisted.internet.defer import inlineCallbacks, returnValue
-from twisted.internet.error import ConnectionLost
 from twisted.python import hashlib
-from twisted.python.failure import Failure
 
 from twistedcaldav import caldavxml, customxml
 from twistedcaldav.caldavxml import ScheduleCalendarTransp, Opaque
@@ -80,6 +78,8 @@
 from pycalendar.duration import PyCalendarDuration
 from pycalendar.timezone import PyCalendarTimezone
 
+from txdav.caldav.datastore.util import AttachmentRetrievalTransport
+
 from zope.interface.declarations import implements
 
 import os
@@ -1531,8 +1531,7 @@
 
 
     def retrieve(self, protocol):
-        protocol.dataReceived(self._path.getContent())
-        protocol.connectionLost(Failure(ConnectionLost()))
+        return AttachmentRetrievalTransport(self._path).start(protocol)
 
 
     _removeStatement = Delete(

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/test/common.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/test/common.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/test/common.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -2080,24 +2080,21 @@
         t.write(sampleData)
         yield t.loseConnection()
         yield self.exceedQuotaTest(get)
+        @inlineCallbacks
         def checkOriginal():
-            catch = StringIO()
-            catch.dataReceived = catch.write
-            lost = []
-            catch.connectionLost = lost.append
-            attachment.retrieve(catch)
+            actual = yield self.attachmentToString(attachment)
             expected = sampleData
             # note: 60 is less than len(expected); trimming is just to make
             # the error message look sane when the test fails.
-            actual = catch.getvalue()[:60]
+            actual = actual[:60]
             self.assertEquals(actual, expected)
-        checkOriginal()
+        yield checkOriginal()
         yield self.commit()
         # Make sure that things go back to normal after a commit of that
         # transaction.
         obj = yield self.calendarObjectUnderTest()
         attachment = yield get()
-        checkOriginal()
+        yield checkOriginal()
 
 
     def test_removeAttachmentWithName(self, refresh=lambda x:x):

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/util.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/caldav/datastore/util.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -45,6 +45,11 @@
     InternalDataStoreError, HomeChildNameAlreadyExistsError
 )
 from txdav.base.datastore.util import normalizeUUIDOrNot
+from twisted.protocols.basic import FileSender
+from twisted.internet.interfaces import ITransport
+from twisted.internet.interfaces import IConsumer
+from twisted.internet.error import ConnectionLost
+from twisted.internet.task import LoopingCall
 
 log = Logger()
 
@@ -219,7 +224,7 @@
                 if exists is None:
                     newattachment = yield outObject.createAttachmentWithName(name)
                     transport = newattachment.store(ctype)
-                    proto =_AttachmentMigrationProto(transport)
+                    proto = _AttachmentMigrationProto(transport)
                     attachment.retrieve(proto)
                     yield proto.done
 
@@ -262,18 +267,18 @@
     # usual suspects.
     contentTypes.update(
         {
-            '.conf':  'text/plain',
-            '.diff':  'text/plain',
-            '.exe':   'application/x-executable',
-            '.flac':  'audio/x-flac',
-            '.java':  'text/plain',
-            '.ogg':   'application/ogg',
-            '.oz':    'text/x-oz',
-            '.swf':   'application/x-shockwave-flash',
-            '.tgz':   'application/x-gtar',
-            '.wml':   'text/vnd.wap.wml',
-            '.xul':   'application/vnd.mozilla.xul+xml',
-            '.py':    'text/plain',
+            '.conf': 'text/plain',
+            '.diff': 'text/plain',
+            '.exe': 'application/x-executable',
+            '.flac': 'audio/x-flac',
+            '.java': 'text/plain',
+            '.ogg': 'application/ogg',
+            '.oz': 'text/x-oz',
+            '.swf': 'application/x-shockwave-flash',
+            '.tgz': 'application/x-gtar',
+            '.wml': 'text/vnd.wap.wml',
+            '.xul': 'application/vnd.mozilla.xul+xml',
+            '.py': 'text/plain',
             '.patch': 'text/plain',
         }
     )
@@ -299,9 +304,15 @@
         self.storeTransport = storeTransport
         self.done = Deferred()
 
+
+    def connectionMade(self):
+        self.storeTransport.registerProducer(self.transport, False)
+
+
     def dataReceived(self, data):
         self.storeTransport.write(data)
 
+
     @inlineCallbacks
     def connectionLost(self, reason):
         try:
@@ -462,14 +473,34 @@
         Create a storage transport with a reference to an L{IAttachment} and a
         L{twext.web2.http_headers.MimeType}.
         """
+        from twisted.internet import reactor
+        self._clock = reactor
         self._attachment = attachment
         self._contentType = contentType
+        self._producer = None
 
         # Make sure we have some kind of content-type
         if self._contentType is None:
             self._contentType = http_headers.MimeType.fromString(getType(self._attachment.name(), self.contentTypes))
 
 
+    def write(self, data):
+        """
+        Children must override this to actually write the data, but should
+        upcall this implementation to interact properly with producers.
+        """
+        if self._producer and self._streamingProducer:
+            # XXX this needs to be in a callLater because otherwise
+            # resumeProducing will call write which will call resumeProducing
+            # (etc) forever.
+            self._clock.callLater(0, self._producer.resumeProducing)
+
+
+    def registerProducer(self, producer, streaming):
+        self._producer = producer
+        self._streamingProducer = streaming
+
+
     def getPeer(self):
         return StorageTransportAddress(self._attachment, False)
 
@@ -482,7 +513,84 @@
         return self.write(''.join(seq))
 
 
+    def stopProducing(self):
+        return self.loseConnection()
 
+
+
+class AttachmentRetrievalTransport(FileSender, object):
+    """
+    The transport for a protocol that does L{IAttachment.retrieve}.
+    """
+    implements(ITransport)
+
+    def __init__(self, filePath):
+        from twisted.internet import reactor
+        self.filePath = filePath
+        self.clock = reactor
+
+
+    def start(self, protocol):
+        this = self
+        class Consumer(object):
+            implements(IConsumer)
+            def registerProducer(self, producer, streaming):
+                protocol.makeConnection(producer)
+                this._maybeLoopDelivery()
+            def write(self, data):
+                protocol.dataReceived(data)
+            def unregisterProducer(self):
+                this._done(protocol)
+        self.beginFileTransfer(self.filePath.open(), Consumer())
+
+
+    def _done(self, protocol):
+        if self._deliveryLoop:
+            self._deliveryLoop.stop()
+        protocol.connectionLost(Failure(ConnectionLost()))
+
+
+    def write(self, data):
+        raise NotImplemented("This is a read-only transport.")
+
+
+    def writeSequence(self, datas):
+        self.write("".join(datas))
+
+
+    def loseConnection(self):
+        pass
+
+
+    def getPeer(self):
+        return self
+
+
+    def getHost(self):
+        return self
+
+    _everResumedProducing = False
+
+    def resumeProducing(self):
+        self._everResumedProducing = True
+        super(AttachmentRetrievalTransport, self).resumeProducing()
+
+    _deliveryLoop = None
+
+    def _maybeLoopDelivery(self):
+        """
+        If no consumer was registered (as inferred by the fact that
+        resumeProducing() wasn't called), deliver chunks via a timed loop.
+        """
+        if not self._everResumedProducing:
+            # Not registered as a streaming producer.
+            def deliverNextChunk():
+                super(AttachmentRetrievalTransport, self).resumeProducing()
+            self._deliveryLoop = LoopingCall(deliverNextChunk)
+            self._deliveryLoop.start(0.01, True)
+
+
+
 def fixOneCalendarObject(component):
     """
     Correct the properties which may contain a user's directory UUID within a

Modified: CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/common/datastore/sql.py	2012-10-12 17:26:54 UTC (rev 9936)
+++ CalendarServer/branches/users/cdaboo/ischedule-dkim/txdav/common/datastore/sql.py	2012-10-15 19:23:10 UTC (rev 9937)
@@ -80,7 +80,7 @@
 
 from txdav.common.icommondatastore import ConcurrentModification
 from twistedcaldav.customxml import NotificationType
-from twistedcaldav.dateops import datetimeMktime, parseSQLTimestamp,\
+from twistedcaldav.dateops import datetimeMktime, parseSQLTimestamp, \
     pyCalendarTodatetime
 
 from txdav.base.datastore.util import normalizeUUIDOrNot
@@ -217,6 +217,7 @@
         self._migrating = state
         self._enableNotifications = not state
 
+
     def setUpgrading(self, state):
         """
         Set the "upgrading" state
@@ -224,17 +225,19 @@
         self._enableNotifications = not state
 
 
+
 class TransactionStatsCollector(object):
     """
     Used to log each SQL query and statistics about that query during the course of a single transaction.
     Results can be printed out where ever needed at the end of the transaction.
     """
-    
+
     def __init__(self, label, logFileName=None):
         self.label = label
         self.logFileName = logFileName
         self.statements = []
-    
+
+
     def startStatement(self, sql, args):
         """
         Called prior to an SQL query being run.
@@ -243,7 +246,7 @@
         @type sql: C{str}
         @param args: the arguments (binds) to the SQL statement
         @type args: C{list}
-        
+
         @return: C{tuple} containing the index in the statement list for this statement, and the start time
         """
         args = ["%s" % (arg,) for arg in args]
@@ -251,6 +254,7 @@
         self.statements.append(["%s %s" % (sql, args,), 0, 0])
         return len(self.statements) - 1, time.time()
 
+
     def endStatement(self, context, rows):
         """
         Called after an SQL query has executed.
@@ -263,12 +267,13 @@
         index, tstamp = context
         self.statements[index][1] = len(rows) if rows else 0
         self.statements[index][2] = time.time() - tstamp
-        
+
+
     def printReport(self):
         """
         Print a report of all the SQL statements executed to date.
         """
-        
+
         toFile = StringIO()
         toFile.write("*** SQL Stats ***\n")
         toFile.write("\n")
@@ -283,18 +288,20 @@
             toFile.write("Rows: %s\n" % (rows,))
             toFile.write("Time (ms): %.3f\n" % (t,))
         toFile.write("***\n\n")
-        
+
         if self.logFileName:
             open(self.logFileName, "a").write(toFile.getvalue())
         else:
             log.error(toFile.getvalue())
 
+
+
 class CommonStoreTransactionMonitor(object):
     """
     Object that monitors the state of a transaction over time and logs or times out
     the transaction.
     """
-    
+
     callLater = reactor.callLater
 
     def __init__(self, txn, logTimerSeconds, timeoutSeconds):
@@ -303,13 +310,14 @@
         self.delayedTimeout = None
         self.logTimerSeconds = logTimerSeconds
         self.timeoutSeconds = timeoutSeconds
-        
+
         self.txn.postCommit(self._cleanTxn)
         self.txn.postAbort(self._cleanTxn)
-        
+
         self._installLogTimer()
         self._installTimeout()
-    
+
+
     def _cleanTxn(self):
         self.txn = None
         if self.delayedLog:
@@ -319,6 +327,7 @@
             self.delayedTimeout.cancel()
             self.delayedTimeout = None
 
+
     def _installLogTimer(self):
         def _logTransactionWait():
             if self.txn is not None:
@@ -327,7 +336,8 @@
 
         if self.logTimerSeconds:
             self.delayedLog = self.callLater(self.logTimerSeconds, _logTransactionWait)
-    
+
+
     def _installTimeout(self):
         def _forceAbort():
             if self.txn is not None:
@@ -439,11 +449,12 @@
     def _calendarserver(cls): #@NoSelf
         cs = schema.CALENDARSERVER
         return Select(
-            [cs.VALUE,],
+            [cs.VALUE, ],
             From=cs,
             Where=cs.NAME == Parameter('name'),
         )
 
+
     @inlineCallbacks
     def calendarserverValue(self, key):
         result = yield self._calendarserver.on(self, name=key)
@@ -507,6 +518,7 @@
         """
         return NotificationCollection.notificationsWithUID(self, uid, create)
 
+
     @classproperty
     def _insertAPNSubscriptionQuery(cls): #@NoSelf
         apn = schema.APN_SUBSCRIPTIONS
@@ -648,9 +660,11 @@
     def isNotifiedAlready(self, obj):
         return obj in self._notifiedAlready
 
+
     def notificationAddedForObject(self, obj):
         self._notifiedAlready.add(obj)
 
+
     def isBumpedAlready(self, obj):
         """
         Indicates whether or not bumpAddedForObject has already been
@@ -659,6 +673,7 @@
         """
         return obj in self._bumpedAlready
 
+
     def bumpAddedForObject(self, obj):
         """
         Records the fact that a bumpModified( ) call has already been
@@ -754,13 +769,14 @@
             # caller shouldn't be paying attention anyway.
             block.end()
 
+
     @inlineCallbacks
     def execSQL(self, *a, **kw):
         """
         Execute some SQL (delegate to L{IAsyncTransaction}).
         """
-        if self._stats:        
-            statsContext = self._stats.startStatement(a[0], a[1])
+        if self._stats:
+            statsContext = self._stats.startStatement(a[0], a[1] if len(a) > 1 else ())
         self.currentStatement = a[0]
         if self._store.logTransactionWaits and a[0].split(" ", 1)[0].lower() in ("insert", "update", "delete",):
             self.iudCount += 1
@@ -773,18 +789,19 @@
             results = (yield self._sqlTxn.execSQL(*a, **kw))
         finally:
             self.currentStatement = None
-            if self._stats:        
+            if self._stats:
                 self._stats.endStatement(statsContext, results)
         returnValue(results)
 
+
     @inlineCallbacks
     def execSQLBlock(self, sql):
         """
         Execute a block of SQL by parsing it out into individual statements and execute
         each of those.
-        
+
         FIXME: temporary measure for handling large schema upgrades. This should NOT be used
-        for regular SQL operations - only upgrades. 
+        for regular SQL operations - only upgrades.
         """
         parsed = parse(sql)
         for stmt in parsed:
@@ -795,6 +812,7 @@
             stmt = str(stmt).rstrip(";")
             yield self.execSQL(stmt)
 
+
     def commit(self):
         """
         Commit the transaction and execute any post-commit hooks.
@@ -816,7 +834,7 @@
         co = schema.CALENDAR_OBJECT
         cb = schema.CALENDAR_BIND
         tr = schema.TIME_RANGE
-        kwds = { }
+        kwds = {}
         if limited:
             kwds["Limit"] = Parameter("batchSize")
         return Select(
@@ -828,9 +846,9 @@
             ],
             From=ch.join(co).join(cb).join(tr),
             Where=(
-                ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID     ).And(
-                tr.CALENDAR_OBJECT_RESOURCE_ID == co.RESOURCE_ID   ).And(
-                cb.CALENDAR_RESOURCE_ID == tr.CALENDAR_RESOURCE_ID ).And(
+                ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID).And(
+                tr.CALENDAR_OBJECT_RESOURCE_ID == co.RESOURCE_ID).And(
+                cb.CALENDAR_RESOURCE_ID == tr.CALENDAR_RESOURCE_ID).And(
                 cb.BIND_MODE == _BIND_MODE_OWN
             ),
             GroupBy=(
@@ -864,7 +882,7 @@
             if cutoff < truncateLowerLimit:
                 raise ValueError("Cannot query events older than %s" % (truncateLowerLimit.getText(),))
 
-        kwds = { "CutOff" : pyCalendarTodatetime(cutoff) }
+        kwds = {"CutOff": pyCalendarTodatetime(cutoff)}
         if batchSize is not None:
             kwds["batchSize"] = batchSize
             query = self._oldEventsLimited
@@ -943,20 +961,27 @@
         count = 0
         for dropboxID, path in results:
             attachment = Attachment(self, dropboxID, path)
-            (yield attachment.remove( ))
+            (yield attachment.remove())
             count += 1
         returnValue(count)
 
 
+
 class _EmptyCacher(object):
+
     def set(self, key, value):
         return succeed(True)
+
+
     def get(self, key, withIdentifier=False):
         return succeed(None)
+
+
     def delete(self, key):
         return succeed(True)
 
 
+
 class CommonHome(LoggingMixIn):
 
     # All these need to be initialized by derived classes for each store type
@@ -969,10 +994,10 @@
     _notifierPrefix = None
     _revisionsTable = None
     _notificationRevisionsTable = NOTIFICATION_OBJECT_REVISIONS_TABLE
-    
+
     _dataVersionKey = None
     _dataVersionValue = None
-    
+
     _cacher = None  # Initialize in derived classes
 
     def __init__(self, transaction, ownerUID, notifiers):
@@ -1017,6 +1042,7 @@
         return Select([home.RESOURCE_ID],
                       From=home, Where=home.OWNER_UID == Parameter("ownerUID"))
 
+
     @classproperty
     def _ownerFromResourceID(cls): #@NoSelf
         home = cls._homeSchema
@@ -1024,6 +1050,7 @@
                       From=home,
                       Where=home.RESOURCE_ID == Parameter("resourceID"))
 
+
     @classproperty
     def _metaDataQuery(cls): #@NoSelf
         metadata = cls._homeMetaDataSchema
@@ -1031,6 +1058,7 @@
                       From=metadata,
                       Where=metadata.RESOURCE_ID == Parameter("resourceID"))
 
+
     @inlineCallbacks
     def initFromStore(self, no_cache=False):
         """
@@ -1183,13 +1211,14 @@
         self._childrenLoaded = True
         returnValue(results)
 
+
     def listChildren(self):
         """
         Retrieve the names of the children in this home.
 
         @return: an iterable of C{str}s.
         """
-        
+
         if self._childrenLoaded:
             return succeed(self._children.keys())
         else:
@@ -1201,7 +1230,7 @@
         Retrieve the names of the invited children in this home.
 
         @return: an iterable of C{str}s.
-        """ 
+        """
         return self._childClass.listInvitedObjects(self)
 
 
@@ -1216,6 +1245,7 @@
         """
         return self._childClass.objectWithName(self, name)
 
+
     @memoizedKey("resourceID", "_children")
     def childWithID(self, resourceID):
         """
@@ -1227,6 +1257,7 @@
         """
         return self._childClass.objectWithID(self, resourceID)
 
+
     def invitedChildWithName(self, name):
         """
         Retrieve the invited child with the given C{name} contained in this
@@ -1237,6 +1268,7 @@
         """
         return self._childClass.invitedObjectWithName(self, name)
 
+
     @inlineCallbacks
     def createChildWithName(self, name):
         if name.startswith("."):
@@ -1246,6 +1278,7 @@
         child = (yield self.childWithName(name))
         returnValue(child)
 
+
     @inlineCallbacks
     def removeChildWithName(self, name):
         child = yield self.childWithName(name)
@@ -1380,12 +1413,12 @@
                                     rev.RESOURCE_ID == shareID)).on(self._txn))
                     if name
                 ]
-    
+
                 for path, name, wasdeleted in results:
                     if wasdeleted:
                         if sharetoken:
                             deleted.append("%s/%s" % (path, name,))
-    
+
                 for path, name, wasdeleted in results:
                     changed.append("%s/%s" % (path, name,))
 
@@ -1551,7 +1584,7 @@
                                            resourceID=self._resourceID)
             self._quotaUsedBytes = 0
 
-    
+
     def addNotifier(self, notifier):
         if self._notifiers is None:
             self._notifiers = ()
@@ -1575,6 +1608,7 @@
         else:
             returnValue(None)
 
+
     @classproperty
     def _lockLastModifiedQuery(cls): #@NoSelf
         meta = cls._homeMetaDataSchema
@@ -1585,6 +1619,7 @@
             NoWait=True
         )
 
+
     @classproperty
     def _changeLastModifiedQuery(cls): #@NoSelf
         meta = cls._homeMetaDataSchema
@@ -1592,6 +1627,7 @@
                       Where=meta.RESOURCE_ID == Parameter("resourceID"),
                       Return=meta.MODIFIED)
 
+
     @inlineCallbacks
     def bumpModified(self):
         """
@@ -1614,7 +1650,7 @@
             yield self._lockLastModifiedQuery.on(subtxn, resourceID=self._resourceID)
             result = (yield self._changeLastModifiedQuery.on(subtxn, resourceID=self._resourceID))
             returnValue(result)
-            
+
         try:
             self._modified = (yield self._txn.subtransaction(_bumpModified, retries=0, failureOK=True))[0][0]
             queryCacher = self._txn._queryCacher
@@ -1625,6 +1661,7 @@
         except AllRetriesFailed:
             log.debug("CommonHome.bumpModified failed")
 
+
     @inlineCallbacks
     def notifyChanged(self):
         """
@@ -1664,6 +1701,7 @@
         _ignore_uuid, revision = token.split("_", 1)
         return int(revision)
 
+
     @inlineCallbacks
     def syncToken(self):
         if self._syncTokenRevision is None:
@@ -1690,7 +1728,7 @@
 
     @inlineCallbacks
     def resourceNamesSinceToken(self, token):
-        
+
         if token is None:
             token = 0
         elif isinstance(token, str):
@@ -1702,7 +1740,7 @@
             (yield self._objectNamesSinceRevisionQuery.on(
                 self._txn, revision=token, resourceID=self._resourceID))
         ]
-        results.sort(key=lambda x:x[1])
+        results.sort(key=lambda x: x[1])
 
         changed = []
         deleted = []
@@ -1913,16 +1951,16 @@
                 self._syncTokenRevision = rows[0][0]
             else:
                 action = "insert"
-        
+
         if action == "insert":
             # Note that an "insert" may happen for a resource that previously
             # existed and then was deleted. In that case an entry in the
             # REVISIONS table still exists so we have to detect that and do db
             # INSERT or UPDATE as appropriate
 
-            found = bool( (
+            found = bool((
                 yield self._insertFindPreviouslyNamedQuery.on(
-                    self._txn, resourceID=self._resourceID, name=name)) )
+                    self._txn, resourceID=self._resourceID, name=name)))
             if found:
                 self._syncTokenRevision = (
                     yield self._updatePreviouslyNamedQuery.on(
@@ -1957,19 +1995,19 @@
 
     _objectResourceClass = None
 
-    _bindSchema              = None
-    _homeSchema              = None
-    _homeChildSchema          = None
+    _bindSchema = None
+    _homeSchema = None
+    _homeChildSchema = None
     _homeChildMetaDataSchema = None
-    _revisionsSchema          = None
-    _objectSchema              = None
+    _revisionsSchema = None
+    _objectSchema = None
 
-    _bindTable           = None
-    _homeChildTable      = None
-    _homeChildBindTable  = None
-    _revisionsTable      = None
-    _revisionsBindTable  = None
-    _objectTable         = None
+    _bindTable = None
+    _homeChildTable = None
+    _homeChildBindTable = None
+    _revisionsTable = None
+    _revisionsBindTable = None
+    _objectTable = None
 
 
     def __init__(self, home, name, resourceID, mode, status, message=None, ownerHome=None):
@@ -1981,20 +2019,20 @@
         else:
             notifiers = None
 
-        self._home              = home
-        self._name              = name
-        self._resourceID        = resourceID
-        self._bindMode          = mode
-        self._bindStatus         = status
-        self._bindMessage         = message
-        self._ownerHome         = home if ownerHome is None else ownerHome
-        self._created           = None
-        self._modified          = None
-        self._objects           = {}
-        self._objectNames       = None
+        self._home = home
+        self._name = name
+        self._resourceID = resourceID
+        self._bindMode = mode
+        self._bindStatus = status
+        self._bindMessage = message
+        self._ownerHome = home if ownerHome is None else ownerHome
+        self._created = None
+        self._modified = None
+        self._objects = {}
+        self._objectNames = None
         self._syncTokenRevision = None
-        self._notifiers         = notifiers
-        self._index             = None  # Derived classes need to set this
+        self._notifiers = notifiers
+        self._index = None  # Derived classes need to set this
 
 
     @classproperty
@@ -2013,14 +2051,15 @@
         different child classes to have their own type specific data, but still make use of the
         common base logic.
         """
-        
+
         # Common behavior is to have created and modified
-        
+
         return (
             cls._homeChildMetaDataSchema.CREATED,
             cls._homeChildMetaDataSchema.MODIFIED,
         )
-        
+
+
     @classmethod
     def metadataAttributes(cls):
         """
@@ -2028,14 +2067,15 @@
         different child classes to have their own type specific data, but still make use of the
         common base logic.
         """
-        
+
         # Common behavior is to have created and modified
-        
+
         return (
             "_created",
             "_modified",
         )
-        
+
+
     @classmethod
     @inlineCallbacks
     def listObjects(cls, home):
@@ -2057,6 +2097,7 @@
         return cls._bindFor((bind.HOME_RESOURCE_ID == Parameter("homeID"))
                             .And(bind.BIND_STATUS != _BIND_STATUS_ACCEPTED))
 
+
     @classmethod
     @inlineCallbacks
     def listInvitedObjects(cls, home):
@@ -2099,11 +2140,11 @@
     def _updateBindColumnsQuery(cls, columnMap): #@NoSelf
         bind = cls._bindSchema
         return Update(columnMap,
-                      Where=
-                      (bind.RESOURCE_ID == Parameter("resourceID"))
+                      Where=(bind.RESOURCE_ID == Parameter("resourceID"))
                       .And(bind.HOME_RESOURCE_ID == Parameter("homeID")),
                       Return=bind.RESOURCE_NAME)
 
+
     @classproperty
     def _updateBindQuery(cls): #@NoSelf
         bind = cls._bindSchema
@@ -2137,10 +2178,10 @@
         @return: the name of the shared calendar in the new calendar home.
         @rtype: L{str}
         """
-        
+
         if status is None:
             status = _BIND_STATUS_ACCEPTED
-        
+
         @inlineCallbacks
         def doInsert(subt):
             newName = str(uuid4())
@@ -2160,7 +2201,7 @@
                 mode=mode, status=status, message=message,
                 resourceID=self._resourceID, homeID=shareeHome._resourceID
             ))[0][0]
-                
+
         # Must send notification to ensure cache invalidation occurs
         yield self.notifyChanged()
 
@@ -2200,46 +2241,45 @@
 
         #remove None parameters, and substitute None for empty string
         bind = self._bindSchema
-        columnMap = dict([(k, v if v else None) 
-                          for k,v in {bind.BIND_MODE:mode,
+        columnMap = dict([(k, v if v else None)
+                          for k, v in {bind.BIND_MODE:mode,
                             bind.BIND_STATUS:status,
                             bind.MESSAGE:message,
                             bind.RESOURCE_NAME:name}.iteritems() if v is not None])
-                                
+
         if len(columnMap):
-            
+
             #TODO:  with bit of parameter wrangling, call shareWith() here instead.
             sharedname = yield self._updateBindColumnsQuery(columnMap).on(
                             self._txn,
                             resourceID=self._resourceID, homeID=shareeView._home._resourceID
                         )
-            
+
             #update affected attributes
             if mode:
                 shareeView._bindMode = columnMap[bind.BIND_MODE]
-                
+
             if status:
                 shareeView._bindStatus = columnMap[bind.BIND_STATUS]
                 if shareeView._bindStatus == _BIND_STATUS_ACCEPTED:
                     yield shareeView._initSyncToken()
                 elif shareeView._bindStatus == _BIND_STATUS_DECLINED:
-                    shareeView._deletedSyncToken(sharedRemoval=True);
+                    shareeView._deletedSyncToken(sharedRemoval=True)
 
             if message:
                 shareeView._bindMessage = columnMap[bind.MESSAGE]
-            
+
             queryCacher = self._txn._queryCacher
             if queryCacher:
                 cacheKey = queryCacher.keyForObjectWithName(shareeView._home._resourceID, shareeView._name)
                 queryCacher.invalidateAfterCommit(self._txn, cacheKey)
-            
+
             shareeView._name = sharedname[0][0]
 
             # Must send notification to ensure cache invalidation occurs
             yield self.notifyChanged()
-                    
+
         returnValue(shareeView._name)
-        
 
 
     @inlineCallbacks
@@ -2255,20 +2295,19 @@
 
         @return: a L{Deferred} which will fire with the previously-used name.
         """
-        
-        
+
         #remove sync tokens
         shareeChildren = yield shareeHome.children()
         for shareeChild in shareeChildren:
             if not shareeChild.owned() and shareeChild._resourceID == self._resourceID:
-                shareeChild._deletedSyncToken(sharedRemoval=True);
- 
+                shareeChild._deletedSyncToken(sharedRemoval=True)
+
                 queryCacher = self._txn._queryCacher
                 if queryCacher:
                     cacheKey = queryCacher.keyForObjectWithName(shareeHome._resourceID, shareeChild._name)
                     queryCacher.invalidateAfterCommit(self._txn, cacheKey)
 
-                break;
+                break
 
         bind = self._bindSchema
         rows = yield Delete(
@@ -2278,15 +2317,15 @@
             Return=bind.RESOURCE_NAME,
         ).on(self._txn, resourceID=self._resourceID,
              homeID=shareeHome._resourceID)
-        
+
         resourceName = None
         if rows:
             resourceName = rows[0][0]
             shareeHome._children.pop(resourceName, None)
-        
+
         # Must send notification to ensure cache invalidation occurs
         yield self.notifyChanged()
-       
+
         returnValue(resourceName)
 
 
@@ -2296,30 +2335,35 @@
         """
         return self._bindMode
 
+
     def owned(self):
         """
         @see: L{ICalendar.owned}
         """
         return self._bindMode == _BIND_MODE_OWN
 
+
     def shareStatus(self):
         """
         @see: L{ICalendar.shareStatus}
         """
         return self._bindStatus
 
+
     def shareMessage(self):
         """
         @see: L{ICalendar.shareMessage}
         """
         return self._bindMessage
 
+
     def shareUID(self):
         """
         @see: L{ICalendar.shareUID}
         """
         return self.name()
 
+
     @inlineCallbacks
     def unshare(self, homeType):
         """
@@ -2354,6 +2398,7 @@
                   Where=condition
         )
 
+
     @classproperty
     def _sharedBindForResourceID(cls): #@NoSelf
         bind = cls._bindSchema
@@ -2361,7 +2406,6 @@
                             .And(bind.BIND_STATUS == _BIND_STATUS_ACCEPTED)
                             .And(bind.BIND_MODE != _BIND_MODE_OWN)
                             )
-    
 
 
     @inlineCallbacks
@@ -2408,6 +2452,7 @@
                             .And(bind.BIND_STATUS != _BIND_STATUS_ACCEPTED)
                             )
 
+
     @inlineCallbacks
     def asInvited(self):
         """
@@ -2422,7 +2467,7 @@
         """
         if not self.owned():
             returnValue([])
-        
+
         rows = yield self._invitedBindForResourceID.on(
             self._txn, resourceID=self._resourceID, homeID=self._home._resourceID,
         )
@@ -2459,7 +2504,7 @@
         dataRows = (yield cls._childrenAndMetadataForHomeID.on(home._txn, homeID=home._resourceID))
 
         if dataRows:
-            
+
             # Get property stores for all these child resources (if any found)
             propertyStores = (yield PropertyStore.forMultipleResources(
                 home.uid(), home._txn,
@@ -2481,8 +2526,8 @@
         # Create the actual objects merging in properties
         for items in dataRows:
             bindMode, homeID, resourceID, resourceName, bindStatus, bindMessage = items[:6] #@UnusedVariable
-            metadata=items[7:]
-            
+            metadata = items[6:]
+
             if bindStatus == _BIND_MODE_OWN:
                 ownerHome = home
             else:
@@ -2501,6 +2546,11 @@
                 setattr(child, attr, value)
             child._syncTokenRevision = revisions[resourceID]
             propstore = propertyStores.get(resourceID, None)
+
+            # We have to re-adjust the property store object to account for possible shared
+            # collections as previously we loaded them all as if they were owned
+            if bindStatus != _BIND_MODE_OWN:
+                propstore._setDefaultUserUID(ownerHome.uid())
             yield child._loadPropertyStore(propstore)
             results.append(child)
         returnValue(results)
@@ -2537,7 +2587,7 @@
             returnValue(None)
 
         bindMode, homeID, resourceID, resourceName, bindStatus, bindMessage = rows[0] #@UnusedVariable
-        
+
         #TODO:  combine with _invitedBindForNameAndHomeID and sort results
         ownerHomeID = (yield cls._ownerHomeWithResourceID.on(
                         home._txn, resourceID=resourceID))[0][0]
@@ -2561,6 +2611,7 @@
                                .And(bind.BIND_STATUS == _BIND_STATUS_ACCEPTED)
                                )
 
+
     @classmethod
     @inlineCallbacks
     def objectWithName(cls, home, name):
@@ -2578,40 +2629,40 @@
         """
         rows = None
         queryCacher = home._txn._queryCacher
-        
+
         if queryCacher:
             # Retrieve data from cache
             cacheKey = queryCacher.keyForObjectWithName(home._resourceID, name)
             rows = yield queryCacher.get(cacheKey)
-            
+
         if rows is None:
             # No cached copy
             rows = yield cls._childForNameAndHomeID.on(home._txn, name=name, homeID=home._resourceID)
-                    
+
             if rows:
                 bindMode, homeID, resourceID, resourceName, bindStatus, bindMessage = rows[0] #@UnusedVariable
-                # get ownerHomeID                
+                # get ownerHomeID
                 if bindMode == _BIND_MODE_OWN:
                     ownerHomeID = homeID
                 else:
                     ownerHomeID = (yield cls._ownerHomeWithResourceID.on(
                                     home._txn, resourceID=resourceID))[0][0]
                 rows[0].append(ownerHomeID)
-            
+
             if rows and queryCacher:
                 # Cache the result
                 queryCacher.setAfterCommit(home._txn, cacheKey, rows)
-        
+
         if not rows:
             returnValue(None)
-        
+
         bindMode, homeID, resourceID, resourceName, bindStatus, bindMessage, ownerHomeID = rows[0] #@UnusedVariable
-        
+
         if bindMode == _BIND_MODE_OWN:
             ownerHome = home
         else:
             ownerHome = yield home._txn.homeWithResourceID(home._homeType, ownerHomeID)
-            
+
         child = cls(
             home=home,
             name=name, resourceID=resourceID,
@@ -2632,8 +2683,8 @@
         return cls._bindFor((bind.RESOURCE_ID == Parameter("resourceID"))
                                .And(bind.HOME_RESOURCE_ID == Parameter("homeID"))
                                )
-                               
-                               
+
+
     @classmethod
     @inlineCallbacks
     def objectWithID(cls, home, resourceID):
@@ -2650,9 +2701,9 @@
             home._txn, resourceID=resourceID, homeID=home._resourceID)
         if not rows:
             returnValue(None)
-                
+
         bindMode, homeID, resourceID, resourceName, bindStatus, bindMessage = rows[0] #@UnusedVariable
-        
+
         if bindMode == _BIND_MODE_OWN:
             ownerHome = home
         else:
@@ -2663,7 +2714,7 @@
             home=home,
             name=resourceName, resourceID=resourceID,
             mode=bindMode, status=bindStatus,
-            message=bindMessage,  ownerHome=ownerHome,
+            message=bindMessage, ownerHome=ownerHome,
         )
         yield child.initFromStore()
         returnValue(child)
@@ -2887,9 +2938,9 @@
 
         # Set to non-existent state
         self._resourceID = None
-        self._created    = None
-        self._modified   = None
-        self._objects    = {}
+        self._created = None
+        self._modified = None
+        self._objects = {}
 
         yield self.notifyChanged()
 
@@ -3140,7 +3191,7 @@
 
     @inlineCallbacks
     def removeObjectResourceWithName(self, name):
-        
+
         child = (yield self.objectResourceWithName(name))
         if child is None:
             raise NoSuchObjectResourceError
@@ -3149,24 +3200,26 @@
 
     @inlineCallbacks
     def removeObjectResourceWithUID(self, uid):
-        
+
         child = (yield self.objectResourceWithUID(uid))
         if child is None:
             raise NoSuchObjectResourceError
         yield self._removeObjectResource(child)
 
+
     @inlineCallbacks
     def _removeObjectResource(self, child):
         name = child.name()
         uid = child.uid()
         try:
             yield child.remove()
-        finally:        
+        finally:
             self._objects.pop(name, None)
             self._objects.pop(uid, None)
             yield self._deleteRevision(name)
             yield self.notifyChanged()
 
+
     @classproperty
     def _moveParentUpdateQuery(cls): #@NoSelf
         """
@@ -3178,6 +3231,7 @@
             Where=obj.RESOURCE_ID == Parameter("resourceID")
         )
 
+
     def _movedObjectResource(self, child, newparent):
         """
         Method that subclasses can override to do an extra DB adjustments when a resource
@@ -3185,12 +3239,13 @@
         """
         return succeed(True)
 
+
     @inlineCallbacks
     def moveObjectResource(self, child, newparent):
         """
         Move a child of this collection into another collection without actually removing/re-inserting the data.
         Make sure sync and cache details for both collections are updated.
-        
+
         TODO: check that the resource name does not exist in the new parent, or that the UID
         does not exist there too.
 
@@ -3209,7 +3264,7 @@
         self._objects.pop(child._resourceID, None)
         yield self._deleteRevision(name)
         yield self.notifyChanged()
-        
+
         # Adjust the child to be a child of the new parent and update ancillary tables
         yield self._moveParentUpdateQuery.on(
             self._txn,
@@ -3223,6 +3278,7 @@
         yield newparent._insertRevision(name)
         yield newparent.notifyChanged()
 
+
     def objectResourcesHaveProperties(self):
         return False
 
@@ -3252,6 +3308,7 @@
 
         @param props: the L{PropertyStore} from C{properties()}.
         """
+        pass
 
 
     # IDataStoreObject
@@ -3279,13 +3336,15 @@
         if self._notifiers is None:
             self._notifiers = ()
         self._notifiers += (notifier,)
- 
+
+
     def notifierID(self, label="default"):
         if self._notifiers:
             return self._notifiers[0].getID(label)
         else:
             return None
 
+
     @inlineCallbacks
     def nodeName(self, label="default"):
         if self._notifiers:
@@ -3296,6 +3355,7 @@
         else:
             returnValue(None)
 
+
     @classproperty
     def _lockLastModifiedQuery(cls): #@NoSelf
         schema = cls._homeChildMetaDataSchema
@@ -3306,6 +3366,7 @@
             NoWait=True
         )
 
+
     @classproperty
     def _changeLastModifiedQuery(cls): #@NoSelf
         schema = cls._homeChildMetaDataSchema
@@ -3344,6 +3405,7 @@
         except AllRetriesFailed:
             log.debug("CommonHomeChild.bumpModified failed")
 
+
     @inlineCallbacks
     def notifyChanged(self):
         """
@@ -3361,6 +3423,7 @@
             self._txn.notificationAddedForObject(self)
 
 
+
 class CommonObjectResource(LoggingMixIn, FancyEqMixin):
     """
     Base class for object resources.
@@ -3387,7 +3450,7 @@
         self._created = None
         self._modified = None
         self._objectText = None
-        
+
         self._locked = False
 
 
@@ -3438,6 +3501,7 @@
 
         returnValue(results)
 
+
     @classmethod
     def _allColumnsWithParentAndNames(cls, names): #@NoSelf
         obj = cls._objectSchema
@@ -3458,9 +3522,10 @@
             result_batch = (yield cls._loadAllObjectsWithNames(parent, names[:cls.BATCH_LOAD_SIZE]))
             results.extend(result_batch)
             names = names[cls.BATCH_LOAD_SIZE:]
-        
+
         returnValue(results)
-            
+
+
     @classmethod
     @inlineCallbacks
     def _loadAllObjectsWithNames(cls, parent, names):
@@ -3510,11 +3575,13 @@
         objectResource = cls(parent, name, uid, None)
         return objectResource.initFromStore()
 
+
     @classmethod
     def objectWithID(cls, parent, resourceID):
         objectResource = cls(parent, None, None, resourceID)
         return objectResource.initFromStore()
 
+
     @classmethod
     @inlineCallbacks
     def create(cls, parent, name, component, metadata):
@@ -3525,14 +3592,14 @@
 
         if name.startswith("."):
             raise ObjectResourceNameNotAllowedError(name)
-        
+
         objectResource = cls(parent, name, None, None, metadata=metadata)
         yield objectResource.setComponent(component, inserting=True)
         yield objectResource._loadPropertyStore(created=True)
 
         # Note: setComponent triggers a notification, so we don't need to
         # call notify( ) here like we do for object removal.
-        
+
         returnValue(objectResource)
 
 
@@ -3644,7 +3711,7 @@
         self.initPropertyStore(props)
         self._propertyStore = props
 
-    
+
     def properties(self):
         return self._propertyStore
 
@@ -3691,11 +3758,11 @@
         @type wait: C{bool}
         @param txn: alternative transaction to use
         @type txn: L{CommonStoreTransaction}
-        
+
         @raise: L{NoSuchObjectResourceError} if resource does not exist, other L{Exception}
                 if already locked and NOWAIT is used.
         """
-        
+
         txn = txn if txn is not None else self._txn
         yield self._selectForUpdateQuery(not wait).on(txn, NoSuchObjectResourceError, resourceID=self._resourceID)
         self._locked = True
@@ -3713,6 +3780,7 @@
     def componentType(self):
         returnValue((yield self.component()).mainType())
 
+
     @classproperty
     def _deleteQuery(cls): #@NoSelf
         """
@@ -3737,6 +3805,7 @@
         self._modified = None
         self._objectText = None
 
+
     def uid(self):
         return self._uid
 
@@ -3745,7 +3814,6 @@
         return self._name
 
 
-
     # IDataStoreObject
     def contentType(self):
         raise NotImplementedError()
@@ -3833,12 +3901,10 @@
             notifiers = None
         self._notifiers = notifiers
 
-
     _resourceIDFromUIDQuery = Select(
         [_homeSchema.RESOURCE_ID], From=_homeSchema,
         Where=_homeSchema.OWNER_UID == Parameter("uid"))
 
-
     _provisionNewNotificationsQuery = Insert(
         {_homeSchema.OWNER_UID: Parameter("uid")},
         Return=_homeSchema.RESOURCE_ID
@@ -3920,16 +3986,16 @@
 
     def uid(self):
         return self._uid
-    
 
+
     def owned(self):
         return True
 
 
     def ownerHome(self):
         return self._home
-    
 
+
     def viewerHome(self):
         return self._home
 
@@ -3942,7 +4008,6 @@
         self._notificationNames = sorted([result.name() for result in results])
         returnValue(results)
 
-
     _notificationUIDsForHomeQuery = Select(
         [schema.NOTIFICATION.NOTIFICATION_UID], From=schema.NOTIFICATION,
         Where=schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID ==
@@ -4000,7 +4065,6 @@
     def removeNotificationObjectWithName(self, name):
         return self.removeNotificationObjectWithUID(self._nameToUID(name))
 
-
     _removeByUIDQuery = Delete(
         From=schema.NOTIFICATION,
         Where=(schema.NOTIFICATION.NOTIFICATION_UID == Parameter("uid")).And(
@@ -4015,7 +4079,6 @@
         self._notifications.pop(uid, None)
         yield self._deleteRevision("%s.xml" % (uid,))
 
-
     _initSyncTokenQuery = Insert(
         {
             _revisionsSchema.HOME_RESOURCE_ID : Parameter("resourceID"),
@@ -4031,7 +4094,6 @@
         self._syncTokenRevision = (yield self._initSyncTokenQuery.on(
             self._txn, resourceID=self._resourceID))[0][0]
 
-
     _syncTokenQuery = Select(
         [Max(_revisionsSchema.REVISION)], From=_revisionsSchema,
         Where=_revisionsSchema.HOME_RESOURCE_ID == Parameter("resourceID")
@@ -4056,7 +4118,8 @@
         if self._notifiers is None:
             self._notifiers = ()
         self._notifiers += (notifier,)
- 
+
+
     def notifierID(self, label="default"):
         if self._notifiers:
             return self._notifiers[0].getID(label)
@@ -4112,7 +4175,7 @@
         """
         # Delete NOTIFICATION rows
         no = schema.NOTIFICATION
-        kwds = { "ResourceID" : self._resourceID }
+        kwds = {"ResourceID": self._resourceID}
         yield Delete(
             From=no,
             Where=(
@@ -4130,6 +4193,7 @@
         ).on(self._txn, **kwds)
 
 
+
 class NotificationObject(LoggingMixIn, FancyEqMixin):
 
     implements(INotificationObject)
@@ -4339,7 +4403,6 @@
             self._modified = rows[0][0]
         self._objectText = xmldata
 
-
     _xmlDataFromID = Select(
         [_objectSchema.XML_DATA], From=_objectSchema,
         Where=_objectSchema.RESOURCE_ID == Parameter("resourceID"))
@@ -4368,6 +4431,7 @@
     def size(self):
         return self._size
 
+
     def xmlType(self):
         # NB This is the NotificationType property element
         if isinstance(self._xmlType, str):
@@ -4376,6 +4440,7 @@
 
         return self._xmlType
 
+
     def created(self):
         return datetimeMktime(parseSQLTimestamp(self._created))
 
@@ -4735,6 +4800,3 @@
         # obscure bug.
     else:
         yield t.commit()
-
-
-
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20121015/ef215a0b/attachment-0001.html>


More information about the calendarserver-changes mailing list