[CalendarServer-changes] [4858] CalendarServer/branches/users/cdaboo/deployment-partition-4722

source_changes at macosforge.org source_changes at macosforge.org
Fri Dec 11 11:55:50 PST 2009


Revision: 4858
          http://trac.macosforge.org/projects/calendarserver/changeset/4858
Author:   cdaboo at apple.com
Date:     2009-12-11 11:55:47 -0800 (Fri, 11 Dec 2009)
Log Message:
-----------
Merging r4722 through r4857.

Revision Links:
--------------
    http://trac.macosforge.org/projects/calendarserver/changeset/4722
    http://trac.macosforge.org/projects/calendarserver/changeset/4857

Modified Paths:
--------------
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/bin/caldav_warmup
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.web2.dav.resource.patch
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/cluster.py
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/config.py
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/directory/appleopendirectory.py
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/directory/directory.py
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/instance.py
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/method/put.py
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/tap.py

Added Paths:
-----------
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/bin/caldav_cache_update.py
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/contrib/tools/request_monitor
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.internet.tcp.patch
    CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.python.util.patch

Copied: CalendarServer/branches/users/cdaboo/deployment-partition-4722/bin/caldav_cache_update.py (from rev 4857, CalendarServer/branches/users/wsanchez/deployment/bin/caldav_cache_update.py)
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/bin/caldav_cache_update.py	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/bin/caldav_cache_update.py	2009-12-11 19:55:47 UTC (rev 4858)
@@ -0,0 +1,384 @@
+#!/usr/bin/env python
+##
+# Copyright (c) 2006-2009 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+"""
+This tool trawls through the server's data store, reading data.
+
+This is useful for ensuring that any on-demand data format upgrades
+are done.
+
+This tool requires access to the calendar server's configuration and
+data storage.
+"""
+
+import sys
+
+#sys.path.insert(0, "/usr/share/caldavd/lib/python")
+
+from getopt import getopt, GetoptError
+from os.path import dirname, abspath
+from twisted.internet import reactor
+from twisted.internet.address import IPv4Address
+from twisted.internet.defer import inlineCallbacks
+from twisted.python import log
+from twisted.python.reflect import namedClass
+from twistedcaldav import memcachepool
+from twistedcaldav.cache import MemcacheChangeNotifier
+from twistedcaldav.config import config, defaultConfigFile
+from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
+from twistedcaldav.directory.principal import DirectoryPrincipalResource
+from twistedcaldav.notify import installNotificationClient
+from twistedcaldav.resource import isPseudoCalendarCollectionResource
+from twistedcaldav.static import CalDAVFile, CalendarHomeFile
+import os
+
+CALENDARS_DOCROOT = "_run/main/docs/calendars/"
+
+def loadConfig(configFileName):
+    if configFileName is None:
+        configFileName = defaultConfigFile
+
+    if not os.path.isfile(configFileName):
+        sys.stderr.write("No config file: %s\n" % (configFileName,))
+        sys.exit(1)
+
+    config.loadConfig(configFileName)
+
+    CalendarHomeFile.cacheNotifierFactory = MemcacheChangeNotifier
+    DirectoryPrincipalResource.cacheNotifierFactory = MemcacheChangeNotifier
+
+    memcachepool.installPool(
+        IPv4Address(
+            'TCP',
+            config.Memcached["BindAddress"],
+            config.Memcached["Port"]),
+        config.Memcached["MaxClients"])
+
+    installNotificationClient(
+        config.Notifications["InternalNotificationHost"],
+        config.Notifications["InternalNotificationPort"],
+    )
+
+    return config
+
+def getDirectory():
+    BaseDirectoryService = namedClass(config.DirectoryService["type"])
+
+    class MyDirectoryService (BaseDirectoryService):
+        def getPrincipalCollection(self):
+            if not hasattr(self, "_principalCollection"):
+                #
+                # Instantiating a CalendarHomeProvisioningResource with a directory
+                # will register it with the directory (still smells like a hack).
+                #
+                # We need that in order to locate calendar homes via the directory.
+                #
+                from twistedcaldav.static import CalendarHomeProvisioningFile
+                CalendarHomeProvisioningFile(os.path.join(config.DocumentRoot, "calendars"), self, "/calendars/")
+
+                from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
+                self._principalCollection = DirectoryPrincipalProvisioningResource("/principals/", self)
+
+            return self._principalCollection
+
+        def setPrincipalCollection(self, coll):
+            # See principal.py line 237:  self.directory.principalCollection = self
+            pass
+
+        principalCollection = property(getPrincipalCollection, setPrincipalCollection)
+
+        def calendarHomeForRecord(self, record):
+            principal = self.principalCollection.principalForRecord(record)
+            if principal:
+                try:
+                    return principal._calendarHome()
+                except AttributeError:
+                    pass
+            return None
+
+        def calendarHomeForShortName(self, recordType, shortName):
+            principal = self.principalCollection.principalForShortName(recordType, shortName)
+            if principal:
+                try:
+                    return principal._calendarHome()
+                except AttributeError:
+                    pass
+            return None
+
+        def principalForCalendarUserAddress(self, cua):
+            return self.principalCollection.principalForCalendarUserAddress(cua)
+
+
+    return MyDirectoryService(**config.DirectoryService["params"])
+
+class DummyDirectoryService (DirectoryService):
+    realmName = ""
+    baseGUID = "51856FD4-5023-4890-94FE-4356C4AAC3E4"
+    def recordTypes(self): return ()
+    def listRecords(self): return ()
+    def recordWithShortName(self, recordType, shortName): return None
+
+dummyDirectoryRecord = DirectoryRecord(
+    service = DummyDirectoryService(),
+    recordType = "dummy",
+    guid = "8EF0892F-7CB6-4B8E-B294-7C5A5321136A",
+    shortName = "dummy",
+    fullName = "Dummy McDummerson",
+    calendarUserAddresses = set(),
+    autoSchedule = False,
+)
+
+class UsageError (StandardError):
+    pass
+
+def usage(e=None):
+    if e:
+        print e
+        print ""
+
+    name = os.path.basename(sys.argv[0])
+    print "usage: %s [options] [input_specifiers]" % (name,)
+    print ""
+    print "Warm up data store by reading everything once."
+    print __doc__
+    print "options:"
+    print "  -h --help: print this help and exit"
+    print "  -f --config: Specify caldavd.plist configuration path"
+    print ""
+    print "input specifiers:"
+    print "  -a --all: add all calendar homes"
+    print "  -H --home: add a calendar home (and all calendars within it)"
+    print "  -r --record: add a directory record's calendar home (format: 'recordType:shortName')"
+    print "  -u --user: add a user's calendar home (shorthand for '-r users:shortName')"
+    print "  --no-icalendar: Don't read iCalendar data"
+    print "  --no-properties: Don't read DAV properties"
+    print "  --no-index: Don't read indexes"
+
+    if e:
+        sys.exit(64)
+    else:
+        sys.exit(0)
+
+def main():
+    try:
+        (optargs, args) = getopt(
+            sys.argv[1:], "c:d:hf:", [
+                "config=",
+                "log=",
+                "calverified=",
+                "docroot=",
+                "help",
+            ],
+        )
+    except GetoptError, e:
+        usage(e)
+
+    configFileName = None
+    logFileName = "/dev/stdout"
+    calverifyLogFileName = None
+    docroot = CALENDARS_DOCROOT
+
+    for opt, arg in optargs:
+        if opt in ("-h", "--help"):
+            usage()
+
+        elif opt in ("-f", "--config"):
+            configFileName = arg
+
+        elif opt in ("--log",):
+            logFileName = arg
+
+        elif opt in ("-c", "--calverified",):
+            calverifyLogFileName = arg
+
+        elif opt in ("-d", "--docroot",):
+            docroot = arg
+
+    if args:
+        usage("Too many arguments: %s" % (" ".join(args),))
+
+    observer = log.FileLogObserver(open(logFileName, "a"))
+    log.addObserver(observer.emit)
+
+    if not calverifyLogFileName:
+        usage("CalVerify log file name must be specified")
+
+    changedHomes, changedCalendars = calverifyScrape(calverifyLogFileName, docroot)
+    for i in sorted(changedHomes):
+        print i
+    for i in sorted(changedCalendars):
+        print i
+    print "Total homes: %s" % (len(changedHomes),)
+    print "Total calendars: %s" % (len(changedCalendars),)
+
+    #
+    # Start the reactor
+    #
+    reactor.callLater(0, run, configFileName, changedHomes, changedCalendars)
+    reactor.run()
+
+@inlineCallbacks
+def run(configFileName, changedHomes, changedCalendars):
+
+    def checkExists(resource):
+        if not resource.exists():
+            sys.stderr.write("No such file: %s\n" % (resource.fp.path,))
+            sys.exit(1)
+
+    calendarHomes = set()
+    calendars = set()
+
+    if changedHomes or changedCalendars:
+        loadConfig(configFileName)
+        directory = getDirectory()
+        
+        #from twistedcaldav.log import setLogLevelForNamespace
+        #setLogLevelForNamespace("twistedcaldav.memcacheprops", "debug")
+
+        calendarHomes = set()
+
+        for path in changedHomes:
+            path = abspath(path)
+            guid = os.path.basename(path)
+
+            record = directory.recordWithGUID(guid)
+            if record is None:
+                record = DirectoryRecord(
+                    service = DummyDirectoryService(),
+                    recordType = "dummy",
+                    guid = guid,
+                    shortName = "dummy",
+                    fullName = "",
+                    calendarUserAddresses = set(),
+                    autoSchedule = False,
+                )
+
+            parent = CalDAVFile(dirname(abspath(path)))
+            calendarHome = CalendarHomeFile(path, parent, record)
+            calendarHome.url = lambda guid=guid: "/calendars/__uids__/%s/" % (guid,)  # bind guid now, not at loop end
+            checkExists(calendarHome)
+            calendarHomes.add(calendarHome)
+
+        calendars = set()
+
+        for path in changedCalendars:
+            path = abspath(path)
+            guid = os.path.basename(dirname(path))
+
+            record = directory.recordWithGUID(guid)
+            if record is None:
+                record = DirectoryRecord(
+                    service = DummyDirectoryService(),
+                    recordType = "dummy",
+                    guid = guid,
+                    shortName = "dummy",
+                    fullName = "",
+                    calendarUserAddresses = set(),
+                    autoSchedule = False,
+                )
+
+            parent = CalDAVFile(dirname(dirname(path)))
+            parent.url = lambda: "/calendars/__uids__/"
+            calendarHome = CalendarHomeFile(dirname(path), parent, record)
+            calendarHome.url = lambda guid=guid: "/calendars/__uids__/%s/" % (guid,)
+            checkExists(calendarHome)
+            calendar = calendarHome.getChild(os.path.basename(path))
+            calendars.add(calendar)
+
+    n = 0
+    ok_n = 0
+    fail_n = 0
+    N = len(calendarHomes) + len(calendars)
+    for calendarHome in calendarHomes:
+        n += 1
+        log.msg("%.2f%% (%d of %d)" % (100.0 * n/N, n, N))
+        try:
+            yield processCalendarHome(
+                calendarHome,
+                directory = directory,
+            )
+            ok_n += 1
+        except Exception, e:
+            log.msg("Exception for calendar home '%s': %s" % (calendarHome, e))
+            fail_n += 1
+    for calendar in calendars:
+        n += 1
+        log.msg("%.2f%% (%d of %d)" % (100.0 * n/N, n, N))
+        try:
+            yield processCalendar(
+                calendar,
+            )
+            ok_n += 1
+        except Exception, e:
+            log.msg("Exception for calendar '%s': %s" % (calendar, e))
+            fail_n += 1
+
+    log.msg("")
+    log.msg("Results:")
+    log.msg("Total Processed: %d" % (n,))
+    log.msg("Total OK: %d" % (ok_n,))
+    log.msg("Total Bad: %d" % (fail_n,))
+
+    reactor.stop()
+
+def calverifyScrape(fileName, docroot):
+    
+    # Find affected paths
+    homes = set()
+    individuals = set()
+    with open(fileName) as f:
+        
+        for line in f:
+            if line.startswith("Fixed:"):
+                fixedpath = line[7:-1]
+                splits = fixedpath.split("/")[:4]
+                homes.add(docroot + "/".join(splits))
+            elif line.startswith("Fixed (removed):"):
+                fixedpath = line[17:-1]
+                splits = fixedpath.split("/")[:-1]
+                individuals.add(docroot + "/".join(splits))
+
+    # Remove individuals also in homes
+    for item in tuple(individuals):
+        splits = item.split("/")[:5]
+        if "/".join(splits) in homes:
+            individuals.remove(item)
+
+    return homes, individuals
+
+@inlineCallbacks
+def processCalendarHome(
+    calendarHome,
+    directory = None,
+):
+    # Update ctags on each calendar collection 
+    for childName in calendarHome.listChildren():
+        if childName in ("outbox", "dropbox",):
+            continue
+        child = calendarHome.getChild(childName)
+        if isPseudoCalendarCollectionResource(child):
+            yield processCalendar(
+                child,
+            )
+
+@inlineCallbacks
+def processCalendar(
+    calendarCollection,
+):
+    # Update the ctag on the calendar. This will update the memcache token
+    # and send a push notification.
+    yield calendarCollection.updateCTag()
+    
+    print calendarCollection
+
+if __name__ == "__main__":
+    main()

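The heart of the new tool is calverifyScrape(), which turns "Fixed:" lines from a CalVerify log into home and calendar collection paths. A minimal standalone sketch of that parsing, using synthetic paths and a single-component docroot (note the later dedup step compares the first five path components, which only lines up when the docroot contributes exactly one component):

    docroot = "docroot/"
    loglines = [
        "Fixed: __uids__/AB/CD/guid1/calendar/1.ics\n",
        "Fixed (removed): __uids__/AB/CD/guid2/calendar/2.ics\n",
    ]

    homes = set()
    individuals = set()
    for line in loglines:
        if line.startswith("Fixed:"):
            # keep the first four components: ends at the home (GUID) dir
            homes.add(docroot + "/".join(line[7:-1].split("/")[:4]))
        elif line.startswith("Fixed (removed):"):
            # drop the resource name: ends at the calendar collection
            individuals.add(docroot + "/".join(line[17:-1].split("/")[:-1]))

    print homes        # set(['docroot/__uids__/AB/CD/guid1'])
    print individuals  # set(['docroot/__uids__/AB/CD/guid2/calendar'])
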
Modified: CalendarServer/branches/users/cdaboo/deployment-partition-4722/bin/caldav_warmup
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/bin/caldav_warmup	2009-12-11 18:41:21 UTC (rev 4857)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/bin/caldav_warmup	2009-12-11 19:55:47 UTC (rev 4858)
@@ -277,18 +277,32 @@
                     calendarHomes.add(calendarHome)
 
     n = 0
+    ok_n = 0
+    fail_n = 0
     N = len(calendarHomes)
     for calendarHome in calendarHomes:
         n += 1
         log.msg("%.2f%% (%d of %d)" % (100.0 * n/N, n, N))
-        processCalendarHome(
-            calendarHome,
-            directory = directory,
-            readCalendarData = readCalendarData,
-            readProperties = readProperties,
-            readIndexes = readIndexes,
-        )
+        try:
+            processCalendarHome(
+                calendarHome,
+                directory = directory,
+                readCalendarData = readCalendarData,
+                readProperties = readProperties,
+                readIndexes = readIndexes,
+            )
+            ok_n += 1
+        except Exception, e:
+            log.msg("Exception for calendar home '%s': %s" % (calendarHome, e))
+            fail_n += 1
 
+    log.msg("")
+    log.msg("Results:")
+    log.msg("Total Processed: %d" % (n,))
+    log.msg("Total OK: %d" % (ok_n,))
+    log.msg("Total Bad: %d" % (fail_n,))
+
+
 def processCalendarHome(
     calendarHome,
     directory = None,

Copied: CalendarServer/branches/users/cdaboo/deployment-partition-4722/contrib/tools/request_monitor (from rev 4857, CalendarServer/branches/users/wsanchez/deployment/contrib/tools/request_monitor)
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/contrib/tools/request_monitor	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/contrib/tools/request_monitor	2009-12-11 19:55:47 UTC (rev 4858)
@@ -0,0 +1,385 @@
+#!/usr/bin/env python
+##
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+import sys
+import os
+import time
+import datetime
+from dateutil.parser import parse as dateparse
+import getopt
+from subprocess import Popen, PIPE, STDOUT
+
+sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) 
+
+
+filename = "/var/log/caldavd/access.log"
+
+def listenq():
+    child = Popen(
+        args=[
+            "/usr/sbin/netstat", "-L", "-anp", "tcp",
+        ],
+        stdout=PIPE, stderr=STDOUT,
+    )
+    output, error = child.communicate()
+    for line in output.split("\n"):
+        try:
+            line.index(".8443")
+            q = line.split("/")[0]
+            return q
+        except ValueError:
+            pass
+    else: # Nobody is listening on 8443
+        return "0"
+
+_listenQueueHistory = []
+
+def listenQueueHistory():
+    global _listenQueueHistory
+    latest = listenq()
+    _listenQueueHistory.insert(0, latest)
+    del _listenQueueHistory[12:]
+    return _listenQueueHistory
+
+
+_idleHistory = []
+
+def idleHistory():
+    global _idleHistory
+    latest = cpuidle()
+    _idleHistory.insert(0, latest)
+    del _idleHistory[12:]
+    return _idleHistory
+
+
+
+def tail(filename, n):
+    child = Popen(
+        args=[
+            "/usr/bin/tail", "-%d" % (n,), filename,
+        ],
+        stdout=PIPE, stderr=STDOUT,
+    )
+    output, error = child.communicate()
+    return output.split("\n")
+
+
+def cpuidle():
+    child = Popen(
+        args=[
+            "/usr/sbin/iostat", "-c", "2", "-n", "0",
+        ],
+        stdout=PIPE, stderr=STDOUT,
+    )
+    output, error = child.communicate()
+    return output.split("\n")[-2].split()[2]
+
+
+def parseLine(line):
+
+    startPos = line.find("- ")
+    endPos = line.find(" [")
+    userId = line[startPos+2:endPos]
+
+    startPos = endPos + 2
+    endPos = line.find(']', startPos)
+    logTime = line[startPos:endPos]
+
+    startPos = endPos + 3
+    endPos = line.find(' ', startPos)
+    if line[startPos] == '?':
+        method = "???"
+        uri = ""
+        startPos += 5
+    else:
+        method = line[startPos:endPos]
+
+        startPos = endPos + 1
+        endPos = line.find(" HTTP/", startPos)
+        uri = line[startPos:endPos]
+        startPos = endPos + 11
+
+    status = int(line[startPos:startPos+3])
+
+    startPos += 4
+    endPos = line.find(' ', startPos)
+    bytes = int(line[startPos:endPos])
+
+    startPos = endPos + 2
+    endPos = line.find('"', startPos)
+    referer = line[startPos:endPos]
+
+    startPos = endPos + 3
+    endPos = line.find('"', startPos)
+    client = line[startPos:endPos]
+
+    startPos = endPos + 2
+    if line[startPos] == '[':
+        extended = {}
+
+        startPos += 1
+        endPos = line.find(' ', startPos)
+        extended["t"] = float(line[startPos:endPos])
+
+        startPos = endPos + 6
+        endPos = line.find(' ', startPos)
+        extended["i"] = int(line[startPos:endPos])
+
+        startPos = endPos + 1
+        endPos = line.find(' ', startPos)
+        extended["or"] = int(line[startPos:endPos])
+    else:
+        items = line[startPos:].split()
+        extended = dict([item.split('=') for item in items])
+
+    return userId, logTime, method, uri, status, bytes, referer, client, extended
+
+
+def usage():
+    print "-h         print help and exit"
+    print "--lines N  specifies how many lines to tail from access.log"
+
+numLines = 10000
+options, args = getopt.getopt(sys.argv[1:], "h", ["lines=",])
+for option, value in options:
+    if option == "-h":
+        usage()
+        sys.exit(0)
+    elif option == "--lines":
+        numLines = int(value)
+
+
+while True:
+
+    samples = []
+    currentSec = None
+    currentCount = 0
+    times = []
+    ids = {}
+    rawCounts = {}
+    timesSpent = {}
+    numRequests = 0
+    totalRespTime = 0.0
+    maxRespTime = 0.0
+    under10ms = 0
+    over10ms = 0
+    over100ms = 0
+    over1s = 0
+    over10s = 0
+    over30s = 0
+    over60s = 0
+    requests = []
+    users = { }
+    startTime = None
+    endTime = None
+    errorCount = 0
+
+    try: 
+        for line in tail(filename, numLines):
+            if not line or line.startswith("Log"):
+                continue
+
+            numRequests += 1
+
+            userId, logTime, method, uri, status, bytes, _ignore_referer, client, extended = parseLine(line)
+
+            logTime = dateparse(logTime, fuzzy=True)
+            times.append(logTime)
+
+            if status >= 500:
+                errorCount += 1
+
+            outstanding = int(extended['or'])
+            logId = int(extended['i'])
+            raw = rawCounts.get(logId, 0) + 1
+            rawCounts[logId] = raw
+            prevMax = ids.get(logId, 0)
+            if outstanding > prevMax:
+                ids[logId] = outstanding
+
+            respTime = float(extended['t'])
+            timeSpent = timesSpent.get(logId, 0.0) + respTime
+            timesSpent[logId] = timeSpent
+            totalRespTime += respTime
+            if respTime > maxRespTime:
+                maxRespTime = respTime
+
+            if respTime >= 60000.0:
+                over60s += 1
+            elif respTime >= 30000.0:
+                over30s +=1
+            elif respTime >= 10000.0:
+                over10s +=1
+            elif respTime >= 1000.0:
+                over1s +=1
+            elif respTime >= 100.0:
+                over100ms +=1
+            elif respTime >= 10.0:
+                over10ms +=1
+            else:
+                under10ms +=1
+
+
+            ext = []
+            for key, value in extended.iteritems():
+                if key not in ('i', 't'):
+                    if key == "cl":
+                        value = float(value)/1024
+                        value = "%.1fkb" % (value,)
+                        key = "req"
+                    ext.append("%s:%s" % (key, value))
+            ext = ", ".join(ext)
+
+            try:
+                client = client.split(";")[2]
+                client = client.strip()
+            except:
+                pass
+
+            if userId != "-":
+                userStat = users.get(userId, { 'count' : 0, 'clients' : {} })
+                userStat['count'] += 1
+                clientCount = userStat['clients'].get(client, 0)
+                userStat['clients'][client] = clientCount + 1
+                users[userId] = userStat
+
+            reqStartTime = logTime - datetime.timedelta(milliseconds=respTime)
+            requests.append((respTime, userId, method, bytes/1024.0, ext, client, logId, logTime, reqStartTime))
+
+
+        times.sort()
+        startTime = times[0]
+        endTime = times[-1]
+        for logTime in times:
+
+            if currentSec is None:
+                currentCount = 0
+                currentSec = logTime
+            else:
+                currentCount += 1
+                if logTime != currentSec:
+                    samples.append(currentCount)
+                    currentCount = 0
+                    currentSec = logTime
+
+
+        if len(samples) < 3:
+            avg = ""
+        else:
+            samples = samples[1:-1]
+            total = 0
+            for sample in samples:
+                total += sample
+            avg = float(total) / len(samples)
+            avg = "%.1f average requests per second" % (avg,)
+
+        print "- " * 40
+        q = listenQueueHistory()
+        print datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S"), 
+        print "Listen queue:", q[0], " (Recent", ", ".join(q[1:]), "Oldest)"
+        q = idleHistory()
+        print "CPU idle %:", q[0], " (Recent", ", ".join(q[1:]), "Oldest)"
+
+        if avg:
+            print avg, "|",
+        print "%d requests between %s and %s" % (numLines, startTime.strftime("%H:%M:%S"), endTime.strftime("%H:%M:%S"))
+        print "Response time: average %.1f ms, max %.1f ms" % (totalRespTime / numRequests, maxRespTime)
+        print "<10ms: %d  >10ms: %d  >100ms: %d  >1s: %d  >10s: %d  >30s: %d  >60s: %d" % (under10ms, over10ms, over100ms, over1s, over10s, over30s, over60s)
+        print
+        if errorCount:
+            print "Number of 500 errors: %d" % (errorCount,)
+            print
+
+        print "Proc:   Peak outstanding:        Seconds of processing (number of requests):"
+        for l in xrange(8):
+            base = l * 10
+            print "%2d-%2d: " % (base, base+9),
+
+            for i in xrange(base, base+10):
+                try:
+                    r = ids[i]
+                    s = "%1d" % (r,)
+                except KeyError:
+                    s = "."
+                print s,
+
+            print "    ",
+
+            for i in xrange(base, base+10):
+                try:
+                    r = timesSpent[i] / 1000
+                    c = rawCounts[i]
+                    s = "%4.0f(%4d)" % (r,c)
+                except KeyError:
+                    s = "         ."
+                print s,
+
+
+            print
+
+        print
+        print "Top 10 longest (in most recent %d requests):" % (numLines,)
+        requests.sort()
+        requests.reverse()
+        for i in xrange(10):
+            try:
+                respTime, userId, method, kb, ext, client, logId, logTime, reqStartTime = requests[i]
+                """
+                overlapCount = 0
+                for request in requests:
+                    _respTime, _userId, _method, _kb, _ext, _client, _logId, _logTime, _reqStartTime = request
+                    if _logId == logId and _logTime > reqStartTime and _reqStartTime < logTime:
+                        overlapCount += 1
+
+                print "%7.1fms  %-12s %s res:%.1fkb, %s [%s] #%d +%d %s->%s" % (respTime, userId, method, kb, ext, client, logId, overlapCount, reqStartTime.strftime("%H:%M:%S"), logTime.strftime("%H:%M:%S"),)
+                """
+                print "%7.1fms  %-12s %s res:%.1fkb, %s [%s] #%d %s->%s" % (respTime, userId, method, kb, ext, client, logId, reqStartTime.strftime("%H:%M:%S"), logTime.strftime("%H:%M:%S"),)
+            except:
+                pass
+
+            
+
+        print
+        print "Top 5 busiest users (in most recent %d requests):" % (numLines,)
+        userlist = []
+        for user, userStat in users.iteritems():
+            userlist.append((userStat['count'], user, userStat))
+        userlist.sort()
+        userlist.reverse()
+        for i in xrange(5):
+            try:
+                count, user, userStat = userlist[i]
+                print "%3d  %-12s " % (count, user),
+                clientStat = userStat['clients']
+                clients = clientStat.keys()
+                if len(clients) == 1:
+                    print "[%s]" % (clients[0],)
+                else:
+                    clientList = []
+                    for client in clients:
+                        clientList.append("%s: %d" % (client, clientStat[client]))
+                    print "[%s]" % ", ".join(clientList)
+            except:
+                pass
+
+        print
+        time.sleep(10)
+
+    except Exception, e:
+        print "Script failure", e
+

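parseLine() above assumes Apache-style access log lines with CalendarServer's extended key=value fields appended. A synthetic example (all values invented) and what it parses to, assuming parseLine() from the script is in scope:

    sample = ('10.0.1.2 - user01 [11/Dec/2009:11:55:47 -0800] '
              '"PROPFIND /calendars/users/user01/ HTTP/1.1" 207 1234 '
              '"-" "iCal/4.0" t=21.5 i=3 or=2')

    (userId, logTime, method, uri, status, bytes,
     referer, client, extended) = parseLine(sample)

    print userId    # user01
    print method    # PROPFIND
    print extended  # {'i': '3', 'or': '2', 't': '21.5'} (key order may vary)
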
Copied: CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.internet.tcp.patch (from rev 4857, CalendarServer/branches/users/wsanchez/deployment/lib-patches/Twisted/twisted.internet.tcp.patch)
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.internet.tcp.patch	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.internet.tcp.patch	2009-12-11 19:55:47 UTC (rev 4858)
@@ -0,0 +1,20 @@
+Index: twisted/internet/tcp.py
+===================================================================
+--- twisted/internet/tcp.py	(revision 19773)
++++ twisted/internet/tcp.py	(working copy)
+@@ -772,6 +772,15 @@
+                     return
+                 try:
+                     skt, addr = self.socket.accept()
++                    if addr is None: # sometimes this happens on OS X
++                        log.msg("socket.accept() returned None for addr. self.socket: %s, %s, %s, %s, skt: %s" % (
++                            self.socket.family,
++                            self.socket.type,
++                            self.socket.proto,
++                            self.socket.getsockname(),
++                            skt,
++                        ))
++                        raise socket.error(ECONNABORTED)
+                 except socket.error, e:
+                     if e.args[0] in (EWOULDBLOCK, EAGAIN):
+                         self.numberAccepts = i

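The guard this patch adds can be sketched outside the reactor with plain sockets (illustrative only): a None peer address from accept() is converted into the aborted-connection error that callers already handle.

    import errno
    import socket

    def guardedAccept(listeningSocket):
        skt, addr = listeningSocket.accept()
        if addr is None:
            # Seen on OS X: accept() can hand back a socket with no peer
            # address; close it and report the connection as aborted.
            skt.close()
            raise socket.error(errno.ECONNABORTED)
        return skt, addr
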
Copied: CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.python.util.patch (from rev 4857, CalendarServer/branches/users/wsanchez/deployment/lib-patches/Twisted/twisted.python.util.patch)
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.python.util.patch	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.python.util.patch	2009-12-11 19:55:47 UTC (rev 4858)
@@ -0,0 +1,92 @@
+Index: twisted/python/util.py
+===================================================================
+--- twisted/python/util.py	(revision 19773)
++++ twisted/python/util.py	(working copy)
+@@ -562,9 +562,32 @@
+     return [e[2] for e in L2]
+ 
+ try:
+-    import pwd, grp
+-    from os import setgroups, getgroups
+-    
++    from ctypes import c_int, c_char_p, cdll
++    from ctypes.util import find_library
++    hasCtypes = True
++except ImportError:
++    hasCtypes = False
++
++if sys.platform == "darwin" and hasCtypes:
++    import pwd
++    libc = cdll.LoadLibrary(find_library("libc"))
++    def initgroups(uid, primaryGid):
++        """
++        Call initgroups with ctypes.
++        """
++        c_gid = c_int(primaryGid)
++        username = pwd.getpwuid(uid)[0]
++        c_username = c_char_p(username)
++        return libc.initgroups(c_username, c_gid)
++
++elif pwd is None or grp is None or setgroups is None or getgroups is None:
++    def initgroups(uid, primaryGid):
++        """
++        Do nothing.
++
++        Underlying platform support required to manipulate groups is missing.
++        """
++else:
+     def _setgroups_until_success(l):
+         while(1):
+             # NASTY NASTY HACK (but glibc does it so it must be okay):
+@@ -589,9 +612,10 @@
+             else:
+                 # Success, yay!
+                 return
+-            
++
+     def initgroups(uid, primaryGid):
+-        """Initializes the group access list.
++        """
++        Initializes the group access list.
+ 
+         This is done by reading the group database /etc/group and using all
+         groups of which C{uid} is a member.  The additional group
+@@ -600,14 +624,21 @@
+         If the given user is a member of more than C{NGROUPS}, arbitrary
+         groups will be silently discarded to bring the number below that
+         limit.
+-        """       
++
++        @type uid: C{int}
++        @param uid: The UID for which to look up group information.
++
++        @type primaryGid: C{int} or C{NoneType}
++        @param primaryGid: If provided, an additional GID to include when
++            setting the groups.
++        """
+         try:
+             # Try to get the maximum number of groups
+             max_groups = os.sysconf("SC_NGROUPS_MAX")
+         except:
+             # No predefined limit
+             max_groups = 0
+-        
++
+         username = pwd.getpwuid(uid)[0]
+         l = []
+         if primaryGid is not None:
+@@ -628,14 +659,8 @@
+                         raise
+             else:
+                 raise
+-                                    
+ 
+-except:
+-    def initgroups(uid, primaryGid):
+-        """Do nothing.
+ 
+-        Underlying platform support require to manipulate groups is missing.
+-        """
+ 
+ 
+ def switchUID(uid, gid, euid=False):

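A standalone sketch of the Darwin path this patch introduces: hand group initialization to the C library's initgroups(3) via ctypes rather than assembling the group list in Python (find_library("c"), used here, and the patch's find_library("libc") both resolve to libc.dylib on OS X):

    import pwd
    from ctypes import c_char_p, c_int, cdll
    from ctypes.util import find_library

    libc = cdll.LoadLibrary(find_library("c"))

    def initgroups(uid, primaryGid):
        # initgroups(3) takes the user *name*, so map the uid first
        username = pwd.getpwuid(uid)[0]
        return libc.initgroups(c_char_p(username), c_int(primaryGid))
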
Modified: CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.web2.dav.resource.patch
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.web2.dav.resource.patch	2009-12-11 18:41:21 UTC (rev 4857)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/lib-patches/Twisted/twisted.web2.dav.resource.patch	2009-12-11 19:55:47 UTC (rev 4858)
@@ -398,8 +398,37 @@
  
          # Need to get list of supported privileges
          supported = []
-@@ -1038,9 +1113,9 @@
+@@ -781,14 +856,15 @@
  
+         # Steps 1 - 6
+         got_deny = False
++        ignore = set() # to store aces we'll be ignoring below
+         for ace in new_acl.children:
+             for old_ace in old_acl.children:
+                 if (ace.principal == old_ace.principal):
+                     # Step 1
+                     if old_ace.protected:
+-                        log.err("Attempt to overwrite protected ace %r on resource %r" % (old_ace, self))
+-                        yield (davxml.dav_namespace, "no-protected-ace-conflict")
+-                        return
++                        log.debug("Ignoring an attempt to overwrite protected ace %r on resource %r" % (old_ace, self))
++                        ignore.add(ace)
++                        break
+ 
+                     # Step 2
+                     #
+@@ -845,7 +921,8 @@
+         # Iterate through the old ones and replace any that are in the new set, or remove
+         # the non-inherited/non-protected not in the new set
+         #
+-        new_aces = [ace for ace in new_acl.children]
++        # Skip any ace in the ignore set
++        new_aces = [ace for ace in new_acl.children if ace not in ignore]
+         new_set = []
+         for old_ace in old_acl.children:
+             for i, new_ace in enumerate(new_aces):
+@@ -1038,9 +1115,9 @@
+ 
              if myURL == "/":
                  # If we get to the root without any ACLs, then use the default.
 +                acl = self.defaultRootAccessControlList()
@@ -410,7 +439,7 @@
  
          # Dynamically update privileges for those ace's that are inherited.
          if inheritance:
-@@ -1076,7 +1151,7 @@
+@@ -1076,7 +1153,7 @@
                                  # Adjust ACE for inherit on this resource
                                  children = list(ace.children)
                                  children.remove(TwistedACLInheritable())
@@ -419,7 +448,7 @@
                                  aces.append(davxml.ACE(*children))
              else:
                  aces.extend(inherited_aces)
-@@ -1105,8 +1180,7 @@
+@@ -1105,8 +1182,7 @@
          the child resource loop and supply those to the checkPrivileges on each child.
  
          @param request: the L{IRequest} for the request in progress.
@@ -429,7 +458,7 @@
          """
          
          # Get the parent ACLs with inheritance and preserve the <inheritable> element.
-@@ -1128,21 +1202,9 @@
+@@ -1128,21 +1204,9 @@
                  # Adjust ACE for inherit on this resource
                  children = list(ace.children)
                  children.remove(TwistedACLInheritable())
@@ -453,7 +482,7 @@
  
      inheritedACEsforChildren = deferredGenerator(inheritedACEsforChildren)
  
-@@ -1152,49 +1214,69 @@
+@@ -1152,49 +1216,69 @@
  
          This implementation returns an empty set.
          """
@@ -551,7 +580,7 @@
      def samePrincipal(self, principal1, principal2):
          """
          Check whether the two prinicpals are exactly the same in terms of
-@@ -1219,7 +1301,6 @@
+@@ -1219,7 +1303,6 @@
              return False
                  
      def matchPrincipal(self, principal1, principal2, request):
@@ -559,7 +588,7 @@
          """
          Check whether the principal1 is a principal in the set defined by
          principal2.
-@@ -1244,6 +1325,9 @@
+@@ -1244,6 +1327,9 @@
              if isinstance(principal1, davxml.Unauthenticated):
                  yield False
                  return
@@ -569,7 +598,7 @@
              else:
                  yield True
                  return
-@@ -1271,7 +1355,6 @@
+@@ -1271,7 +1357,6 @@
  
          assert principal2 is not None, "principal2 is None"
  
@@ -577,7 +606,7 @@
          # Compare two HRefs and do group membership test as well
          if principal1 == principal2:
              yield True
-@@ -1289,6 +1372,7 @@
+@@ -1289,6 +1374,7 @@
  
      matchPrincipal = deferredGenerator(matchPrincipal)
  
@@ -585,7 +614,7 @@
      def principalIsGroupMember(self, principal1, principal2, request):
          """
          Check whether one principal is a group member of another.
-@@ -1299,18 +1383,21 @@
+@@ -1299,18 +1385,21 @@
          @return: L{Deferred} with result C{True} if principal1 is a member of principal2, C{False} otherwise
          """
          
@@ -618,7 +647,7 @@
          
      def validPrincipal(self, ace_principal, request):
          """
-@@ -1351,11 +1438,16 @@
+@@ -1351,11 +1440,16 @@
          @return C{True} if C{href_principal} is valid, C{False} otherwise.
  
          This implementation tests for a href element that corresponds to
@@ -638,7 +667,7 @@
          return d
  
      def resolvePrincipal(self, principal, request):
-@@ -1432,7 +1524,7 @@
+@@ -1432,7 +1526,7 @@
                  log.err("DAV:self ACE is set on non-principal resource %r" % (self,))
                  yield None
                  return
@@ -647,7 +676,7 @@
  
          if isinstance(principal, davxml.HRef):
              yield principal
-@@ -1517,6 +1609,270 @@
+@@ -1517,6 +1611,270 @@
          return None
  
      ##
@@ -918,7 +947,7 @@
      # HTTP
      ##
  
-@@ -1525,15 +1881,11 @@
+@@ -1525,15 +1883,11 @@
          #litmus = request.headers.getRawHeaders("x-litmus")
          #if litmus: log.msg("*** Litmus test: %s ***" % (litmus,))
  
@@ -936,7 +965,7 @@
  
          def setHeaders(response):
              response = IResponse(response)
-@@ -1567,7 +1919,7 @@
+@@ -1567,7 +1921,7 @@
      def findChildren(self, depth, request, callback, privileges=None, inherited_aces=None):
          return succeed(None)
  
@@ -945,7 +974,7 @@
      """
      Resource representing a WebDAV principal.  (RFC 3744, section 2)
      """
-@@ -1577,7 +1929,7 @@
+@@ -1577,7 +1931,7 @@
      # WebDAV
      ##
  
@@ -954,7 +983,7 @@
          (dav_namespace, "alternate-URI-set"),
          (dav_namespace, "principal-URL"    ),
          (dav_namespace, "group-member-set" ),
-@@ -1585,14 +1937,11 @@
+@@ -1585,14 +1939,11 @@
      )
  
      def davComplianceClasses(self):
@@ -970,7 +999,7 @@
      def readProperty(self, property, request):
          def defer():
              if type(property) is tuple:
-@@ -1610,10 +1959,20 @@
+@@ -1610,10 +1961,20 @@
                      return davxml.PrincipalURL(davxml.HRef(self.principalURL()))
  
                  if name == "group-member-set":
@@ -993,7 +1022,7 @@
  
                  if name == "resourcetype":
                      if self.isCollection():
-@@ -1655,7 +2014,7 @@
+@@ -1655,7 +2016,7 @@
          principals.  Subclasses should override this method to provide member
          URLs for this resource if appropriate.
          """
@@ -1002,7 +1031,7 @@
  
      def groupMemberships(self):
          """
-@@ -1666,6 +2025,7 @@
+@@ -1666,6 +2027,7 @@
          """
          unimplemented(self)
  
@@ -1010,7 +1039,7 @@
      def principalMatch(self, href):
          """
          Check whether the supplied principal matches this principal or is a
-@@ -1675,10 +2035,33 @@
+@@ -1675,10 +2037,33 @@
          """
          uri = str(href)
          if self.principalURL() == uri:
@@ -1046,7 +1075,7 @@
  class AccessDeniedError(Exception):
      def __init__(self, errors):
          """ 
-@@ -1718,6 +2101,37 @@
+@@ -1718,6 +2103,37 @@
  davxml.registerElement(TwistedACLInheritable)
  davxml.ACE.allowed_children[(twisted_dav_namespace, "inheritable")] = (0, 1)
  

Modified: CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/cluster.py
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/cluster.py	2009-12-11 18:41:21 UTC (rev 4857)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/cluster.py	2009-12-11 19:55:47 UTC (rev 4858)
@@ -19,10 +19,12 @@
 import tempfile
 import socket
 import time
+import signal
 
 from twisted.runner import procmon
 from twisted.application import internet, service
 from twisted.internet import reactor, process
+from twisted.internet.threads import deferToThread
 from twisted.python.reflect import namedClass
 
 from twistedcaldav.accesslog import AMPLoggingFactory, RotatingFileAccessLoggingObserver
@@ -176,7 +178,7 @@
         self.consistency = reactor.callLater(self.consistencyDelay,
                                              self._checkConsistency)
 
-    def signalAll(self, signal, startswithname=None):
+    def signalAll(self, signal, startswithname=None, seconds=0):
         """
         Send a signal to all child processes.
 
@@ -185,9 +187,11 @@
         @param startswithname: is set only signal those processes whose name starts with this string
         @type signal: C{str}
         """
+        delay = 0
         for name in self.processes.keys():
             if startswithname is None or name.startswith(startswithname):
-                self.signalProcess(signal, name)
+                reactor.callLater(delay, self.signalProcess, signal, name)
+                delay += seconds
 
     def signalProcess(self, signal, name):
         """
@@ -233,15 +237,24 @@
 
     # Refresh directory information on behalf of the child processes
     directoryClass = namedClass(config.DirectoryService["type"])
-    directory = directoryClass(dosetup=False, **config.DirectoryService["params"])
-    directory.refresh()
+    directory = directoryClass(dosetup=True, doreload=False, **config.DirectoryService["params"])
+    directory.refresh(master=True)
 
+    # Register USR1 handler
+    def sigusr1_handler(num, frame):
+        log.warn("SIGUSR1 recieved in master, triggering directory refresh")
+        deferToThread(directory.refresh, loop=False, master=True)
+        return
+
+    signal.signal(signal.SIGUSR1, sigusr1_handler)
+
     s = service.MultiService()
 
     monitor = DelayedStartupProcessMonitor()
     monitor.setServiceParent(s)
-    s.processMonitor = monitor
 
+    directory.processMonitor = s.processMonitor = monitor
+
     parentEnv = {
         'PATH': os.environ.get('PATH', ''),
         'PYTHONPATH': os.environ.get('PYTHONPATH', ''),

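The new seconds argument turns signalAll() into a staggered rollout: each matching child is scheduled seconds later than the one before it, so the children do not all re-read the directory cache at the same moment. A hypothetical call against the monitor above:

    import signal

    # With eight "caldav" children and seconds=10, SIGUSR1 lands at
    # t=0s, 10s, ..., 70s -- one child reloads at a time.
    monitor.signalAll(signal.SIGUSR1, "caldav", seconds=10)
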
Modified: CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/config.py
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/config.py	2009-12-11 18:41:21 UTC (rev 4857)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/config.py	2009-12-11 19:55:47 UTC (rev 4858)
@@ -72,6 +72,7 @@
     "twistedcaldav.directory.appleopendirectory.OpenDirectoryService": {
         "node": "/Search",
         "cacheTimeout": 30,
+        "signalIntervalSeconds": 10,
     },
 }
 
@@ -352,6 +353,7 @@
     "IdleConnectionTimeOut": 15,
     "UIDReservationTimeOut": 30 * 60,
 
+    "MaxAllowedInstances": 3000,
 
     #
     # Localization

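Both new keys follow the usual configuration pattern: the defaults above apply unless caldavd.plist overrides them, and code reads them as plain attributes. A small sketch (the plist path is illustrative):

    from twistedcaldav.config import config

    config.loadConfig("/etc/caldavd/caldavd.plist")
    print config.MaxAllowedInstances  # 3000 unless overridden in the plist
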
Modified: CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/directory/appleopendirectory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/directory/appleopendirectory.py	2009-12-11 18:41:21 UTC (rev 4857)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/directory/appleopendirectory.py	2009-12-11 19:55:47 UTC (rev 4858)
@@ -24,6 +24,7 @@
 ]
 
 import sys
+import signal
 
 import opendirectory
 import dsattributes
@@ -61,7 +62,7 @@
     def __repr__(self):
         return "<%s %r: %r>" % (self.__class__.__name__, self.realmName, self.node)
 
-    def __init__(self, node="/Search", dosetup=True, cacheTimeout=30, **kwds):
+    def __init__(self, node="/Search", dosetup=True, doreload=True, cacheTimeout=30, signalIntervalSeconds=10, **kwds):
         """
         @param node: an OpenDirectory node name to bind to.
         @param dosetup: if C{True} then the directory records are initialized,
@@ -78,16 +79,19 @@
         self.directory = directory
         self.node = node
         self.cacheTimeout = cacheTimeout
+        self.signalIntervalSeconds = signalIntervalSeconds
         self._records = {}
         self._delayedCalls = set()
+        self._refreshing = False
 
         self.isWorkgroupServer = False
 
         if dosetup:
-            for recordType in self.recordTypes():
-                self.recordsForType(recordType)
+            if doreload:
+                for recordType in self.recordTypes():
+                    self.recordsForType(recordType)
 
-    def refresh(self, loop=True):
+    def refresh(self, loop=True, master=False):
         """
         This service works by having the master process call this method
         which queries OD for all records, storing the pickled results into
@@ -98,57 +102,82 @@
         these files.
         """
 
+        if self._refreshing:
+            self.log_warn("Already refreshing directory cache")
+            return
+
+        self._refreshing = True
+
         def _refresh(self):
-            dataRoot = FilePath(config.DataRoot)
-            cacheDir = dataRoot.child("DirectoryCache")
-            if not cacheDir.exists():
-                cacheDir.createDirectory()
+            try:
+                dataRoot = FilePath(config.DataRoot)
+                cacheDir = dataRoot.child("DirectoryCache")
+                if not cacheDir.exists():
+                    cacheDir.createDirectory()
 
-            for recordType in self.recordTypes():
-                self.log_debug("Master fetching %s from directory" % (recordType,))
-                cacheFile = cacheDir.child(recordType)
-                try:
-                    results = self._queryDirectory(recordType)
-                except Exception, e:
-                    self.log_error("Master query for %s failed: %s" % (recordType, e))
-                    continue
+                dataWritten = False
+                for recordType in self.recordTypes():
+                    self.log_warn("Master fetching %s for directory cache" % (recordType,))
+                    cacheFile = cacheDir.child(recordType)
+                    try:
+                        results = self._queryDirectory(recordType)
+                    except Exception, e:
+                        self.log_error("Master query for %s failed: %s" % (recordType, e))
+                        continue
 
-                results.sort()
-                numNewResults = len(results)
-                pickled = pickle.dumps(results)
-                needsWrite = True
-                if cacheFile.exists():
-                    prevPickled = cacheFile.getContent()
-                    if prevPickled == pickled:
-                        needsWrite = False
-                    else:
-                        prevResults = pickle.loads(prevPickled)
-                        numPrevResults = len(prevResults)
-                        if numPrevResults == 0:
-                            needsWrite = True
+                    results.sort()
+                    numNewResults = len(results)
+                    pickled = pickle.dumps(results)
+                    needsWrite = True
+                    if cacheFile.exists():
+                        prevPickled = cacheFile.getContent()
+                        if prevPickled == pickled:
+                            needsWrite = False
                         else:
-                            if float(numNewResults) / numPrevResults < 0.5:
-                                # New results is less than half of what it used
-                                # to be -- this indicates we might not have
-                                # gotten back enough records from OD.  Don't
-                                # write out the file, but log an error.
-                                self.log_error("OD results for %s substantially less than last time: was %d, now %d." % (recordType, numPrevResults, numNewResults))
-                                needsWrite = False
-                                continue
+                            prevResults = pickle.loads(prevPickled)
+                            numPrevResults = len(prevResults)
+                            if numPrevResults == 0:
+                                needsWrite = True
+                            else:
+                                if float(numNewResults) / numPrevResults < 0.5:
+                                    # The new result count is less than half of
+                                    # what it was -- this indicates we might not
+                                    # have gotten back enough records from OD.
+                                    # Don't write out the file, but log an error.
+                                    self.log_error("OD results for %s substantially less than last time: was %d, now %d." % (recordType, numPrevResults, numNewResults))
+                                    needsWrite = False
+                                    continue
 
-                if needsWrite:
-                    self.log_info("Saving cache file for %s (%d items)" % (recordType, numNewResults))
-                    cacheFile.setContent(pickled)
-                else:
-                    self.log_debug("%s info hasn't changed" % (recordType,))
+                    if needsWrite:
+                        self.log_warn("Saving directory cache file for %s (%d items)" % (recordType, numNewResults))
+                        cacheFile.setContent(pickled)
+                        dataWritten = True
+                    else:
+                        self.log_warn("%s info hasn't changed" % (recordType,))
 
+                if dataWritten and hasattr(self, 'processMonitor'):
+                    self.processMonitor.signalAll(signal.SIGUSR1, "caldav", seconds=self.signalIntervalSeconds)
+            finally:
+                self._refreshing = False
+
         def _refreshInThread(self):
             return deferToThread(_refresh, self)
 
-        _refresh(self)
+        if master:
+            _refresh(self)
 
-        if loop:
-            LoopingCall(_refreshInThread, self).start(self.cacheTimeout * 60)
+            if loop:
+                LoopingCall(_refreshInThread, self).start(self.cacheTimeout * 60)
+        else:
+            def _reloadCaches():
+                try:
+                    self.log_warn("Reading directory cache files")
+                    for recordType in self.recordTypes():
+                        self.reloadCache(recordType)
+                    self.log_warn("Done reading directory cache files")
+                finally:
+                    self._refreshing = False
+            deferToThread(_reloadCaches)
 
 
 
@@ -245,20 +274,7 @@
         except KeyError:
             self.reloadCache(recordType)
             storage = self._records[recordType]
-        else:
-            if storage["status"] == "stale":
-                storage["status"] = "loading"
 
-                def onError(f):
-                    storage["status"] = "stale" # Keep trying
-                    self.log_error(
-                        "Unable to load records of type %s from OpenDirectory due to unexpected error: %s"
-                        % (recordType, f)
-                    )
-
-                d = deferToThread(self.reloadCache, recordType)
-                d.addErrback(onError)
-
         return storage
 
     def recordsForType(self, recordType):
@@ -400,17 +416,6 @@
 
     def reloadCache(self, recordType, forceUpdate=False):
 
-        def rot():
-            storage["status"] = "stale"
-            removals = set()
-            for call in self._delayedCalls:
-                if not call.active():
-                    removals.add(call)
-            for item in removals:
-                self._delayedCalls.remove(item)
-
-        cacheTimeout = 60 # child processes always check once per minute
-
         dataRoot = FilePath(config.DataRoot)
         cacheDir = dataRoot.child("DirectoryCache")
         if not cacheDir.exists():
@@ -426,11 +431,14 @@
             lastModified = cacheFile.getModificationTime()
             try:
                 storage = self._records[recordType]
+                if storage["status"] == "loading":
+                    self.log_warn("Directory cache file for %s already being reloaded" % (recordType,))
+                    return
                 if not forceUpdate and (lastModified <= storage["last modified"]):
                     self.log_debug("Directory cache file for %s unchanged" % (recordType,))
                     storage["status"] = "new" # mark this as not stale
-                    self._delayedCalls.add(callLater(cacheTimeout, rot))
                     return
+                storage["status"] = "loading"
             except KeyError:
                 # Haven't read the file before
                 pass
@@ -439,7 +447,6 @@
 
             pickled = cacheFile.getContent()
             results = pickle.loads(pickled)
-            # results = self._queryDirectory(recordType)
 
         records = {}
         guids   = {}
@@ -460,6 +467,24 @@
             else:
                 return ()
 
+        def disableRecord(record):
+            self.log_warn("Record disabled due to conflict (record name and GUID must match): %s" % (record,))
+
+            shortName = record.shortName
+            guid      = record.guid
+            cuaddrset = record.calendarUserAddresses
+
+            disabledNames.add(shortName)
+            disabledGUIDs.add(guid)
+
+            if shortName in records:
+                del records[shortName]
+            if guid in guids:
+                del guids[guid]
+            for cuaddr in cuaddrset:
+                if cuaddr in cuaddrs:
+                    del cuaddrs[cuaddr]
+
         for (recordShortName, value) in results:
 
             # Now get useful record info.
@@ -505,24 +530,6 @@
             d = augment.AugmentService.getAugmentRecord(record.guid)
             d.addCallback(lambda x:record.addAugmentInformation(x))
 
-            def disableRecord(record):
-                self.log_warn("Record disabled due to conflict (record name and GUID must match): %s" % (record,))
-
-                shortName = record.shortName
-                guid      = record.guid
-                cuaddrset = record.calendarUserAddresses
-
-                disabledNames.add(shortName)
-                disabledGUIDs.add(guid)
-
-                if shortName in records:
-                    del records[shortName]
-                if guid in guids:
-                    del guids[guid]
-                for cuaddr in cuaddrset:
-                    if cuaddr in cuaddrs:
-                        del cuaddrs[cuaddr]
-
             # Check for disabled items
             if record.shortName in disabledNames or record.guid in disabledGUIDs:
                 disableRecord(record)
@@ -567,13 +574,11 @@
         if recordType == DirectoryService.recordType_groups:
             storage["groupsForGUID"] = groupsForGUID
 
-        self._delayedCalls.add(callLater(cacheTimeout, rot))
-
         self._records[recordType] = storage
 
         self.log_info(
-            "Added %d records to %s OD record cache; expires in %d seconds"
-            % (len(self._records[recordType]["guids"]), recordType, cacheTimeout)
+            "Added %d records to %s OD record cache"
+            % (len(self._records[recordType]["guids"]), recordType)
         )
 
     def _queryDirectory(self, recordType):

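The reworked refresh() splits the cache protocol by role: the master pickles each record type into DataRoot/DirectoryCache and only rewrites (and signals children) when the data actually changed, while children compare modification times and unpickle. A minimal sketch of that file protocol (names illustrative; no locking shown):

    import os
    import pickle

    def masterWrite(cacheFile, results):
        # Returns True when the cache changed and children need a SIGUSR1.
        pickled = pickle.dumps(sorted(results))
        if os.path.exists(cacheFile):
            if open(cacheFile, "rb").read() == pickled:
                return False
        open(cacheFile, "wb").write(pickled)
        return True

    def childRead(cacheFile, lastModified):
        # Returns (records, mtime), or None when the cache is unchanged.
        mtime = os.path.getmtime(cacheFile)
        if mtime <= lastModified:
            return None
        return pickle.loads(open(cacheFile, "rb").read()), mtime
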
Modified: CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/directory/directory.py	2009-12-11 18:41:21 UTC (rev 4857)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/directory/directory.py	2009-12-11 19:55:47 UTC (rev 4858)
@@ -141,13 +141,14 @@
             for record in self.listRecords(recordType):
                 yield record
 
-    def refresh(self):
+    def refresh(self, master=False):
         """
         This gets called in the master process to give the directory service
         a chance to refresh a cache of directory information
         """
         pass
 
+
 class DirectoryRecord(LoggingMixIn):
     implements(IDirectoryRecord)
 

Modified: CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/instance.py
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/instance.py	2009-12-11 18:41:21 UTC (rev 4857)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/instance.py	2009-12-11 19:55:47 UTC (rev 4858)
@@ -20,22 +20,18 @@
 
 import datetime
 
+from twistedcaldav.config import config
 from twistedcaldav.dateops import normalizeForIndex, compareDateTime, differenceDateTime, periodEnd
 
 from vobject.icalendar import utc
 
-# The maximum number of instances we will ezpand out to.
-# Raise a TooManyInstancesError exception if we exceed this.
-max_allowed_instances = 1000
+class TooManyInstancesError(RuntimeError):
+    def __init__(self, count):
+        RuntimeError.__init__(self, "Too many recurrance instances (%s > %s)"
+                              % (count, config.MaxAllowedInstances))
+        self.count = count
+        self.max_allowed = config.MaxAllowedInstances
 
-class TooManyInstancesError(Exception):
-    def __init__(self):
-        Exception.__init__(self)
-        self.max_allowed = max_allowed_instances
-
-    def __repr__(self):
-        return "<%s max:%s>" % (self.__class__.__name__, self.max_allowed)
-
 class Instance(object):
     
     __slots__ = ["component", "start", "end", "rid", "overridden", "future"]
@@ -162,8 +158,8 @@
         self.instances[str(instance.rid)] = instance
         
         # Check for too many instances
-        if len(self.instances) > max_allowed_instances:
-            raise TooManyInstancesError()
+        if len(self.instances) > config.MaxAllowedInstances:
+            raise TooManyInstancesError(len(self.instances))
 
     def _addMasterEventComponent(self, component, limit):
         """

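Since the exception now carries both the offending count and the configured ceiling, callers can report the overflow precisely. A hypothetical caller:

    from twistedcaldav.instance import TooManyInstancesError

    try:
        calendar.expandTimeRanges(limit)  # hypothetical expansion call
    except TooManyInstancesError, e:
        print "Expansion aborted: %d instances, limit %d" % (
            e.count, e.max_allowed)
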
Modified: CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/method/put.py
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/method/put.py	2009-12-11 18:41:21 UTC (rev 4857)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/method/put.py	2009-12-11 19:55:47 UTC (rev 4858)
@@ -55,7 +55,7 @@
             calendardata = d.getResult()
             if not hasattr(request, "extendedLogItems"):
                 request.extendedLogItems = {}
-            request.extendedLogItems["cl"] = str(len(calendardata))
+            request.extendedLogItems["cl"] = str(len(calendardata)) if calendardata else "0"
 
             # We must have some data at this point
             if calendardata is None:

Modified: CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/tap.py
===================================================================
--- CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/tap.py	2009-12-11 18:41:21 UTC (rev 4857)
+++ CalendarServer/branches/users/cdaboo/deployment-partition-4722/twistedcaldav/tap.py	2009-12-11 19:55:47 UTC (rev 4858)
@@ -17,6 +17,7 @@
 import os
 import stat
 import socket
+import signal
 
 from zope.interface import implements
 
@@ -876,6 +877,15 @@
         # Change log level back to what it was before
         setLogLevelForNamespace(None, oldLogLevel)
 
+
+        # Register USR1 handler
+        def sigusr1_handler(num, frame):
+            log.debug("SIGUSR1 recieved, triggering directory refresh")
+            baseDirectory.refresh()
+            return
+
+        signal.signal(signal.SIGUSR1, sigusr1_handler)
+
         return service
 
     makeService_Combined = makeService_Combined
@@ -911,7 +921,6 @@
                 else:
                     return "%s: %s" % (frame.f_code.co_name, frame.f_lineno)
 
-            import signal
             def sighup_handler(num, frame):
                 log.info("SIGHUP recieved at %s" % (location(frame),))
 
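Together with the cluster.py change, both the master and each child now treat SIGUSR1 as "refresh the directory cache". Operationally the refresh can be triggered by hand, e.g. (the pid file location is an assumption, not part of this changeset):

    import os
    import signal

    masterPid = int(open("/var/run/caldavd.pid").read())
    os.kill(masterPid, signal.SIGUSR1)
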