[CalendarServer-changes] [4710] CalendarServer/branches/users/wsanchez/deployment

source_changes at macosforge.org
Thu Nov 5 15:13:09 PST 2009


Revision: 4710
          http://trac.macosforge.org/projects/calendarserver/changeset/4710
Author:   sagen at apple.com
Date:     2009-11-05 15:13:07 -0800 (Thu, 05 Nov 2009)
Log Message:
-----------
Adds single-accept queue and master-process directory caching

Modified Paths:
--------------
    CalendarServer/branches/users/wsanchez/deployment/conf/caldavd-test.plist
    CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/accesslog.py
    CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/cluster.py
    CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/config.py
    CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/appleopendirectory.py
    CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/directory.py
    CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/test/test_opendirectoryrecords.py
    CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/xmlfile.py
    CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/httpfactory.py
    CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/tap.py
    CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/test/test_tap.py

Property Changed:
----------------
    CalendarServer/branches/users/wsanchez/deployment/


Property changes on: CalendarServer/branches/users/wsanchez/deployment
___________________________________________________________________
Added: svn:mergeinfo
   + /CalendarServer/branches/users/sagen/deployment-inherit-fds-4571:4573-4709

Modified: CalendarServer/branches/users/wsanchez/deployment/conf/caldavd-test.plist
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/conf/caldavd-test.plist	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/conf/caldavd-test.plist	2009-11-05 23:13:07 UTC (rev 4710)
@@ -61,6 +61,11 @@
   <key>BindSSLPorts</key>
   <array></array>
 
+  <!-- Connection settings -->
+  <key>MaxRequests</key>
+  <integer>1</integer>
+  <key>MaxAccepts</key>
+  <integer>1</integer>
 
   <!--
     Data Store
@@ -335,7 +340,7 @@
   <key>MultiProcess</key>
   <dict>
     <key>ProcessCount</key>
-    <integer>0</integer>
+    <integer>2</integer>
 
     <key>LoadBalancer</key>
     <dict>
@@ -350,6 +355,8 @@
 
       <!-- Least Connections and Round Robin -->
       <!-- <string>LeastConnectionsAndRoundRobin</string> -->
+      <key>Enabled</key>
+      <false/>
     </dict>
 
   </dict>

Modified: CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/accesslog.py
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/accesslog.py	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/accesslog.py	2009-11-05 23:13:07 UTC (rev 4710)
@@ -117,7 +117,10 @@
             )
 
             try:
-                serverInstance = request.chanRequest.transport.server.port
+                if config.EnableConnectionInheriting:
+                    serverInstance = config.LogID
+                else:
+                    serverInstance = request.chanRequest.transport.server.port
             except AttributeError:
                 serverInstance = "Unknown"
             if config.EnableExtendedAccessLog:

Modified: CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/cluster.py
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/cluster.py	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/cluster.py	2009-11-05 23:13:07 UTC (rev 4710)
@@ -17,15 +17,19 @@
 import os
 import sys
 import tempfile
+import socket
+import time
 
 from twisted.runner import procmon
 from twisted.application import internet, service
 from twisted.internet import reactor, process
+from twisted.python.reflect import namedClass
 
 from twistedcaldav.accesslog import AMPLoggingFactory, RotatingFileAccessLoggingObserver
 from twistedcaldav.config import config, ConfigurationError
 from twistedcaldav.util import getNCPU
 from twistedcaldav.log import Logger
+from twistedcaldav.directory.appleopendirectory import OpenDirectoryService
 
 log = Logger()
 
@@ -54,17 +58,24 @@
 class TwistdSlaveProcess(object):
     prefix = "caldav"
 
-    def __init__(self, twistd, tapname, configFile,
-                 interfaces, port, sslPort):
+    def __init__(self, twistd, tapname, configFile, id,
+                 interfaces, port, sslPort,
+                 inheritFDs=None, inheritSSLFDs=None):
+
         self.twistd = twistd
 
         self.tapname = tapname
 
         self.configFile = configFile
 
+        self.id = id
+
         self.ports = port
         self.sslPorts = sslPort
 
+        self.inheritFDs = inheritFDs
+        self.inheritSSLFDs = inheritSSLFDs
+
         self.interfaces = interfaces
 
     def getName(self):
@@ -72,6 +83,8 @@
             return '%s-%s' % (self.prefix, self.ports[0])
         elif self.sslPorts is not None:
             return '%s-%s' % (self.prefix, self.sslPorts[0])
+        elif self.inheritFDs or self.inheritSSLFDs:
+            return '%s-%s' % (self.prefix, self.id)
 
         raise ConfigurationError(
             "Can't create TwistdSlaveProcess without a TCP Port")
@@ -101,6 +114,7 @@
              '-o', 'BindAddresses=%s' % (','.join(self.interfaces),),
              '-o', 'PIDFile=None',
              '-o', 'ErrorLogFile=None',
+             '-o', 'LogID=%s' % (self.id,),
              '-o', 'MultiProcess/ProcessCount=%d' % (
                     config.MultiProcess['ProcessCount'],)])
 
@@ -118,9 +132,16 @@
                     '-o',
                     'BindSSLPorts=%s' % (','.join(map(str, self.sslPorts)),)])
 
+        if self.inheritFDs:
+            args.extend([
+                    '-o',
+                    'InheritFDs=%s' % (','.join(map(str, self.inheritFDs)),)])
 
+        if self.inheritSSLFDs:
+            args.extend([
+                    '-o',
+                    'InheritSSLFDs=%s' % (','.join(map(str, self.inheritSSLFDs)),)])
 
-
         return args
 
     def getHostLine(self, ssl=False):
@@ -133,6 +154,9 @@
         if ssl and self.sslPorts is not None:
             port = self.sslPorts
 
+        if self.inheritFDs or self.inheritSSLFDs:
+            port = [self.id]
+
         if port is None:
             raise ConfigurationError(
                 "Can not add a host without a port")
@@ -186,8 +210,38 @@
         except process.ProcessExitedAlready:
             pass
 
+    def startProcess(self, name):
+        if self.protocols.has_key(name):
+            return
+        p = self.protocols[name] = procmon.LoggingProtocol()
+        p.service = self
+        p.name = name
+        args, uid, gid, env = self.processes[name]
+        self.timeStarted[name] = time.time()
+
+        childFDs = { 0 : "w", 1 : "r", 2 : "r" }
+
+        # Examine args for -o InheritFDs= and -o InheritSSLFDs=
+        # Add any file descriptors listed in those args to the childFDs
+        # dictionary so those don't get closed across the spawn.
+        for i in xrange(len(args)-1):
+            if args[i] == "-o" and args[i+1].startswith("Inherit"):
+                for fd in map(int, args[i+1].split("=")[1].split(",")):
+                    childFDs[fd] = fd
+
+        reactor.spawnProcess(p, args[0], args, uid=uid, gid=gid, env=env,
+            childFDs=childFDs)
+
 def makeService_Combined(self, options):
+
+
+    # Refresh directory information on behalf of the child processes
+    directoryClass = namedClass(config.DirectoryService["type"])
+    directory = directoryClass(dosetup=False, **config.DirectoryService["params"])
+    directory.refresh()
+
     s = service.MultiService()
+
     monitor = DelayedStartupProcessMonitor()
     monitor.setServiceParent(s)
     s.processMonitor = monitor
@@ -205,6 +259,9 @@
 
     bindAddress = ['127.0.0.1']
 
+    inheritFDs = []
+    inheritSSLFDs = []
+
     # Attempt to calculate the number of processes to use
     # 1 per processor
 
@@ -234,19 +291,68 @@
         if config.BindSSLPorts:
             sslPort = config.BindSSLPorts
 
+
     if port[0] == 0:
         port = None
 
     if sslPort[0] == 0:
         sslPort = None
 
-    # If the load balancer isn't enabled, or if we only have one process
-    # We listen directly on the interfaces.
+    # If we only have one process, disable the software load balancer and
+    # listen directly on the interfaces.
 
-    if ((not config.MultiProcess['LoadBalancer']['Enabled']) or
-        (config.MultiProcess['ProcessCount'] == 1)):
+    if config.MultiProcess['ProcessCount'] == 1:
+        config.MultiProcess['LoadBalancer']['Enabled'] = False
         bindAddress = config.BindAddresses
 
+    elif config.EnableConnectionInheriting:
+        # Open the socket(s) to be inherited by the slaves
+
+        config.MultiProcess['LoadBalancer']['Enabled'] = False
+
+        if not config.BindAddresses:
+            config.BindAddresses = [""]
+
+        s._inheritedSockets = [] # keep a reference to these so they don't close
+
+        for bindAddress in config.BindAddresses:
+            if config.BindHTTPPorts:
+                if config.HTTPPort == 0:
+                    raise UsageError(
+                        "HTTPPort required if BindHTTPPorts is not empty"
+                    )
+            elif config.HTTPPort != 0:
+                config.BindHTTPPorts = [config.HTTPPort]
+
+            if config.BindSSLPorts:
+                if config.SSLPort == 0:
+                    raise UsageError(
+                        "SSLPort required if BindSSLPorts is not empty"
+                    )
+            elif config.SSLPort != 0:
+                config.BindSSLPorts = [config.SSLPort]
+
+            def _openSocket(addr, port):
+                log.info("Opening socket for inheritance at %s:%d" % (addr, port))
+                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                sock.setblocking(0)
+                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+                sock.bind((addr, port))
+                sock.listen(config.ListenBacklog)
+                s._inheritedSockets.append(sock)
+                return sock
+
+            for portNum in config.BindHTTPPorts:
+                sock = _openSocket(bindAddress, int(portNum))
+                inheritFDs.append(sock.fileno())
+
+            for portNum in config.BindSSLPorts:
+                sock = _openSocket(bindAddress, int(portNum))
+                inheritSSLFDs.append(sock.fileno())
+
+    if not config.MultiProcess['LoadBalancer']['Enabled']:
+        bindAddress = config.BindAddresses
+
     for p in xrange(0, config.MultiProcess['ProcessCount']):
         if config.MultiProcess['ProcessCount'] > 1:
             if port is not None:
@@ -255,11 +361,21 @@
             if sslPort is not None:
                 sslPort = [sslPort[0] + 1]
 
+        if inheritFDs:
+            port = None
+
+        if inheritSSLFDs:
+            sslPort = None
+
         process = TwistdSlaveProcess(config.Twisted['twistd'],
                                      self.tapname,
                                      options['config'],
+                                     p,
                                      bindAddress,
-                                     port, sslPort)
+                                     port, sslPort,
+                                     inheritFDs=inheritFDs,
+                                     inheritSSLFDs=inheritSSLFDs
+                                     )
 
         monitor.addProcess(process.getName(),
                            process.getCommandLine(),
@@ -340,8 +456,6 @@
                                      config.PythonDirector['pydir'],
                                      fname],
                            env=parentEnv)
-
-
     if config.Memcached["ServerEnabled"]:
         log.msg("Adding memcached service")
 

Modified: CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/config.py
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/config.py	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/config.py	2009-11-05 23:13:07 UTC (rev 4710)
@@ -100,6 +100,9 @@
     "BindAddresses": [],   # List of IP addresses to bind to [empty = all]
     "BindHTTPPorts": [],   # List of port numbers to bind to for HTTP [empty = same as "Port"]
     "BindSSLPorts" : [],   # List of port numbers to bind to for SSL [empty = same as "SSLPort"]
+    "EnableConnectionInheriting" : False,
+    "InheritFDs"   : [],   # File descriptors to inherit for HTTP requests (empty = don't inherit)
+    "InheritSSLFDs": [],   # File descriptors to inherit for HTTPS requests (empty = don't inherit)
 
     #
     # Data store
@@ -168,6 +171,7 @@
     "EnableExtendedAccessLog" : True,
     "DefaultLogLevel"   : "",
     "LogLevels"         : {},
+    "LogID"          : "",
 
     "AccountingCategories": {
         "iTIP": False,
@@ -257,10 +261,13 @@
     # Performance tuning
     #
 
+    "EnableConnectionInheriting": True,
     # Set the maximum number of outstanding requests to this server.
-    "MaxRequests": 600,
+    "MaxRequests": 1,
+    "MaxAccepts": 1,
+    "ResumeRequests": 999,
 
-    "ListenBacklog": 50,
+    "ListenBacklog": 2024,
     "IdleConnectionTimeOut": 15,
     "UIDReservationTimeOut": 30 * 60,
 

Modified: CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/appleopendirectory.py
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/appleopendirectory.py	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/appleopendirectory.py	2009-11-05 23:13:07 UTC (rev 4710)
@@ -34,6 +34,8 @@
 import dsattributes
 import dsquery
 import memcacheclient
+import memcacheclient
+import cPickle as pickle
 
 try:
     from hashlib import md5
@@ -43,8 +45,10 @@
 
 from twisted.internet.reactor import callLater
 from twisted.internet.threads import deferToThread
+from twisted.internet.task import LoopingCall
 from twisted.cred.credentials import UsernamePassword
 from twisted.web2.auth.digest import DigestedCredentials
+from twisted.python.filepath import FilePath
 
 from twistedcaldav.config import config
 from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
@@ -64,7 +68,7 @@
     def __repr__(self):
         return "<%s %r: %r>" % (self.__class__.__name__, self.realmName, self.node)
 
-    def __init__(self, node="/Search", requireComputerRecord=True, dosetup=True, cacheTimeout=30):
+    def __init__(self, node="/Search", requireComputerRecord=True, dosetup=True, cacheTimeout=30, **kwds):
         """
         @param node: an OpenDirectory node name to bind to.
         @param requireComputerRecord: C{True} if the directory schema is to be used to determine
@@ -110,6 +114,71 @@
             for recordType in self.recordTypes():
                 self.recordsForType(recordType)
 
+    def refresh(self, loop=True):
+        """
+        This service works by having the master process call this method
+        which queries OD for all records, storing the pickled results into
+        files that the child processes stat/read every minute.
+        The files are only written by this method if there are actually
+        changes in the results.
+        The reloadCache( ) method below used to talk to OD, but now it reads
+        these files.
+        """
+
+        def _refresh(self):
+            dataRoot = FilePath(config.DataRoot)
+            cacheDir = dataRoot.child("DirectoryCache")
+            if not cacheDir.exists():
+                cacheDir.createDirectory()
+
+            for recordType in self.recordTypes():
+                self.log_debug("Master fetching %s from directory" % (recordType,))
+                cacheFile = cacheDir.child(recordType)
+                try:
+                    results = self._queryDirectory(recordType)
+                except Exception, e:
+                    self.log_error("Master query for %s failed: %s" % (recordType, e))
+                    continue
+
+                results.sort()
+                numNewResults = len(results)
+                pickled = pickle.dumps(results)
+                needsWrite = True
+                if cacheFile.exists():
+                    prevPickled = cacheFile.getContent()
+                    if prevPickled == pickled:
+                        needsWrite = False
+                    else:
+                        prevResults = pickle.loads(prevPickled)
+                        numPrevResults = len(prevResults)
+                        if numPrevResults == 0:
+                            needsWrite = True
+                        else:
+                            if float(numNewResults) / numPrevResults < 0.5:
+                                # New results is less than half of what it used
+                                # to be -- this indicates we might not have
+                                # gotten back enough records from OD.  Don't
+                                # write out the file, but log an error.
+                                self.log_error("OD results for %s substantially less than last time: was %d, now %d." % (recordType, numPrevResults, numNewResults))
+                                needsWrite = False
+                                continue
+
+                if needsWrite:
+                    self.log_info("Saving cache file for %s (%d items)" % (recordType, numNewResults))
+                    cacheFile.setContent(pickled)
+                else:
+                    self.log_debug("%s info hasn't changed" % (recordType,))
+
+        def _refreshInThread(self):
+            return deferToThread(_refresh, self)
+
+        _refresh(self)
+
+        if loop:
+            LoopingCall(_refreshInThread, self).start(self.cacheTimeout * 60)
+
+
+
     def _expandGroupMembership(self, members, nestedGroups, processedGUIDs=None):
 
         if processedGUIDs is None:
@@ -445,71 +514,28 @@
         try:
             return self.recordsForType(recordType)[shortName]
         except KeyError:
-            # Check negative cache
-            if shortName in self._storage(recordType)["disabled names"]:
-                return None
+            return None
 
-            # Cache miss; try looking the record up, in case it is new
-            # FIXME: This is a blocking call (hopefully it's a fast one)
-            self.reloadCache(recordType, shortName=shortName)
-            record = self.recordsForType(recordType).get(shortName, None)
-            if record is None:
-                # Add to negative cache
-                self._storage(recordType)["disabled names"].add(shortName)
-            return record
-
     def recordWithGUID(self, guid):
-        def lookup():
-            for recordType in self.recordTypes():
-                record = self._storage(recordType)["guids"].get(guid, None)
-                if record:
-                    return record
-            else:
-                return None
+        for recordType in self.recordTypes():
+            record = self._storage(recordType)["guids"].get(guid, None)
+            if record:
+                return record
+        else:
+            return None
 
-        record = lookup()
 
-        if record is None:
-            # Cache miss; try looking the record up, in case it is new
-            for recordType in self.recordTypes():
-                # Check negative cache
-                if guid in self._storage(recordType)["disabled guids"]:
-                    continue
-
-                self.reloadCache(recordType, guid=guid)
-                record = lookup()
-
-                if record is None:
-                    self._storage(recordType)["disabled guids"].add(guid)
-                else:
-                    self.log_info("Faulted record with GUID %s into %s record cache"
-                                  % (guid, recordType))
-                    break
-            else:
-                # Nothing found; add to negative cache
-                self.log_info("Unable to find any record with GUID %s" % (guid,))
-
-        return record
-
     def recordWithCalendarUserAddress(self, address):
         address = address.lower()
 
-        def lookup():
-            for recordType in self.recordTypes():
-                record = self._storage(recordType)["cuaddrs"].get(address, None)
-                if record:
-                    return record
-            else:
-                return None
+        for recordType in self.recordTypes():
+            record = self._storage(recordType)["cuaddrs"].get(address, None)
+            if record:
+                return record
+        else:
+            return None
 
-        record = lookup()
 
-        if record is None:
-            # Nothing found
-            self.log_info("Unable to find any record with calendar user address %s" % (address,))
-
-        return record
-
     def groupsForGUID(self, guid):
         
         # Lookup in index
@@ -609,47 +635,62 @@
                 self.log_error("OD search failed: %s" % (e,))
                 raise
 
-    def reloadCache(self, recordType, shortName=None, guid=None):
-        if shortName is not None:
-            self.log_info("Faulting record with shortName %s into %s record cache" % (shortName, recordType))
-        elif guid is not None:
-            self.log_info("Faulting record with guid %s into %s record cache" % (guid, recordType))
-        elif shortName is None and guid is None:
-            self.log_info("Reloading %s record cache" % (recordType,))
-        else:
-            raise AssertionError("%r.reloadCache(%s, %s, %s)" % (self, recordType, shortName, guid))
+    def reloadCache(self, recordType, forceUpdate=False):
 
-        results = self._queryDirectory(recordType, shortName=shortName, guid=guid)
-        
-        if shortName is None and guid is None:
-            records = {}
-            guids   = {}
-            cuaddrs = {}
+        def rot():
+            storage["status"] = "stale"
+            removals = set()
+            for call in self._delayedCalls:
+                if not call.active():
+                    removals.add(call)
+            for item in removals:
+                self._delayedCalls.remove(item)
 
-            disabledNames = set()
-            disabledGUIDs = set()
-            
-            if recordType == DirectoryService.recordType_groups:
-                groupsForGUID = {}
-            elif recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
-                proxiesForGUID = {}
-                readOnlyProxiesForGUID = {}
+        cacheTimeout = 60 # child processes always check once per minute
+
+        dataRoot = FilePath(config.DataRoot)
+        cacheDir = dataRoot.child("DirectoryCache")
+        if not cacheDir.exists():
+            self.log_error("Directory cache directory for does not exist: %s" % (cacheDir.path,))
+            return
+
+        cacheFile = cacheDir.child(recordType)
+        if not cacheFile.exists():
+            self.log_debug("Directory cache file for %s does not exist: %s" % (recordType, cacheFile.path))
+            results = []
+            lastModified = 0
         else:
-            storage = self._records[recordType]
+            lastModified = cacheFile.getModificationTime()
+            try:
+                storage = self._records[recordType]
+                if not forceUpdate and (lastModified <= storage["last modified"]):
+                    self.log_debug("Directory cache file for %s unchanged" % (recordType,))
+                    storage["status"] = "new" # mark this as not stale
+                    self._delayedCalls.add(callLater(cacheTimeout, rot))
+                    return
+            except KeyError:
+                # Haven't read the file before
+                pass
 
-            records = storage["records"]
-            guids   = storage["guids"]
-            cuaddrs = storage["cuaddrs"]
+            self.log_debug("Reloading %s record cache" % (recordType,))
 
-            disabledNames = storage["disabled names"]
-            disabledGUIDs = storage["disabled guids"]
-            
-            if recordType == DirectoryService.recordType_groups:
-                groupsForGUID = storage["groupsForGUID"]
-            elif recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
-                proxiesForGUID = storage["proxiesForGUID"]
-                readOnlyProxiesForGUID = storage["readOnlyProxiesForGUID"]
+            pickled = cacheFile.getContent()
+            results = pickle.loads(pickled)
+            # results = self._queryDirectory(recordType)
 
+        records = {}
+        guids   = {}
+        cuaddrs = {}
+
+        disabledNames = set()
+        disabledGUIDs = set()
+        
+        if recordType == DirectoryService.recordType_groups:
+            groupsForGUID = {}
+        elif recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
+            proxiesForGUID = {}
+            readOnlyProxiesForGUID = {}
+
         for (recordShortName, value) in results:
             enabledForCalendaring = True
 
@@ -809,54 +850,38 @@
                         self._indexGroup(record, record._proxyGUIDs, proxiesForGUID)
                         self._indexGroup(record, record._readOnlyProxyGUIDs, readOnlyProxiesForGUID)
 
-        if shortName is None and guid is None:
-            #
-            # Replace the entire cache
-            #
-            storage = {
-                "status"        : "new",
-                "records"       : records,
-                "guids"         : guids,
-                "cuaddrs"       : cuaddrs,
-                "disabled names": disabledNames,
-                "disabled guids": disabledGUIDs,
-            }
+        #
+        # Replace the entire cache
+        #
+        storage = {
+            "status"        : "new",
+            "records"       : records,
+            "guids"         : guids,
+            "cuaddrs"       : cuaddrs,
+            "disabled names": disabledNames,
+            "disabled guids": disabledGUIDs,
+            "last modified" : lastModified,
+        }
 
-            # Add group indexing if needed
-            if recordType == DirectoryService.recordType_groups:
-                storage["groupsForGUID"] = groupsForGUID
+        # Add group indexing if needed
+        if recordType == DirectoryService.recordType_groups:
+            storage["groupsForGUID"] = groupsForGUID
 
-            # Add proxy indexing if needed
-            elif recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
-                storage["proxiesForGUID"] = proxiesForGUID
-                storage["readOnlyProxiesForGUID"] = readOnlyProxiesForGUID
+        # Add proxy indexing if needed
+        elif recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
+            storage["proxiesForGUID"] = proxiesForGUID
+            storage["readOnlyProxiesForGUID"] = readOnlyProxiesForGUID
 
-            def rot():
-                storage["status"] = "stale"
-                removals = set()
-                for call in self._delayedCalls:
-                    if not call.active():
-                        removals.add(call)
-                for item in removals:
-                    self._delayedCalls.remove(item)
+        self._delayedCalls.add(callLater(cacheTimeout, rot))
 
-            #
-            # Add jitter/fuzz factor to avoid stampede for large OD query
-            # Max out the jitter at 60 minutes
-            #
-            cacheTimeout = min(self.cacheTimeout, 60) * 60
-            cacheTimeout = (cacheTimeout * random()) - (cacheTimeout / 2)
-            cacheTimeout += self.cacheTimeout * 60
-            self._delayedCalls.add(callLater(cacheTimeout, rot))
+        self._records[recordType] = storage
 
-            self._records[recordType] = storage
+        self.log_info(
+            "Added %d records to %s OD record cache; expires in %d seconds"
+            % (len(self._records[recordType]["guids"]), recordType, cacheTimeout)
+        )
 
-            self.log_info(
-                "Added %d records to %s OD record cache; expires in %d seconds"
-                % (len(self._records[recordType]["guids"]), recordType, cacheTimeout)
-            )
-
-    def _queryDirectory(self, recordType, shortName=None, guid=None):
+    def _queryDirectory(self, recordType):
         attrs = [
             dsattributes.kDS1AttrGeneratedUID,
             dsattributes.kDS1AttrDistinguishedName,
@@ -887,45 +912,44 @@
 
         if self.requireComputerRecord:
             if self.isWorkgroupServer and recordType == DirectoryService.recordType_users:
-                if shortName is None and guid is None:
-                    self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
-                        self.directory,
-                        dsattributes.kDSNAttrRecordName,
-                        saclGroup,
-                        dsattributes.eDSExact,
-                        False,
-                        dsattributes.kDSStdRecordTypeGroups,
-                        [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups],
-                    ))
-                    results = opendirectory.queryRecordsWithAttribute_list(
-                        self.directory,
-                        dsattributes.kDSNAttrRecordName,
-                        saclGroup,
-                        dsattributes.eDSExact,
-                        False,
-                        dsattributes.kDSStdRecordTypeGroups,
-                        [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups]
-                    )
+                self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
+                    self.directory,
+                    dsattributes.kDSNAttrRecordName,
+                    saclGroup,
+                    dsattributes.eDSExact,
+                    False,
+                    dsattributes.kDSStdRecordTypeGroups,
+                    [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups],
+                ))
+                results = opendirectory.queryRecordsWithAttribute_list(
+                    self.directory,
+                    dsattributes.kDSNAttrRecordName,
+                    saclGroup,
+                    dsattributes.eDSExact,
+                    False,
+                    dsattributes.kDSStdRecordTypeGroups,
+                    [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups]
+                )
 
-                    if len(results) == 1:
-                        members      = results[0][1].get(dsattributes.kDSNAttrGroupMembers, [])
-                        nestedGroups = results[0][1].get(dsattributes.kDSNAttrNestedGroups, [])
-                    else:
-                        members = []
-                        nestedGroups = []
+                if len(results) == 1:
+                    members      = results[0][1].get(dsattributes.kDSNAttrGroupMembers, [])
+                    nestedGroups = results[0][1].get(dsattributes.kDSNAttrNestedGroups, [])
+                else:
+                    members = []
+                    nestedGroups = []
 
-                    guidQueries = []
+                guidQueries = []
 
-                    for GUID in self._expandGroupMembership(members, nestedGroups):
-                        guidQueries.append(
-                            dsquery.match(dsattributes.kDS1AttrGeneratedUID, GUID, dsattributes.eDSExact)
-                        )
+                for GUID in self._expandGroupMembership(members, nestedGroups):
+                    guidQueries.append(
+                        dsquery.match(dsattributes.kDS1AttrGeneratedUID, GUID, dsattributes.eDSExact)
+                    )
 
-                    if not guidQueries:
-                        self.log_warn("No SACL enabled users found.")
-                        return ()
+                if not guidQueries:
+                    self.log_warn("No SACL enabled users found.")
+                    return ()
 
-                    query = dsquery.expression(dsquery.expression.OR, guidQueries)
+                query = dsquery.expression(dsquery.expression.OR, guidQueries)
 
             #
             # For users and groups, we'll load all entries, even if
@@ -950,19 +974,8 @@
                 else:
                     query = dsquery.expression(dsquery.expression.AND, (subquery, query))
 
-        if shortName is not None:
-            subquery = dsquery.match(dsattributes.kDSNAttrRecordName, shortName, dsattributes.eDSExact)
-        elif guid is not None:
-            subquery = dsquery.match(dsattributes.kDS1AttrGeneratedUID, guid, dsattributes.eDSExact)
-        else:
-            subquery = None
+        subquery = None
 
-        if subquery is not None:
-            if query is None:
-                query = subquery
-            else:
-                query = dsquery.expression(dsquery.expression.AND, (subquery, query))
-
         try:
             if query:
                 if isinstance(query, dsquery.match):

Modified: CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/directory.py	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/directory.py	2009-11-05 23:13:07 UTC (rev 4710)
@@ -139,6 +139,12 @@
             for record in self.listRecords(recordType):
                 yield record
 
+    def refresh(self):
+        """
+        This gets called in the master process to give the directory service
+        a chance to refresh a cache of directory information.
+        """
+        pass
 
 class DirectoryRecord(LoggingMixIn):
     implements(IDirectoryRecord)

Modified: CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/test/test_opendirectoryrecords.py
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/test/test_opendirectoryrecords.py	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/test/test_opendirectoryrecords.py	2009-11-05 23:13:07 UTC (rev 4710)
@@ -28,29 +28,21 @@
 else:
     from twistedcaldav.directory.directory import DirectoryService
     from twistedcaldav.directory.util import uuidFromName
+    from twistedcaldav.test.util import TestCase
 
     class OpenDirectoryService (RealOpenDirectoryService):
-        def _queryDirectory(self, recordType, shortName=None, guid=None):
-            if shortName is None and guid is None:
+        def _queryDirectory(self, recordType):
+            try:
                 return self.fakerecords[recordType]
-
-            assert shortName is None or guid is None
-            if guid is not None:
-                guid = guid.lower()
-
-            records = []
-
-            for name, record in self.fakerecords[recordType]:
-                if name == shortName or record[dsattributes.kDS1AttrGeneratedUID] == guid:
-                    records.append((name, record))
-
-            return tuple(records)
+            except KeyError:
+                return []
     
     class ReloadCache(TestCase):
         def setUp(self):
             super(ReloadCache, self).setUp()
             self._service = OpenDirectoryService(node="/Search", dosetup=False)
             self._service.servicetags.add("FE588D50-0514-4DF9-BCB5-8ECA5F3DA274:030572AE-ABEC-4E0F-83C9-FCA304769E5F:calendar")
+            self._service.fakerecords = { }
             
         def tearDown(self):
             for call in self._service._delayedCalls:
@@ -110,6 +102,7 @@
                     fakeODRecord("Location 02"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
             self._service.reloadCache(DirectoryService.recordType_groups)
@@ -155,6 +148,7 @@
                     fakeODRecord("Location 04", addLocator=False),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
             self._service.reloadCache(DirectoryService.recordType_groups)
@@ -179,6 +173,7 @@
                     fakeODRecord("User 01"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -192,9 +187,9 @@
                     fakeODRecord("User 03", guid="D10F3EE0-5014-41D3-8488-3819D3EF3B2A"),
                 ],
             }
+            self._service.refresh(loop=False)
 
-            self._service.reloadCache(DirectoryService.recordType_users, shortName="user02")
-            self._service.reloadCache(DirectoryService.recordType_users, guid="D10F3EE0-5014-41D3-8488-3819D3EF3B2A")
+            self._service.reloadCache(DirectoryService.recordType_users, forceUpdate=True)
 
             self._verifyRecords(DirectoryService.recordType_users, ("user01", "user02", "user03"))
             self._verifyDisabledRecords(DirectoryService.recordType_users, (), ())
@@ -207,6 +202,7 @@
                     fakeODRecord("User 02"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -223,6 +219,7 @@
                     fakeODRecord("User 02", guid="30CA2BB9-C935-4A5D-80E2-79266BCB0255"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -241,6 +238,7 @@
                     fakeODRecord("User 03", guid="113D7F74-F84A-4F17-8C96-CE8F10D68EF8"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -260,6 +258,7 @@
                     fakeODRecord("User 02", guid="136E369F-DB40-4135-878D-B75D38242D39"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -278,12 +277,12 @@
                     fakeODRecord("User 03", guid="D10F3EE0-5014-41D3-8488-3819D3EF3B2A"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
             self._verifyRecords(DirectoryService.recordType_users, ("user01", "user02", "user03"))
             self._verifyDisabledRecords(DirectoryService.recordType_users, (), ())
-            
             self._service.fakerecords = {
                 DirectoryService.recordType_users: [
                     fakeODRecord("User 01"),
@@ -293,15 +292,15 @@
                     fakeODRecord("User 03", guid="62368DDF-0C62-4C97-9A58-DE9FD46131A0", shortName="user05"),
                 ],
             }
+            self._service.refresh(loop=False)
 
-            self._service.reloadCache(DirectoryService.recordType_users, shortName="user04")
-            self._service.reloadCache(DirectoryService.recordType_users, guid="62368DDF-0C62-4C97-9A58-DE9FD46131A0")
+            self._service.reloadCache(DirectoryService.recordType_users, forceUpdate=True)
 
             self._verifyRecords(DirectoryService.recordType_users, ("user01",))
             self._verifyDisabledRecords(
                 DirectoryService.recordType_users,
                 ("user02", "user03", "user04", "user05"),
-                ("EDB9EE55-31F2-4EA9-B5FB-D8AE2A8BA35E", "62368DDF-0C62-4C97-9A58-DE9FD46131A0", "D10F3EE0-5014-41D3-8488-3819D3EF3B2A"),
+                ("EDB9EE55-31F2-4EA9-B5FB-D8AE2A8BA35E", "62368DDF-0C62-4C97-9A58-DE9FD46131A0"),
             )
 
         def test_groupmembers(self):
@@ -329,6 +328,7 @@
                     fakeODRecord("Location 02"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
             self._service.reloadCache(DirectoryService.recordType_groups)
@@ -358,7 +358,8 @@
                     guidForShortName("user02"),
                 ]),
             ]
-            self._service.reloadCache(DirectoryService.recordType_groups)
+            self._service.refresh(loop=False)
+            self._service.reloadCache(DirectoryService.recordType_groups, forceUpdate=True)
 
             group1 = self._service.recordWithShortName(DirectoryService.recordType_groups, "group01")
             self.assertTrue(group1 is not None)
@@ -375,12 +376,20 @@
             self.assertEqual(set((group2,)), user2.groups()) 
             
             self._service.fakerecords[DirectoryService.recordType_groups] = [
+                fakeODRecord("Group 01", members=[
+                    guidForShortName("user01"),
+                ]),
+                fakeODRecord("Group 02", members=[
+                    guidForShortName("resource01"),
+                    guidForShortName("user02"),
+                ]),
                 fakeODRecord("Group 03", members=[
                     guidForShortName("user01"),
                     guidForShortName("user02"),
                 ]),
             ]
-            self._service.reloadCache(DirectoryService.recordType_groups, guid=guidForShortName("group03"))
+            self._service.refresh(loop=False)
+            self._service.reloadCache(DirectoryService.recordType_groups, forceUpdate=True)
 
             group1 = self._service.recordWithShortName(DirectoryService.recordType_groups, "group01")
             self.assertTrue(group1 is not None)
@@ -409,6 +418,7 @@
                 DirectoryService.recordType_resources: [],
                 DirectoryService.recordType_locations: [],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -428,7 +438,8 @@
                 DirectoryService.recordType_resources: [],
                 DirectoryService.recordType_locations: [],
             }
-            self._service.reloadCache(DirectoryService.recordType_users)
+            self._service.refresh(loop=False)
+            self._service.reloadCache(DirectoryService.recordType_users, forceUpdate=True)
 
             user1 = self._service.recordWithCalendarUserAddress("mailto:user01 at example.com")
             self.assertTrue(user1 is not None)
@@ -445,7 +456,8 @@
                 DirectoryService.recordType_resources: [],
                 DirectoryService.recordType_locations: [],
             }
-            self._service.reloadCache(DirectoryService.recordType_users)
+            self._service.refresh(loop=False)
+            self._service.reloadCache(DirectoryService.recordType_users, forceUpdate=True)
 
             user1 = self._service.recordWithCalendarUserAddress("mailto:user01 at example.com")
             self.assertTrue(user1 is None)
@@ -454,6 +466,41 @@
             self.assertTrue(user3 is not None)
 
 
+        def test_substantialDecline(self):
+            """
+            Test the "substantial decline" protection logic in the case where an
+            od query returns less than half the results of the previous
+            successful one.
+            """
+
+            self._service.fakerecords = {
+                DirectoryService.recordType_users: [
+                    fakeODRecord("User 01"),
+                    fakeODRecord("User 02"),
+                    fakeODRecord("User 03"),
+                    fakeODRecord("User 04"),
+                    fakeODRecord("User 05"),
+                    fakeODRecord("User 06"),
+                ],
+                DirectoryService.recordType_groups: [],
+                DirectoryService.recordType_resources: [],
+                DirectoryService.recordType_locations: [],
+            }
+            self._service.refresh(loop=False)
+            self._service.reloadCache(DirectoryService.recordType_users, forceUpdate=True)
+            user1 = self._service.recordWithCalendarUserAddress("mailto:user01 at example.com")
+            self.assertTrue(user1 is not None)
+
+            # Pretend OD suddenly returned less than half:
+            self._service.fakerecords[DirectoryService.recordType_users] = [
+                    fakeODRecord("User 01"),
+                    fakeODRecord("User 02"),
+            ]
+            self._service.refresh(loop=False)
+            self._service.reloadCache(DirectoryService.recordType_users, forceUpdate=True)
+            user3 = self._service.recordWithCalendarUserAddress("mailto:user03 at example.com")
+            self.assertTrue(user3 is not None)
+
     class AuthCacheTests(TestCase):
 
         def _authenticateUserBasic(self, ignore, node, name, password):
@@ -482,6 +529,7 @@
                     fakeODRecord("User 01"),
                 ],
             }
+            self._service.refresh(loop=False)
             self._service.reloadCache(DirectoryService.recordType_users)
 
             user1 = self._service.recordWithCalendarUserAddress("mailto:user01 at example.com")
@@ -518,6 +566,7 @@
             self.assertTrue(user1.verifyCredentials(cred))
             self.assertFalse(self._odAccessed)
 
+
 def fakeODRecord(fullName, shortName=None, guid=None, email=None, addLocator=True, members=None):
     if shortName is None:
         shortName = shortNameForFullName(fullName)

Modified: CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/xmlfile.py
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/xmlfile.py	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/directory/xmlfile.py	2009-11-05 23:13:07 UTC (rev 4710)
@@ -40,7 +40,7 @@
     def __repr__(self):
         return "<%s %r: %r>" % (self.__class__.__name__, self.realmName, self.xmlFile)
 
-    def __init__(self, xmlFile):
+    def __init__(self, xmlFile, **kwds):
         super(XMLDirectoryService, self).__init__()
 
         if type(xmlFile) is str:

Modified: CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/httpfactory.py
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/httpfactory.py	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/httpfactory.py	2009-11-05 23:13:07 UTC (rev 4710)
@@ -18,7 +18,7 @@
 
 from twisted.internet import protocol
 from twisted.python import log
-from twisted.web2.channel.http import HTTPFactory
+from twisted.web2.channel.http import HTTPFactory, HTTPChannel
 
 from twistedcaldav.config import config
 
@@ -67,3 +67,34 @@
         for arg,value in self.protocolArgs.iteritems():
             setattr(p, arg, value)
         return p
+
+
+class LimitingHTTPChannel(HTTPChannel):
+
+    def connectionMade(self):
+        HTTPChannel.connectionMade(self)
+        if self.factory.outstandingRequests >= self.factory.maxRequests:
+            # log.msg("Overloaded")
+            self.factory.myServer.myPort.stopReading()
+
+    def connectionLost(self, reason):
+        HTTPChannel.connectionLost(self, reason)
+        if self.factory.outstandingRequests < self.factory.resumeRequests:
+            # log.msg("Resuming")
+            self.factory.myServer.myPort.startReading()
+
+class LimitingHTTPFactory(HTTPFactory):
+    protocol = LimitingHTTPChannel
+
+    def __init__(self, requestFactory, maxRequests=600, maxAccepts=100, resumeRequests=550,
+        **kwargs):
+        HTTPFactory.__init__(self, requestFactory, maxRequests, **kwargs)
+        self.maxAccepts = maxAccepts
+        self.resumeRequests = resumeRequests
+
+    def buildProtocol(self, addr):
+
+        p = protocol.ServerFactory.buildProtocol(self, addr)
+        for arg, value in self.protocolArgs.iteritems():
+            setattr(p, arg, value)
+        return p

Modified: CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/tap.py
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/tap.py	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/tap.py	2009-11-05 23:13:07 UTC (rev 4710)
@@ -16,10 +16,12 @@
 
 import os
 import stat
+import socket
 
 from zope.interface import implements
 
 from twisted.internet.address import IPv4Address
+from twisted.internet import tcp, ssl
 
 from twisted.python.log import FileLogObserver
 from twisted.python.usage import Options, UsageError
@@ -49,7 +51,7 @@
 from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
 from twistedcaldav.directory.aggregate import AggregateDirectoryService
 from twistedcaldav.directory.sudo import SudoDirectoryService
-from twistedcaldav.httpfactory import HTTP503LoggingFactory
+from twistedcaldav.httpfactory import HTTP503LoggingFactory, LimitingHTTPFactory
 from twistedcaldav.static import CalendarHomeProvisioningFile
 from twistedcaldav.static import TimezoneServiceFile
 from twistedcaldav.timezones import TimezoneCache
@@ -418,7 +420,71 @@
 
         self._context = ctx
 
+class InheritedPort(tcp.Port):
 
+    def __init__(self, fd, factory, reactor):
+        tcp.Port.__init__(self, 0, factory, reactor=reactor)
+        # MOR: careful because fromfd dup()'s the socket, so we need to
+        # make sure we don't leak file descriptors
+        self.socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
+        self._realPortNumber = self.port = self.socket.getsockname()[1]
+
+    def createInternetSocket(self):
+        return self.socket
+
+    def startListening(self):
+        log.msg("%s starting on %s" % (self.factory.__class__, self._realPortNumber))
+        self.factory.doStart()
+        self.connected = 1
+        self.fileno = self.socket.fileno
+        self.numberAccepts = self.factory.maxRequests
+        self.startReading()
+
+    def doRead(self):
+        self.numberAccepts = min(self.factory.maxRequests - self.factory.outstandingRequests, self.factory.maxAccepts)
+        tcp.Port.doRead(self)
+
+
+class InheritedSSLPort(InheritedPort):
+    _socketShutdownMethod = 'sock_shutdown'
+
+    transport = ssl.Server
+
+    def __init__(self, fd, factory, ctxFactory, reactor):
+        InheritedPort.__init__(self, fd, factory, reactor)
+        self.ctxFactory = ctxFactory
+        self.socket = SSL.Connection(self.ctxFactory.getContext(), self.socket)
+
+    def _preMakeConnection(self, transport):
+        transport._startTLS()
+        return tcp.Port._preMakeConnection(self, transport)
+
+class InheritTCPServer(internet.TCPServer):
+
+    def __init__(self, *args, **kwargs):
+        internet.TCPServer.__init__(self, *args, **kwargs)
+        self.args[1].myServer = self
+
+    def _getPort(self):
+        from twisted.internet import reactor
+        port = InheritedPort(self.args[0], self.args[1], reactor)
+        port.startListening()
+        self.myPort = port
+        return port
+
+class InheritSSLServer(internet.SSLServer):
+
+    def __init__(self, *args, **kwargs):
+        internet.SSLServer.__init__(self, *args, **kwargs)
+        self.args[1].myServer = self
+
+    def _getPort(self):
+        from twisted.internet import reactor
+        port = InheritedSSLPort(self.args[0], self.args[1], self.args[2], reactor)
+        port.startListening()
+        self.myPort = port
+        return port
+
 class CalDAVServiceMaker(object):
     implements(IPlugin, service.IServiceMaker)
 
@@ -649,48 +715,30 @@
 
         site = Site(realRoot)
 
-        channel = HTTP503LoggingFactory(
-            site,
-            maxRequests=config.MaxRequests,
-            betweenRequestsTimeOut=config.IdleConnectionTimeOut)
 
-        def updateChannel(config, items):
-            channel.maxRequests = config.MaxRequests
+        # If inheriting file descriptors from the master, use those to handle
+        # requests instead of opening ports.
 
-        config.addHook(updateChannel)
+        if (config.EnableConnectionInheriting and
+           (config.InheritFDs or config.InheritSSLFDs)):
 
-        if not config.BindAddresses:
-            config.BindAddresses = [""]
+            channel = LimitingHTTPFactory(
+                site,
+                maxRequests=config.MaxRequests,
+                maxAccepts=config.MaxAccepts,
+                resumeRequests=config.ResumeRequests,
+                betweenRequestsTimeOut=config.IdleConnectionTimeOut)
 
-        for bindAddress in config.BindAddresses:
-            if config.BindHTTPPorts:
-                if config.HTTPPort == 0:
-                    raise UsageError(
-                        "HTTPPort required if BindHTTPPorts is not empty"
-                    )
-            elif config.HTTPPort != 0:
-                    config.BindHTTPPorts = [config.HTTPPort]
-
-            if config.BindSSLPorts:
-                if config.SSLPort == 0:
-                    raise UsageError(
-                        "SSLPort required if BindSSLPorts is not empty"
-                    )
-            elif config.SSLPort != 0:
-                config.BindSSLPorts = [config.SSLPort]
-
-            for port in config.BindHTTPPorts:
-                log.info("Adding server at %s:%s" % (bindAddress, port))
-
-                httpService = internet.TCPServer(
-                    int(port), channel,
-                    interface=bindAddress,
+            for fd in config.InheritFDs:
+                fd = int(fd)
+                inheritedService = InheritTCPServer(
+                    fd, channel,
                     backlog=config.ListenBacklog
                 )
-                httpService.setServiceParent(service)
+                inheritedService.setServiceParent(service)
 
-            for port in config.BindSSLPorts:
-                log.info("Adding SSL server at %s:%s" % (bindAddress, port))
+            for fd in config.InheritSSLFDs:
+                fd = int(fd)
 
                 try:
                     contextFactory = ChainingOpenSSLContextFactory(
@@ -701,15 +749,81 @@
                     )
                 except SSL.Error, e:
                     log.error("Unable to set up SSL context factory: %s" % (e,))
-                    log.error("Disabling SSL port: %s" % (port,))
                 else:
-                    httpsService = internet.SSLServer(
+                    inheritedService = InheritSSLServer(
+                        fd, channel,
+                        contextFactory,
+                        backlog=config.ListenBacklog
+                    )
+                    inheritedService.setServiceParent(service)
+
+
+        else: # Not inheriting, therefore open our own:
+
+            channel = HTTP503LoggingFactory(
+                site,
+                maxRequests=config.MaxRequests,
+                betweenRequestsTimeOut=config.IdleConnectionTimeOut)
+
+            if not config.BindAddresses:
+                config.BindAddresses = [""]
+
+            for bindAddress in config.BindAddresses:
+                if config.BindHTTPPorts:
+                    if config.HTTPPort == 0:
+                        raise UsageError(
+                            "HTTPPort required if BindHTTPPorts is not empty"
+                        )
+                elif config.HTTPPort != 0:
+                    config.BindHTTPPorts = [config.HTTPPort]
+
+                if config.BindSSLPorts:
+                    if config.SSLPort == 0:
+                        raise UsageError(
+                            "SSLPort required if BindSSLPorts is not empty"
+                        )
+                elif config.SSLPort != 0:
+                    config.BindSSLPorts = [config.SSLPort]
+
+                for port in config.BindHTTPPorts:
+                    log.info("Adding server at %s:%s" % (bindAddress, port))
+
+                    httpService = internet.TCPServer(
                         int(port), channel,
-                        contextFactory, interface=bindAddress,
+                        interface=bindAddress,
                         backlog=config.ListenBacklog
                     )
-                    httpsService.setServiceParent(service)
+                    httpService.setServiceParent(service)
 
+                for port in config.BindSSLPorts:
+                    log.info("Adding SSL server at %s:%s" % (bindAddress, port))
+
+                    try:
+                        contextFactory = ChainingOpenSSLContextFactory(
+                            config.SSLPrivateKey,
+                            config.SSLCertificate,
+                            certificateChainFile=config.SSLAuthorityChain,
+                            passwdCallback=_getSSLPassphrase
+                        )
+                    except SSL.Error, e:
+                        log.error("Unable to set up SSL context factory: %s" % (e,))
+                        log.error("Disabling SSL port: %s" % (port,))
+                    else:
+                        httpsService = internet.SSLServer(
+                            int(port), channel,
+                            contextFactory, interface=bindAddress,
+                            backlog=config.ListenBacklog
+                        )
+                        httpsService.setServiceParent(service)
+
+        def updateChannel(config, items):
+            channel.maxRequests = config.MaxRequests
+            channel.maxAccepts = config.MaxAccepts
+            channel.resumeRequests = config.ResumeRequests
+
+        config.addHook(updateChannel)
+
+
         # Change log level back to what it was before
         setLogLevelForNamespace(None, oldLogLevel)
 

Modified: CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/test/test_tap.py
===================================================================
--- CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/test/test_tap.py	2009-11-05 20:41:04 UTC (rev 4709)
+++ CalendarServer/branches/users/wsanchez/deployment/twistedcaldav/test/test_tap.py	2009-11-05 23:13:07 UTC (rev 4710)
@@ -227,6 +227,8 @@
         """
 
         self.options.parseOptions(['-f', self.configFile])
+        config.Memcached.ClientEnabled = False
+        config.Memcached.ServerEnabled = False
 
         return CalDAVServiceMaker().makeService(self.options)
 
@@ -251,7 +253,7 @@
         """
         validServices = ['Slave', 'Master', 'Combined']
 
-        self.config['HTTPPort'] = 80
+        self.config['HTTPPort'] = 8008
 
         for service in validServices:
             self.config['ProcessType'] = service
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20091105/2720e64d/attachment-0001.html>


More information about the calendarserver-changes mailing list