[CalendarServer-changes] [10079] CalendarServer/branches/users/gaya/sharedgroups

source_changes at macosforge.org
Fri Nov 16 20:03:40 PST 2012


Revision: 10079
          http://trac.calendarserver.org/changeset/10079
Author:   gaya at apple.com
Date:     2012-11-16 20:03:40 -0800 (Fri, 16 Nov 2012)
Log Message:
-----------
update from trunk

Modified Paths:
--------------
    CalendarServer/branches/users/gaya/sharedgroups/bin/calendarserver_command_gateway
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tap/util.py
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/backup_pg.py
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/gateway.py
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/principals.py
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/cmd.py
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/terminal.py
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/test/test_vfs.py
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/vfs.py
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/gateway/caldavd.plist
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/gateway/users-groups.xml
    CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/test_gateway.py
    CalendarServer/branches/users/gaya/sharedgroups/conf/caldavd-apple.plist
    CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarmigrator.py
    CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarpromotion.py
    CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_migrator.py
    CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_promotion.py
    CalendarServer/branches/users/gaya/sharedgroups/doc/Admin/MultiServerDeployment.rst
    CalendarServer/branches/users/gaya/sharedgroups/doc/Extensions/caldav-proxy.txt
    CalendarServer/branches/users/gaya/sharedgroups/doc/Extensions/caldav-proxy.xml
    CalendarServer/branches/users/gaya/sharedgroups/doc/calendarserver_manage_principals.8
    CalendarServer/branches/users/gaya/sharedgroups/support/Makefile.Apple
    CalendarServer/branches/users/gaya/sharedgroups/support/build.sh
    CalendarServer/branches/users/gaya/sharedgroups/twext/enterprise/dal/test/test_parseschema.py
    CalendarServer/branches/users/gaya/sharedgroups/twext/web2/dav/resource.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/aggregate.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/augment.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/directory.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/idirectory.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/ldapdirectory.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/principal.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/augments.xml
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_directory.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_ldapdirectory.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_principal.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_xmlfile.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/xmlaugmentsparser.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_addressbook_query.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_multiget_common.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_sync_collection.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/scheduling/processing.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/stdconfig.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/test/test_xmlutil.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/test/util.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/upgrade.py
    CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/xmlutil.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/subpostgres.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/test/test_subpostgres.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/sql.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/common.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/test_file.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/test_sql.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/icalendarstore.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/sql.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/common.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/test_file.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/test_sql.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/iaddressbookstore.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/file.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_legacy.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/current.sql
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/test_sql.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/util.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/upgrade/migrate.py
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/upgrade/test/test_migrate.py

Added Paths:
-----------
    CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarcommonextra.py
    CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_commonextra.py
    CalendarServer/branches/users/gaya/sharedgroups/lib-patches/pycrypto/
    CalendarServer/branches/users/gaya/sharedgroups/lib-patches/pycrypto/__init__.py.patch
    CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/test/importFile.sql
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/old/oracle-dialect/v11.sql
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/old/postgres-dialect/v11.sql
    CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/test_sql_schema_files.py

Removed Paths:
-------------
    CalendarServer/branches/users/gaya/sharedgroups/contrib/create_caldavd_db.sh
    CalendarServer/branches/users/gaya/sharedgroups/lib-patches/pycrypto/__init__.py.patch

Property Changed:
----------------
    CalendarServer/branches/users/gaya/sharedgroups/


Property changes on: CalendarServer/branches/users/gaya/sharedgroups
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:9885-9988
   + /CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:9885-10076

Modified: CalendarServer/branches/users/gaya/sharedgroups/bin/calendarserver_command_gateway
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/bin/calendarserver_command_gateway	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/bin/calendarserver_command_gateway	2012-11-17 04:03:40 UTC (rev 10079)
@@ -16,8 +16,13 @@
 # limitations under the License.
 ##
 
+import os
 import sys
 
+# In OS X Server context, add to PATH to find Postgres utilities (initdb, pg_ctl)
+if "Server.app" in sys.argv[0]:
+    os.environ["PATH"] += ":" + os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), "bin")
+
 #PYTHONPATH
 
 if __name__ == "__main__":
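
The added PATH logic derives the Server.app "bin" directory from the path
the script was invoked with, so spawned tools can locate initdb and pg_ctl.
A standalone sketch of the same derivation, using a hypothetical argv[0]
for illustration:

    import os

    # Hypothetical invocation path inside Server.app (illustration only)
    argv0 = ("/Applications/Server.app/Contents/ServerRoot"
             "/usr/sbin/calendarserver_command_gateway")

    if "Server.app" in argv0:
        # Go up two directories (.../usr) and into its "bin" subdirectory
        binDir = os.path.join(os.path.dirname(os.path.dirname(argv0)), "bin")
        os.environ["PATH"] += ":" + binDir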

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tap/util.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tap/util.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tap/util.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -131,7 +131,8 @@
         maxConnections=config.Postgres.MaxConnections,
         options=config.Postgres.Options,
         uid=uid, gid=gid,
-        spawnedDBUser=config.SpawnedDBUser
+        spawnedDBUser=config.SpawnedDBUser,
+        importFileName=config.DBImportFile
     )
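
The new importFileName argument plumbs config.DBImportFile through to the
spawned Postgres service; the actual handling lives in
txdav/base/datastore/subpostgres.py, also modified in this merge. A rough
sketch of the intent, assuming the dump is replayed with psql once the
service is up (names and arguments here are assumptions, not the real
implementation):

    import subprocess

    def importDump(psql, socketDir, database, importFileName):
        # Illustrative only: feed a previously dumped SQL file to a
        # freshly started Postgres over its socket directory.
        if importFileName:
            subprocess.check_call([
                psql, "-h", socketDir, "-d", database, "-f", importFileName,
            ])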
 
 

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/backup_pg.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/backup_pg.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/backup_pg.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -112,7 +112,7 @@
             print e.output
         raise BackupError(
             "%s failed:\n%s (exit code = %d)" %
-            (PGDUMP, e.output, e.returncode)
+            (PSQL, e.output, e.returncode)
         )
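
The corrected message names the binary that actually ran in this code path
(psql, not pg_dump). The surrounding reporting pattern, sketched here with
illustrative names:

    import subprocess

    class BackupError(Exception):
        pass

    def runTool(toolPath, cmdArgs):
        # Capture combined output; on failure, report which tool failed
        try:
            return subprocess.check_output(cmdArgs, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError, e:
            raise BackupError(
                "%s failed:\n%s (exit code = %d)" %
                (toolPath, e.output, e.returncode)
            )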
 
 

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/gateway.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/gateway.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/gateway.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -17,28 +17,25 @@
 ##
 
 from getopt import getopt, GetoptError
-from grp import getgrnam
-from pwd import getpwnam
 import os
 import sys
 import xml
 
 from twext.python.plistlib import readPlistFromString, writePlistToString
 
-from twisted.internet import reactor
 from twisted.internet.defer import inlineCallbacks
-from twisted.python.util import switchUID
-from twistedcaldav.config import config, ConfigurationError
 from twistedcaldav.directory.directory import DirectoryError
 from txdav.xml import element as davxml
 
-from calendarserver.tools.util import loadConfig, getDirectory, setupMemcached, checkDirectory
 from calendarserver.tools.principals import (
     principalForPrincipalID, proxySubprincipal, addProxy, removeProxy,
     getProxies, setProxies, ProxyError, ProxyWarning, updateRecord
 )
+from calendarserver.tools.purge import WorkerService, purgeOldEvents, DEFAULT_BATCH_SIZE, DEFAULT_RETAIN_DAYS
+from calendarserver.tools.cmdline import utilityMain
 
 from twext.python.log import StandardIOObserver
+from pycalendar.datetime import PyCalendarDateTime
 
 
 def usage(e=None):
@@ -60,6 +57,25 @@
         sys.exit(0)
 
 
+class RunnerService(WorkerService):
+    """
+    A wrapper around Runner which uses utilityMain to get the store
+    """
+
+    commands = None
+
+    @inlineCallbacks
+    def doWork(self):
+        """
+        Create/run a Runner to execute the commands
+        """
+        rootResource = self.rootResource()
+        directory = rootResource.getDirectory()
+        runner = Runner(rootResource, directory, self._store, self.commands)
+        if runner.validate():
+            yield runner.run()
+
+
 def main():
 
     try:
@@ -92,40 +108,7 @@
         else:
             raise NotImplementedError(opt)
 
-    try:
-        loadConfig(configFileName)
 
-        # Create the DataRoot directory before shedding privileges
-        if config.DataRoot.startswith(config.ServerRoot + os.sep):
-            checkDirectory(
-                config.DataRoot,
-                "Data root",
-                access=os.W_OK,
-                create=(0750, config.UserName, config.GroupName),
-            )
-
-        # Shed privileges
-        if config.UserName and config.GroupName and os.getuid() == 0:
-            uid = getpwnam(config.UserName).pw_uid
-            gid = getgrnam(config.GroupName).gr_gid
-            switchUID(uid, uid, gid)
-
-        os.umask(config.umask)
-
-        # Configure memcached client settings prior to setting up resource
-        # hierarchy (in getDirectory)
-        setupMemcached(config)
-
-        try:
-            config.directory = getDirectory()
-        except DirectoryError, e:
-            respondWithError(str(e))
-            return
-
-    except ConfigurationError, e:
-        respondWithError(str(e))
-        return
-
     #
     # Read commands from stdin
     #
@@ -143,17 +126,10 @@
     else:
         commands = [plist]
 
-    runner = Runner(config.directory, commands)
-    if not runner.validate():
-        return
+    RunnerService.commands = commands
+    utilityMain(configFileName, RunnerService)
 
-    #
-    # Start the reactor
-    #
-    reactor.callLater(0, runner.run)
-    reactor.run()
 
-
 attrMap = {
     'GeneratedUID' : { 'attr' : 'guid', },
     'RealName' : { 'attr' : 'fullName', },
@@ -171,12 +147,15 @@
     'Country' : { 'extras' : True, 'attr' : 'country', },
     'Phone' : { 'extras' : True, 'attr' : 'phone', },
     'AutoSchedule' : { 'attr' : 'autoSchedule', },
+    'AutoAcceptGroup' : { 'attr' : 'autoAcceptGroup', },
 }
 
 class Runner(object):
 
-    def __init__(self, directory, commands):
+    def __init__(self, root, directory, store, commands):
+        self.root = root
         self.dir = directory
+        self.store = store
         self.commands = commands
 
     def validate(self):
@@ -207,9 +186,6 @@
             respondWithError("Command failed: '%s'" % (str(e),))
             raise
 
-        finally:
-            reactor.stop()
-
     # Locations
 
     def command_getLocationList(self, command):
@@ -217,7 +193,6 @@
 
     @inlineCallbacks
     def command_createLocation(self, command):
-
         kwargs = {}
         for key, info in attrMap.iteritems():
             if command.has_key(key):
@@ -232,7 +207,7 @@
         readProxies = command.get("ReadProxies", None)
         writeProxies = command.get("WriteProxies", None)
         principal = principalForPrincipalID(record.guid, directory=self.dir)
-        (yield setProxies(principal, readProxies, writeProxies))
+        (yield setProxies(principal, readProxies, writeProxies, directory=self.dir))
 
         respondWithRecordsOfType(self.dir, command, "locations")
 
@@ -249,7 +224,9 @@
             respondWithError("Principal not found: %s" % (guid,))
             return
         recordDict['AutoSchedule'] = principal.getAutoSchedule()
-        recordDict['ReadProxies'], recordDict['WriteProxies'] = (yield getProxies(principal))
+        recordDict['AutoAcceptGroup'] = principal.getAutoAcceptGroup()
+        recordDict['ReadProxies'], recordDict['WriteProxies'] = (yield getProxies(principal,
+            directory=self.dir))
         respond(command, recordDict)
 
     command_getResourceAttributes = command_getLocationAttributes
@@ -262,6 +239,7 @@
         principal = principalForPrincipalID(command['GeneratedUID'],
             directory=self.dir)
         (yield principal.setAutoSchedule(command.get('AutoSchedule', False)))
+        (yield principal.setAutoAcceptGroup(command.get('AutoAcceptGroup', "")))
 
         kwargs = {}
         for key, info in attrMap.iteritems():
@@ -276,7 +254,7 @@
         readProxies = command.get("ReadProxies", None)
         writeProxies = command.get("WriteProxies", None)
         principal = principalForPrincipalID(record.guid, directory=self.dir)
-        (yield setProxies(principal, readProxies, writeProxies))
+        (yield setProxies(principal, readProxies, writeProxies, directory=self.dir))
 
         yield self.command_getLocationAttributes(command)
 
@@ -313,7 +291,7 @@
         readProxies = command.get("ReadProxies", None)
         writeProxies = command.get("WriteProxies", None)
         principal = principalForPrincipalID(record.guid, directory=self.dir)
-        (yield setProxies(principal, readProxies, writeProxies))
+        (yield setProxies(principal, readProxies, writeProxies, directory=self.dir))
 
         respondWithRecordsOfType(self.dir, command, "resources")
 
@@ -325,6 +303,7 @@
         principal = principalForPrincipalID(command['GeneratedUID'],
             directory=self.dir)
         (yield principal.setAutoSchedule(command.get('AutoSchedule', False)))
+        (yield principal.setAutoAcceptGroup(command.get('AutoAcceptGroup', "")))
 
         kwargs = {}
         for key, info in attrMap.iteritems():
@@ -339,7 +318,7 @@
         readProxies = command.get("ReadProxies", None)
         writeProxies = command.get("WriteProxies", None)
         principal = principalForPrincipalID(record.guid, directory=self.dir)
-        (yield setProxies(principal, readProxies, writeProxies))
+        (yield setProxies(principal, readProxies, writeProxies, directory=self.dir))
 
         yield self.command_getResourceAttributes(command)
 
@@ -452,6 +431,23 @@
         (yield respondWithProxies(self.dir, command, principal, "read"))
 
 
+    @inlineCallbacks
+    def command_purgeOldEvents(self, command):
+        """
+        Convert RetainDays from the command dictionary into a date, then purge
+        events older than that date.
+
+        @param command: the dictionary parsed from the plist read from stdin
+        @type command: C{dict}
+        """
+        retainDays = command.get("RetainDays", DEFAULT_RETAIN_DAYS)
+        cutoff = PyCalendarDateTime.getToday()
+        cutoff.setDateOnly(False)
+        cutoff.offsetDay(-retainDays)
+        eventCount = (yield purgeOldEvents(self.store, self.dir, self.root, cutoff, DEFAULT_BATCH_SIZE))
+        respond(command, {'EventsRemoved' : eventCount, "RetainDays" : retainDays})
+
+
 @inlineCallbacks
 def respondWithProxies(directory, command, principal, proxyType):
     proxies = []
@@ -460,7 +456,7 @@
         membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None))
         if membersProperty.children:
             for member in membersProperty.children:
-                proxyPrincipal = principalForPrincipalID(str(member))
+                proxyPrincipal = principalForPrincipalID(str(member), directory=directory)
                 proxies.append(proxyPrincipal.record.guid)
 
     respond(command, {

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/principals.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/principals.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/principals.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -88,6 +88,8 @@
     print "  --get-auto-schedule: read auto-schedule state"
     print "  --set-auto-schedule-mode={default|none|accept-always|decline-always|accept-if-free|decline-if-busy|automatic}: set auto-schedule mode"
     print "  --get-auto-schedule-mode: read auto-schedule mode"
+    print "  --set-auto-accept-group=principal: set auto-accept-group"
+    print "  --get-auto-accept-group: read auto-accept-group"
     print "  --add {locations|resources} 'full name' [record name] [GUID]: add a principal"
     print "  --remove: remove a principal"
 
@@ -118,6 +120,8 @@
                 "get-auto-schedule",
                 "set-auto-schedule-mode=",
                 "get-auto-schedule-mode",
+                "set-auto-accept-group=",
+                "get-auto-accept-group",
                 "verbose",
             ],
         )
@@ -223,6 +227,18 @@
         elif opt in ("", "--get-auto-schedule-mode"):
             principalActions.append((action_getAutoScheduleMode,))
 
+        elif opt in ("", "--set-auto-accept-group"):
+            try:
+                principalForPrincipalID(arg, checkOnly=True)
+            except ValueError, e:
+                abort(e)
+
+            principalActions.append((action_setAutoAcceptGroup, arg))
+
+        elif opt in ("", "--get-auto-accept-group"):
+            principalActions.append((action_getAutoAcceptGroup,))
+
+
         else:
             raise NotImplementedError(opt)
 
@@ -768,7 +784,50 @@
         autoScheduleMode,
     )
 
+@inlineCallbacks
+def action_setAutoAcceptGroup(principal, autoAcceptGroup):
+    if principal.record.recordType == "groups":
+        print "Setting auto-accept-group for %s is not allowed." % (principal,)
 
+    elif principal.record.recordType == "users" and not config.Scheduling.Options.AutoSchedule.AllowUsers:
+        print "Setting auto-accept-group for %s is not allowed." % (principal,)
+
+    else:
+        groupPrincipal = principalForPrincipalID(autoAcceptGroup)
+        if groupPrincipal is None or groupPrincipal.record.recordType != "groups":
+            print "Invalid principal ID: %s" % (autoAcceptGroup,)
+        else:
+            print "Setting auto-accept-group to %s for %s" % (
+                prettyPrincipal(groupPrincipal),
+                prettyPrincipal(principal),
+            )
+
+            (yield updateRecord(False, config.directory,
+                principal.record.recordType,
+                guid=principal.record.guid,
+                shortNames=principal.record.shortNames,
+                fullName=principal.record.fullName,
+                autoAcceptGroup=groupPrincipal.record.guid,
+                **principal.record.extras
+            ))
+
+def action_getAutoAcceptGroup(principal):
+    autoAcceptGroup = principal.getAutoAcceptGroup()
+    if autoAcceptGroup:
+        record = config.directory.recordWithGUID(autoAcceptGroup)
+        if record is not None:
+            groupPrincipal = config.directory.principalCollection.principalForUID(record.uid)
+            if groupPrincipal is not None:
+                print "Auto-accept-group for %s is %s" % (
+                    prettyPrincipal(principal),
+                    prettyPrincipal(groupPrincipal),
+                )
+                return
+        print "Invalid auto-accept-group assigned: %s" % (autoAcceptGroup,)
+    else:
+        print "No auto-accept-group assigned to %s" % (prettyPrincipal(principal),)
+
+
 def abort(msg, status=1):
     sys.stdout.write("%s\n" % (msg,))
     try:
@@ -856,18 +915,33 @@
     matching the guid in kwargs.
     """
 
+    assignAutoSchedule = False
     if kwargs.has_key("autoSchedule"):
+        assignAutoSchedule = True
         autoSchedule = kwargs["autoSchedule"]
         del kwargs["autoSchedule"]
-    else:
+    elif create:
+        assignAutoSchedule = True
         autoSchedule = recordType in ("locations", "resources")
 
+    assignAutoScheduleMode = False
     if kwargs.has_key("autoScheduleMode"):
+        assignAutoScheduleMode = True
         autoScheduleMode = kwargs["autoScheduleMode"]
         del kwargs["autoScheduleMode"]
-    else:
+    elif create:
+        assignAutoScheduleMode = True
         autoScheduleMode = None
 
+    assignAutoAcceptGroup = False
+    if kwargs.has_key("autoAcceptGroup"):
+        assignAutoAcceptGroup = True
+        autoAcceptGroup = kwargs["autoAcceptGroup"]
+        del kwargs["autoAcceptGroup"]
+    elif create:
+        assignAutoAcceptGroup = True
+        autoAcceptGroup = None
+
     for key, value in kwargs.items():
         if isinstance(value, unicode):
             kwargs[key] = value.encode("utf-8")
@@ -890,8 +964,13 @@
 
     augmentService = directory.serviceForRecordType(recordType).augmentService
     augmentRecord = (yield augmentService.getAugmentRecord(kwargs['guid'], recordType))
-    augmentRecord.autoSchedule = autoSchedule
-    augmentRecord.autoScheduleMode = autoScheduleMode
+
+    if assignAutoSchedule:
+        augmentRecord.autoSchedule = autoSchedule
+    if assignAutoScheduleMode:
+        augmentRecord.autoScheduleMode = autoScheduleMode
+    if assignAutoAcceptGroup:
+        augmentRecord.autoAcceptGroup = autoAcceptGroup
     (yield augmentService.addAugmentRecords([augmentRecord]))
     try:
         directory.updateRecord(recordType, **kwargs)
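
These changes make each augment assignment conditional: a field is written
only when the caller supplied it explicitly, or when creating a new record,
so updates no longer clobber fields they don't mention. The pattern,
captured as a hypothetical helper (not in the source):

    def popAssignment(kwargs, key, create, createDefault):
        # Assign if the caller passed the key; on create, use the default;
        # otherwise leave the stored augment value untouched.
        if kwargs.has_key(key):
            return True, kwargs.pop(key)
        elif create:
            return True, createDefault
        return False, None

    # e.g.:
    # assignAutoAcceptGroup, autoAcceptGroup = popAssignment(
    #     kwargs, "autoAcceptGroup", create, None)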

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/cmd.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/cmd.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/cmd.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -67,6 +67,12 @@
 
 
 class CommandsBase(object):
+    """
+    Base class for commands.
+
+    @ivar protocol: a protocol for parsing the incoming command line.
+    @type protocol: L{calendarserver.tools.shell.terminal.ShellProtocol}
+    """
     def __init__(self, protocol):
         self.protocol = protocol
 

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/terminal.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/terminal.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/terminal.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -89,7 +89,28 @@
         super(ShellOptions, self).__init__()
 
 
+
 class ShellService(Service, object):
+    """
+    A L{ShellService} collects all the information that a shell needs to run;
+    when run, it invokes the shell on stdin/stdout.
+
+    @ivar store: the calendar / addressbook store.
+    @type store: L{txdav.idav.IDataStore}
+
+    @ivar directory: the directory service, to look up principals' names
+    @type directory: L{twistedcaldav.directory.idirectory.IDirectoryService}
+
+    @ivar options: the command-line options used to create this shell service
+    @type options: L{ShellOptions}
+
+    @ivar reactor: the reactor under which this service is running
+    @type reactor: L{IReactorTCP}, L{IReactorTime}, L{IReactorThreads} etc
+
+    @ivar config: the configuration associated with this shell service.
+    @type config: L{twistedcaldav.config.Config}
+    """
+
     def __init__(self, store, directory, options, reactor, config):
         super(ShellService, self).__init__()
         self.store      = store
@@ -100,6 +121,7 @@
         self.terminalFD = None
         self.protocol   = None
 
+
     def startService(self):
         """
         Start the service.
@@ -114,6 +136,7 @@
         self.protocol = ServerProtocol(lambda: ShellProtocol(self))
         StandardIO(self.protocol)
 
+
     def stopService(self):
         """
         Stop the service.
@@ -123,9 +146,13 @@
         os.write(self.terminalFD, "\r\x1bc\r")
 
 
+
 class ShellProtocol(ReceiveLineProtocol):
     """
     Data store shell protocol.
+
+    @ivar service: a service representing the running shell
+    @type service: L{ShellService}
     """
 
     # FIXME:

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/test/test_vfs.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/test/test_vfs.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/test/test_vfs.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -15,14 +15,18 @@
 # limitations under the License.
 ##
 
-import twisted.trial.unittest 
-from twisted.internet.defer import succeed
+from twisted.trial.unittest import TestCase
+from twisted.internet.defer import succeed, inlineCallbacks
 
 from calendarserver.tools.shell.vfs import ListEntry
 from calendarserver.tools.shell.vfs import File, Folder
+from calendarserver.tools.shell.vfs import UIDsFolder
+from calendarserver.tools.shell.terminal import ShellService
+from twistedcaldav.directory.test.test_xmlfile import XMLFileBase
+from txdav.common.datastore.test.util import buildStore
 
 
-class TestListEntry(twisted.trial.unittest.TestCase):
+class TestListEntry(TestCase):
     def test_toString(self):
         self.assertEquals(ListEntry(None, File  , "thingo"           ).toString(), "thingo" )
         self.assertEquals(ListEntry(None, File  , "thingo", Foo="foo").toString(), "thingo" )
@@ -100,3 +104,58 @@
             def list(self): return succeed(())
             list.fieldNames = ()
         self.assertEquals(fields(MyFile), ("thingo",))
+
+
+
+class DirectoryStubber(XMLFileBase):
+    """
+    Object which creates a stub L{IDirectoryService}.
+    """
+    def __init__(self, testCase):
+        self.testCase = testCase
+
+    def mktemp(self):
+        return self.testCase.mktemp()
+
+
+
+class UIDsFolderTests(TestCase):
+    """
+    L{UIDsFolder} contains all principals and is keyed by UID.
+    """
+
+    @inlineCallbacks
+    def setUp(self):
+        """
+        Create a L{UIDsFolder}.
+        """
+        self.svc = ShellService(store=(yield buildStore(self, None)),
+                                directory=DirectoryStubber(self).service(),
+                                options=None, reactor=None, config=None)
+        self.folder = UIDsFolder(self.svc, ())
+
+
+    @inlineCallbacks
+    def test_list(self):
+        """
+        L{UIDsFolder.list} returns a L{Deferred} firing an iterable of
+        L{ListEntry} objects, reflecting the directory information for all
+        calendars and addressbooks created in the store.
+        """
+        txn = self.svc.store.newTransaction()
+        wsanchez = "6423F94A-6B76-4A3A-815B-D52CFD77935D"
+        dreid = "5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1"
+        yield txn.calendarHomeWithUID(wsanchez, create=True)
+        yield txn.addressbookHomeWithUID(dreid, create=True)
+        yield txn.commit()
+        listing = list((yield self.folder.list()))
+        self.assertEquals(
+            [x.fields for x in listing],
+            [{"Record Type": "users", "Short Name": "wsanchez",
+              "Full Name": "Wilfredo Sanchez", "Name": wsanchez},
+              {"Record Type": "users", "Short Name": "dreid",
+              "Full Name": "David Reid", "Name": dreid}]
+        )
+
+
+

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/vfs.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/vfs.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/shell/vfs.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -1,3 +1,4 @@
+# -*- test-case-name: calendarserver.tools.shell.test.test_vfs -*-
 ##
 # Copyright (c) 2011-2012 Apple Inc. All rights reserved.
 #
@@ -56,6 +57,7 @@
     """
     Information about a C{File} as returned by C{File.list()}.
     """
+
     def __init__(self, parent, Class, Name, **fields):
         self.parent    = parent # The class implementing list()
         self.fileClass = Class
@@ -64,9 +66,11 @@
 
         fields["Name"] = Name
 
+
     def __str__(self):
         return self.toString()
 
+
     def __repr__(self):
         fields = self.fields.copy()
         del fields["Name"]
@@ -83,15 +87,18 @@
             fields,
         )
 
+
     def isFolder(self):
         return issubclass(self.fileClass, Folder)
 
+
     def toString(self):
         if self.isFolder():
             return "%s/" % (self.fileName,)
         else:
             return self.fileName
 
+
     @property
     def fieldNames(self):
         if not hasattr(self, "_fieldNames"):
@@ -101,10 +108,12 @@
                 else:
                     self._fieldNames = ("Name",) + tuple(self.parent.list.fieldNames)
             else:
-                self._fieldNames = ["Name"] + sorted(n for n in self.fields if n != "Name")
+                self._fieldNames = ["Name"] + sorted(n for n in self.fields
+                                                     if n != "Name")
 
         return self._fieldNames
 
+
     def toFields(self):
         try:
             return tuple(self.fields[fieldName] for fieldName in self.fieldNames)
@@ -115,6 +124,7 @@
             )
 
 
+
 class File(object):
     """
     Object in virtual data hierarchy.
@@ -217,7 +227,8 @@
     """
     Root of virtual data hierarchy.
 
-    Hierarchy:
+    Hierarchy::
+
       /                    RootFolder
         uids/              UIDsFolder
           <uid>/           PrincipalHomeFolder
@@ -262,9 +273,8 @@
         # FIXME: Merge in directory UIDs also?
         # FIXME: Add directory info (eg. name) to list entry
 
-        def addResult(uid):
-            if uid in results:
-                return
+        def addResult(ignoredTxn, home):
+            uid = home.uid()
 
             record = self.service.directory.recordWithUID(uid)
             if record:
@@ -277,22 +287,12 @@
                 info = {}
 
             results[uid] = ListEntry(self, PrincipalHomeFolder, uid, **info)
-
-        txn = self.service.store.newTransaction()
-        try:
-            for home in (yield txn.calendarHomes()):
-                addResult(home.uid())
-            for home in (yield txn.addressbookHomes()):
-                addResult(home.uid())
-        finally:
-            (yield txn.abort())
-
+        yield self.service.store.withEachCalendarHomeDo(addResult)
+        yield self.service.store.withEachAddressbookHomeDo(addResult)
         returnValue(results.itervalues())
 
-        list.fieldNames = ("Record Name", "Short Name", "Full Name")
 
 
-
 class RecordFolder(Folder):
     def _recordForName(self, name):
         recordTypeAttr = "recordType_" + self.recordType
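
UIDsFolder.list() no longer opens and aborts its own transaction; the store
now drives iteration and, judging from the new addResult signature, invokes
the callback as callback(txn, home) for each home. A minimal callback under
that assumption:

    def addResult(ignoredTxn, home):
        # The store manages the transaction, so the callback ignores it
        # and only consults the home object.
        uid = home.uid()
        # ... look up the directory record for uid, build a ListEntry ...

    # yield store.withEachCalendarHomeDo(addResult)
    # yield store.withEachAddressbookHomeDo(addResult)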

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/gateway/caldavd.plist
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/gateway/caldavd.plist	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/gateway/caldavd.plist	2012-11-17 04:03:40 UTC (rev 10079)
@@ -91,11 +91,11 @@
 
     <!-- Log root -->
     <key>LogRoot</key>
-    <string>/var/log/caldavd</string>
+    <string>Logs</string>
 
     <!-- Run root -->
     <key>RunRoot</key>
-    <string>/var/run</string>
+    <string>Logs/state</string>
 
     <!-- Child aliases -->
     <key>Aliases</key>
@@ -279,7 +279,7 @@
      -->
 
 	<key>ProxyLoadFromFile</key>
-    <string>conf/auth/proxies-test.xml</string>
+    <string></string>
 
     <!--
         Special principals

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/gateway/users-groups.xml
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/gateway/users-groups.xml	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/gateway/users-groups.xml	2012-11-17 04:03:40 UTC (rev 10079)
@@ -37,4 +37,13 @@
       <member type="users">user02</member>
     </members>
   </group>
+  <group>
+    <uid>testgroup2</uid>
+    <guid>f5a6142c-4189-4e9e-90b0-9cd0268b314b</guid>
+    <password>test</password>
+    <name>Group 02</name>
+    <members>
+      <member type="users">user01</member>
+    </members>
+  </group>
 </accounts>

Modified: CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/test_gateway.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/test_gateway.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/calendarserver/tools/test/test_gateway.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -121,6 +121,7 @@
         self.assertEquals(results["result"]["RealName"], "Created Location 01 %s" % unichr(208))
         self.assertEquals(results["result"]["Comment"], "Test Comment")
         self.assertEquals(results["result"]["AutoSchedule"], True)
+        self.assertEquals(results["result"]["AutoAcceptGroup"], "E5A6142C-4189-4E9E-90B0-9CD0268B314B")
         self.assertEquals(set(results["result"]["ReadProxies"]), set(['user03', 'user04']))
         self.assertEquals(set(results["result"]["WriteProxies"]), set(['user05', 'user06']))
 
@@ -202,9 +203,11 @@
         self.assertEquals(record.extras["country"], "Updated USA")
         self.assertEquals(record.extras["phone"], "(408) 555-1213")
         self.assertEquals(record.autoSchedule, True)
+        self.assertEquals(record.autoAcceptGroup, "F5A6142C-4189-4E9E-90B0-9CD0268B314B")
 
         results = yield self.runCommand(command_getLocationAttributes)
         self.assertEquals(results["result"]["AutoSchedule"], True)
+        self.assertEquals(results["result"]["AutoAcceptGroup"], "F5A6142C-4189-4E9E-90B0-9CD0268B314B")
         self.assertEquals(set(results["result"]["ReadProxies"]), set(['user03']))
         self.assertEquals(set(results["result"]["WriteProxies"]), set(['user05', 'user06', 'user07']))
 
@@ -274,6 +277,13 @@
         results = yield self.runCommand(command_removeWriteProxy)
         self.assertEquals(len(results["result"]["Proxies"]), 0)
 
+    @inlineCallbacks
+    def test_purgeOldEvents(self):
+        results = yield self.runCommand(command_purgeOldEvents)
+        self.assertEquals(results["result"]["EventsRemoved"], 0)
+        self.assertEquals(results["result"]["RetainDays"], 42)
+        results = yield self.runCommand(command_purgeOldEventsNoDays)
+        self.assertEquals(results["result"]["RetainDays"], 365)
 
 
 command_addReadProxy = """<?xml version="1.0" encoding="UTF-8"?>
@@ -312,6 +322,8 @@
         <string>createLocation</string>
         <key>AutoSchedule</key>
         <true/>
+        <key>AutoAcceptGroup</key>
+        <string>E5A6142C-4189-4E9E-90B0-9CD0268B314B</string>
         <key>GeneratedUID</key>
         <string>836B1B66-2E9A-4F46-8B1C-3DD6772C20B2</string>
         <key>RealName</key>
@@ -495,6 +507,8 @@
         <string>setLocationAttributes</string>
         <key>AutoSchedule</key>
         <true/>
+        <key>AutoAcceptGroup</key>
+        <string>F5A6142C-4189-4E9E-90B0-9CD0268B314B</string>
         <key>GeneratedUID</key>
         <string>836B1B66-2E9A-4F46-8B1C-3DD6772C20B2</string>
         <key>RealName</key>
@@ -582,3 +596,25 @@
 </dict>
 </plist>
 """
+
+command_purgeOldEvents = """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+        <key>command</key>
+        <string>purgeOldEvents</string>
+        <key>RetainDays</key>
+        <integer>42</integer>
+</dict>
+</plist>
+"""
+
+command_purgeOldEventsNoDays = """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+        <key>command</key>
+        <string>purgeOldEvents</string>
+</dict>
+</plist>
+"""

Modified: CalendarServer/branches/users/gaya/sharedgroups/conf/caldavd-apple.plist
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/conf/caldavd-apple.plist	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/conf/caldavd-apple.plist	2012-11-17 04:03:40 UTC (rev 10079)
@@ -96,9 +96,11 @@
 
     <!-- Database connection -->
     <key>DBType</key>
-    <string>postgres</string>
+    <string></string>
     <key>DSN</key>
-    <string>/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::</string>
+    <string></string>
+    <key>DBImportFile</key>
+    <string>/Library/Server/Calendar and Contacts/DataDump.sql</string>
 
     <!-- Data root -->
     <key>DataRoot</key>

Deleted: CalendarServer/branches/users/gaya/sharedgroups/contrib/create_caldavd_db.sh
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/contrib/create_caldavd_db.sh	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/contrib/create_caldavd_db.sh	2012-11-17 04:03:40 UTC (rev 10079)
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-
-/Applications/Server.app/Contents/ServerRoot/usr/sbin/calendarserver_bootstrap_database
-
-exit 0

Copied: CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarcommonextra.py (from rev 10076, CalendarServer/trunk/contrib/migration/calendarcommonextra.py)
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarcommonextra.py	                        (rev 0)
+++ CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarcommonextra.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -0,0 +1,185 @@
+#!/usr/bin/env python
+#
+# CommonExtra script for calendar server.
+#
+# Copyright (c) 2012 Apple Inc.  All Rights Reserved.
+#
+# IMPORTANT NOTE:  This file is licensed only for use on Apple-labeled
+# computers and is subject to the terms and conditions of the Apple
+# Software License Agreement accompanying the package this file is a
+# part of.  You may not port this file to another platform without
+# Apple's written consent.
+
+import datetime
+import subprocess
+from plistlib import readPlist, writePlist
+
+LOG = "/Library/Logs/Migration/calendarmigrator.log"
+SERVER_APP_ROOT = "/Applications/Server.app/Contents/ServerRoot"
+CALENDAR_SERVER_ROOT = "/Library/Server/Calendar and Contacts"
+CALDAVD_PLIST = "%s/Config/caldavd.plist" % (CALENDAR_SERVER_ROOT,)
+SERVER_ADMIN = "%s/usr/sbin/serveradmin" % (SERVER_APP_ROOT,)
+CERT_ADMIN = "/Applications/Server.app/Contents/ServerRoot/usr/sbin/certadmin"
+PGDUMP = "%s/usr/bin/pg_dump" % (SERVER_APP_ROOT,)
+DROPDB = "%s/usr/bin/dropdb" % (SERVER_APP_ROOT,)
+POSTGRES_SERVICE_NAME = "postgres_server"
+PGSOCKETDIR = "/Library/Server/PostgreSQL For Server Services/Socket"
+USERNAME      = "caldav"
+DATABASENAME  = "caldav"
+DATADUMPFILENAME = "%s/DataDump.sql" % (CALENDAR_SERVER_ROOT,)
+
+def log(msg):
+    try:
+        timestamp = datetime.datetime.now().strftime("%b %d %H:%M:%S")
+        msg = "calendarcommonextra: %s %s" % (timestamp, msg)
+        print msg # so it appears in Setup.log
+        with open(LOG, 'a') as output:
+            output.write("%s\n" % (msg,)) # so it appears in our log
+    except IOError:
+        # Could not write to log
+        pass
+
+
+def startPostgres():
+    """
+    Start postgres via serveradmin
+
+    This will block until postgres is up and running
+    """
+    log("Starting %s via %s" % (POSTGRES_SERVICE_NAME, SERVER_ADMIN))
+    ret = subprocess.call([SERVER_ADMIN, "start", POSTGRES_SERVICE_NAME])
+    log("serveradmin exited with %d" % (ret,))
+
+def stopPostgres():
+    """
+    Stop postgres via serveradmin
+    """
+    log("Stopping %s via %s" % (POSTGRES_SERVICE_NAME, SERVER_ADMIN))
+    ret = subprocess.call([SERVER_ADMIN, "stop", POSTGRES_SERVICE_NAME])
+    log("serveradmin exited with %d" % (ret,))
+
+
+def dumpOldDatabase(dumpFile):
+    """
+    Use pg_dump to dump data to dumpFile
+    """
+
+    cmdArgs = [
+        PGDUMP,
+        "-h", PGSOCKETDIR,
+        "--username=%s" % (USERNAME,),
+        "--inserts",
+        "--no-privileges",
+        "--file=%s" % (dumpFile,),
+        DATABASENAME
+    ]
+    try:
+        log("Dumping data to %s" % (dumpFile,))
+        log("Executing: %s" % (" ".join(cmdArgs)))
+        out = subprocess.check_output(cmdArgs, stderr=subprocess.STDOUT)
+        log(out)
+        return True
+    except subprocess.CalledProcessError, e:
+        log(e.output)
+        return False
+
+
+def dropOldDatabase():
+    """
+    Use dropdb to delete the caldav database from the shared postgres server
+    """
+
+    cmdArgs = [
+        DROPDB,
+        "-h", PGSOCKETDIR,
+        "--username=%s" % (USERNAME,),
+        DATABASENAME
+    ]
+    try:
+        log("\nDropping %s database" % (DATABASENAME,))
+        log("Executing: %s" % (" ".join(cmdArgs)))
+        out = subprocess.check_output(cmdArgs, stderr=subprocess.STDOUT)
+        log(out)
+        return True
+    except subprocess.CalledProcessError, e:
+        log(e.output)
+        return False
+
+
+def getDefaultCert():
+    """
+    Ask certadmin for default cert
+    @returns: path to default certificate, or empty string if no default
+    @rtype: C{str}
+    """
+    child = subprocess.Popen(
+        args=[CERT_ADMIN, "--default-certificate-path"],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
+    output, error = child.communicate()
+    if child.returncode:
+        log("Error looking up default certificate (%d): %s" % (child.returncode, error))
+        return ""
+    else:
+        certPath = output.strip()
+        log("Default certificate is: %s" % (certPath,))
+        return certPath
+
+def updateSettings(settings, otherCert):
+    """
+    Replace SSL settings based on otherCert path
+    """
+    basePath = otherCert[:-len("cert.pem")]
+    log("Base path is %s" % (basePath,))
+
+    log("Setting SSLCertificate to %s" % (otherCert,))
+    settings["SSLCertificate"] = otherCert
+
+    otherChain = basePath + "chain.pem"
+    log("Setting SSLAuthorityChain to %s" % (otherChain,))
+    settings["SSLAuthorityChain"] = otherChain
+
+    otherKey = basePath + "key.pem"
+    log("Setting SSLPrivateKey to %s" % (otherKey,))
+    settings["SSLPrivateKey"] = otherKey
+
+    settings["EnableSSL"] = True
+    settings["RedirectHTTPToHTTPS"] = True
+
+def setCert(plistPath, otherCert):
+    """
+    Replace SSL settings in plist at plistPath based on otherCert path
+    """
+    log("Reading plist %s" % (plistPath,))
+    plist = readPlist(plistPath)
+    log("Read in plist %s" % (plistPath,))
+
+    updateSettings(plist, otherCert)
+
+    log("Writing plist %s" % (plistPath,))
+    writePlist(plist, plistPath)
+
+def isSSLEnabled(plistPath):
+    """
+    Examine plist for EnableSSL
+    """
+    log("Reading plist %s" % (plistPath,))
+    plist = readPlist(plistPath)
+    return plist.get("EnableSSL", False)
+
+def main():
+    startPostgres()
+    if dumpOldDatabase(DATADUMPFILENAME):
+        dropOldDatabase()
+    stopPostgres()
+
+    if not isSSLEnabled(CALDAVD_PLIST):
+        defaultCertPath = getDefaultCert()
+        log("Default cert path: %s" % (defaultCertPath,))
+        if defaultCertPath:
+            setCert(CALDAVD_PLIST, defaultCertPath)
+
+
+if __name__ == "__main__":
+    main()
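
Note how updateSettings derives the companion paths: it strips the trailing
"cert.pem" from the default certificate path and appends "chain.pem" and
"key.pem". For example, assuming the usual certificate layout:

    settings = {}
    updateSettings(settings, "/etc/certificates/example.cert.pem")
    # settings now holds:
    #   SSLCertificate:    /etc/certificates/example.cert.pem
    #   SSLAuthorityChain: /etc/certificates/example.chain.pem
    #   SSLPrivateKey:     /etc/certificates/example.key.pem
    # plus EnableSSL = True and RedirectHTTPToHTTPS = True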

Modified: CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarmigrator.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarmigrator.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarmigrator.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -169,8 +169,13 @@
             # Trigger migration of locations and resources from OD
             triggerResourceMigration(newServerRoot)
 
-            setRunState(options, enableCalDAV, enableCardDAV)
+            # TODO: instead of starting now, leave breadcrumbs for
+            # the commonextra to start the service, so that data can
+            # be dumped from the old Postgres to a file which will
+            # be executed by calendar server when it next starts up.
 
+            # setRunState(options, enableCalDAV, enableCardDAV)
+
     else:
         log("ERROR: --sourceRoot and --sourceVersion must be specified")
         sys.exit(1)
@@ -479,9 +484,15 @@
     # If SSL is enabled, redirect HTTP to HTTPS.
     combined["RedirectHTTPToHTTPS"] = enableSSL
 
-    # New DSN value for server-specific Postgres
-    combined["DSN"] = "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::"
+    # New DBType value indicating we launch our own Postgres
+    combined["DBType"] = ""
 
+    # No DSN value since we launch our own Postgres
+    combined["DSN"] = ""
+
+    # Path to SQL file to import previous data from
+    combined["DBImportFile"] = "/Library/Server/Calendar and Contacts/DataDump.sql"
+
     # ConfigRoot is now always "Config"
     combined["ConfigRoot"] = "Config"
 

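Per the TODO above, the SQL dump referenced by DBImportFile is meant to be executed by the calendar server the next time it starts up. A rough, purely illustrative sketch of that consumption (the real logic belongs to server startup, not this changeset, and the psql invocation here is an assumption):

    import os
    import subprocess

    def importDataDumpIfPresent(config):
        # Illustrative only: run the SQL dump left behind by migration,
        # then remove it so it is imported exactly once.
        importFile = config.get("DBImportFile", "")
        if importFile and os.path.exists(importFile):
            subprocess.check_call(["psql", "-f", importFile, "caldav"])  # assumed invocation
            os.remove(importFile)
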
Modified: CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarpromotion.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarpromotion.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/calendarpromotion.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -27,8 +27,9 @@
 
 def updatePlist(plistData):
     """
-    Update the passed-in plist data with new values for disabling the XMPPNotifier, and
-    to set the DSN to use the server-specific Postgres.
+    Update the passed-in plist data in place: disable the XMPPNotifier, set
+    DBType and DSN to empty strings (we'll be starting our own Postgres server),
+    set DBImportFile, and move ConfigRoot to "Config" beneath ServerRoot.
 
     @param plistData: the plist data to update in place
     @type plistData: C{dict}
@@ -38,9 +39,13 @@
             plistData["Notifications"]["Services"]["XMPPNotifier"]["Enabled"] = False
     except KeyError:
         pass
-    plistData["DSN"] = "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::"
+    plistData["DBType"] = ""
+    plistData["DSN"] = ""
+    plistData["ConfigRoot"] = "Config"
+    plistData["DBImportFile"] = "/Library/Server/Calendar and Contacts/DataDump.sql"
 
 
+
 def main():
 
     try:

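For reference, a minimal sketch of how updatePlist() is applied, assuming plistlib's readPlist/writePlist (used elsewhere in this changeset) and a hypothetical plist path:

    from plistlib import readPlist, writePlist
    from contrib.migration.calendarpromotion import updatePlist

    CALDAVD_PLIST = "/Library/Server/Calendar and Contacts/Config/caldavd.plist"  # hypothetical path

    plist = readPlist(CALDAVD_PLIST)
    updatePlist(plist)            # updates the dict in place
    writePlist(plist, CALDAVD_PLIST)
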
Copied: CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_commonextra.py (from rev 10076, CalendarServer/trunk/contrib/migration/test/test_commonextra.py)
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_commonextra.py	                        (rev 0)
+++ CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_commonextra.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -0,0 +1,44 @@
+##
+# Copyright (c) 2012 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+import twistedcaldav.test.util
+from contrib.migration.calendarcommonextra import updateSettings
+
+class CommonExtraTests(twistedcaldav.test.util.TestCase):
+    """
+    Calendar Server CommonExtra Tests
+    """
+
+    def test_updateSettings(self):
+        """
+        Verify SSL values are updated
+        """
+
+        # suppress prints
+        from contrib.migration import calendarcommonextra
+        self.patch(calendarcommonextra, "log", lambda x: x)
+
+        orig = {
+        }
+        expected = {
+            'EnableSSL': True,
+            'RedirectHTTPToHTTPS': True,
+            'SSLAuthorityChain': '/test/pchain.pem',
+            'SSLCertificate': '/test/path.cert',
+            'SSLPrivateKey': '/test/pkey.pem',
+        }
+        updateSettings(orig, "/test/path.cert")
+        self.assertEquals(orig, expected)

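The expected values above look surprising at first: updateSettings() unconditionally slices len("cert.pem") (8 characters) off the end of the certificate path, even when the path does not actually end in "cert.pem". Hence "/test/path.cert" yields the base path "/test/p", and the derived files become "/test/pchain.pem" and "/test/pkey.pem":

    from contrib.migration.calendarcommonextra import updateSettings

    settings = {}
    updateSettings(settings, "/test/path.cert")
    # "/test/path.cert" minus its last 8 characters ("ath.cert") is "/test/p"
    assert settings["SSLAuthorityChain"] == "/test/pchain.pem"
    assert settings["SSLPrivateKey"] == "/test/pkey.pem"
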
Modified: CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_migrator.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_migrator.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_migrator.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -90,7 +90,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : True,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": True,
@@ -129,7 +131,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -168,7 +172,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : True,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": True,
@@ -207,7 +213,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : True,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": True,
@@ -246,7 +254,9 @@
             "BindHTTPPorts": [1111, 2222, 4444, 5555, 7777, 8888],
             "BindSSLPorts": [3333, 6666, 9999, 11111],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : True,
             "HTTPPort": 8888,
             "RedirectHTTPToHTTPS": True,
@@ -282,7 +292,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -313,7 +325,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : True,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": True,
@@ -335,7 +349,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -383,7 +399,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -423,7 +441,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -476,7 +496,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -518,7 +540,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -560,7 +584,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,
@@ -596,7 +622,9 @@
             "BindHTTPPorts": [8008, 8800],
             "BindSSLPorts": [8443, 8843],
             "ConfigRoot" : "Config",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DSN" : "",
+            "DBType" : "",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
             "EnableSSL" : False,
             "HTTPPort": 8008,
             "RedirectHTTPToHTTPS": False,

Modified: CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_promotion.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_promotion.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/contrib/migration/test/test_promotion.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -32,7 +32,10 @@
         }
         expected = {
             "ignored" : "ignored",
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
+            "DBType" : "",
+            "DSN" : "",
+            "ConfigRoot" : "Config",
         }
         updatePlist(orig)
         self.assertEquals(orig, expected)
@@ -44,7 +47,9 @@
                         "Enabled" : True
                     }
                 }
-            }
+            },
+            "ConfigRoot" : "/etc/caldavd",
+
         }
         expected = {
             "Notifications" : {
@@ -54,7 +59,10 @@
                     }
                 }
             },
-            "DSN" : "/Library/Server/PostgreSQL For Server Services/Socket:caldav:caldav:::",
+            "DBImportFile" : "/Library/Server/Calendar and Contacts/DataDump.sql",
+            "DBType" : "",
+            "DSN" : "",
+            "ConfigRoot" : "Config",
         }
         updatePlist(orig)
         self.assertEquals(orig, expected)

Modified: CalendarServer/branches/users/gaya/sharedgroups/doc/Admin/MultiServerDeployment.rst
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/doc/Admin/MultiServerDeployment.rst	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/doc/Admin/MultiServerDeployment.rst	2012-11-17 04:03:40 UTC (rev 10079)
@@ -18,7 +18,7 @@
 
 * `Shared Storage for Attachments`_: AttachmentsRoot should point to storage shared across all servers, e.g. an NFS mount. Used for file attachments to calendar events.
 
-* `General Advise`_: *No one wants advice - only corroboration.*  --John Steinbeck
+* `General Advice`_: *No one wants advice - only corroboration.*  --John Steinbeck
 
 ---------------------
 Database Connectivity
@@ -170,7 +170,7 @@
 Set the caldavd.plist key AttachmentsRoot to a filesystem directory that is shared and writable by all Calendar Server machines, for example an NFS export. This will be used to store file attachments that users may attach to calendar events.
 
 -------------------
-General Advise
+General Advice
 -------------------
 
 * Ensure caldavd.plist is identical on all Calendar Server hosts. This is not strictly required, but recommended to keep things as predictable as possible. Since you already have shared storage for AttachmentsRoot, use that to host the 'conf' directory for all servers as well; this way you don't need to push config changes out to the servers.

Modified: CalendarServer/branches/users/gaya/sharedgroups/doc/Extensions/caldav-proxy.txt
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/doc/Extensions/caldav-proxy.txt	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/doc/Extensions/caldav-proxy.txt	2012-11-17 04:03:40 UTC (rev 10079)
@@ -2,12 +2,11 @@
 
 
 Calendar Server Extension                                       C. Daboo
-                                                          Apple Computer
-                                                             May 3, 2007
+                                                              Apple Inc.
+                                                       November 13, 2012
 
 
               Calendar User Proxy Functionality in CalDAV
-                           caldav-cu-proxy-02
 
 Abstract
 
@@ -25,14 +24,18 @@
      3.2.  Client . . . . . . . . . . . . . . . . . . . . . . . . . .  3
    4.  Open Issues  . . . . . . . . . . . . . . . . . . . . . . . . .  4
    5.  New features in CalDAV . . . . . . . . . . . . . . . . . . . .  4
-     5.1.  Proxy Principal Resource . . . . . . . . . . . . . . . . .  4
-     5.2.  Privilege Provisioning . . . . . . . . . . . . . . . . . .  8
-   6.  Security Considerations  . . . . . . . . . . . . . . . . . . .  9
-   7.  IANA Considerations  . . . . . . . . . . . . . . . . . . . . .  9
-   8.  Normative References . . . . . . . . . . . . . . . . . . . . .  9
-   Appendix A.  Acknowledgments . . . . . . . . . . . . . . . . . . .  9
-   Appendix B.  Change History  . . . . . . . . . . . . . . . . . . . 10
-   Author's Address . . . . . . . . . . . . . . . . . . . . . . . . . 10
+     5.1.  Feature Discovery  . . . . . . . . . . . . . . . . . . . .  4
+     5.2.  Proxy Principal Resource . . . . . . . . . . . . . . . . .  4
+     5.3.  New Principal Properties . . . . . . . . . . . . . . . . .  8
+       5.3.1.  CS:calendar-proxy-read-for Property  . . . . . . . . .  8
+       5.3.2.  CS:calendar-proxy-write-for Property . . . . . . . . .  8
+     5.4.  Privilege Provisioning . . . . . . . . . . . . . . . . . .  9
+   6.  Security Considerations  . . . . . . . . . . . . . . . . . . . 10
+   7.  IANA Considerations  . . . . . . . . . . . . . . . . . . . . . 10
+   8.  Normative References . . . . . . . . . . . . . . . . . . . . . 10
+   Appendix A.  Acknowledgments . . . . . . . . . . . . . . . . . . . 11
+   Appendix B.  Change History  . . . . . . . . . . . . . . . . . . . 11
+   Author's Address . . . . . . . . . . . . . . . . . . . . . . . . . 12
 
 
 
@@ -49,12 +52,9 @@
 
 
 
-
-
-
 Daboo                                                           [Page 1]
 
-                              CalDAV Proxy                      May 2007
+                              CalDAV Proxy                 November 2012
 
 
 1.  Introduction
@@ -110,7 +110,7 @@
 
 Daboo                                                           [Page 2]
 
-                              CalDAV Proxy                      May 2007
+                              CalDAV Proxy                 November 2012
 
 
    this namespace are referenced in this document outside of the context
@@ -146,9 +146,13 @@
        "proxy group" inheritable read-write access.
 
    c.  Add an ACE to each of the calendar Inbox and Outbox collections
-       giving the CALDAV:schedule privilege
-       [I-D.desruisseaux-caldav-sched] to the read-write "proxy group".
+       giving the CALDAV:schedule privilege [RFC6638] to the read-write
+       "proxy group".
 
+   On each user principal resource, the server maintains two WebDAV
+   properties containing lists of other user principals for which the
+   target principal is a read-only or read-write proxy.
+
 3.2.  Client
 
    A client can see who the proxies are for the current principal by
@@ -157,24 +161,22 @@
 
    The client can edit the list of proxies for the current principal by
    editing the DAV:group-member-set property on the relevant "proxy
-   group" principal resource.
 
-   The client can find out who the current principal is a proxy for by
-   running a DAV:principal-match REPORT on the principal collection.
 
 
-
 Daboo                                                           [Page 3]
 
-                              CalDAV Proxy                      May 2007
+                              CalDAV Proxy                 November 2012
 
 
-   Alternatively, the client can find out who the current principal is a
-   proxy for by examining the DAV:group-membership property on the
-   current principal resource looking for membership in other users'
-   "proxy groups".
+   group" principal resource.
 
+   The client can find out who the current principal is a proxy for by
+   examining the CS:calendar-proxy-read-for and CS:calendar-proxy-write-
+   for properties, possibly using the DAV:expand-property REPORT to get
+   other useful properties about the principals being proxied for.
 
+
 4.  Open Issues
 
    1.  Do we want to separate read-write access to calendars vs the
@@ -194,8 +196,14 @@
 
 5.  New features in CalDAV
 
-5.1.  Proxy Principal Resource
+5.1.  Feature Discovery
 
+   A server that supports the features described in this document MUST
+   include "calendar-proxy" as a field in the DAV response header from
+   an OPTIONS request on any resource that supports these features.
+
+5.2.  Proxy Principal Resource
+
    Each "regular" principal resource that needs to allow calendar user
    proxy support MUST be a collection resource. i.e. in addition to
    including the DAV:principal XML element in the DAV:resourcetype
@@ -209,6 +217,14 @@
    resources that are groups contain the list of principals for calendar
    users who can act as a read-only or read-write proxy respectively.
 
+
+
+
+Daboo                                                           [Page 4]
+
+                              CalDAV Proxy                 November 2012
+
+
    The server MUST include the CS:calendar-proxy-read or CS:calendar-
    proxy-write XML elements in the DAV:resourcetype property of the
    child resources, respectively.  This allows clients to discover the
@@ -216,15 +232,6 @@
    current user's principal resource and requesting the DAV:resourcetype
    property be returned.  The element type declarations are:
 
-
-
-
-
-Daboo                                                           [Page 4]
-
-                              CalDAV Proxy                      May 2007
-
-
    <!ELEMENT calendar-proxy-read EMPTY>
 
    <!ELEMENT calendar-proxy-write EMPTY>
@@ -265,24 +272,25 @@
    The DAV:group-membership property on the resource /principals/users/
    red/ would be:
 
-   <DAV:group-membership>
-     <DAV:href>/principals/users/cyrus/calendar-proxy-write</DAV:href>
-   </DAV:group-membership>
 
-   If the principal "red" was also a read-only proxy for the principal
-   "wilfredo", then the DA:group-membership property on the resource
-   /principals/users/red/ would be:
 
 
 
-
 Daboo                                                           [Page 5]
 
-                              CalDAV Proxy                      May 2007
+                              CalDAV Proxy                 November 2012
 
 
    <DAV:group-membership>
      <DAV:href>/principals/users/cyrus/calendar-proxy-write</DAV:href>
+   </DAV:group-membership>
+
+   If the principal "red" was also a read-only proxy for the principal
+   "wilfredo", then the DA:group-membership property on the resource
+   /principals/users/red/ would be:
+
+   <DAV:group-membership>
+     <DAV:href>/principals/users/cyrus/calendar-proxy-write</DAV:href>
      <DAV:href>/principals/users/wilfredo/calendar-proxy-read</DAV:href>
    </DAV:group-membership>
 
@@ -324,17 +332,9 @@
 
 
 
-
-
-
-
-
-
-
-
 Daboo                                                           [Page 6]
 
-                              CalDAV Proxy                      May 2007
+                              CalDAV Proxy                 November 2012
 
 
    >> Response <<
@@ -390,11 +390,84 @@
 
 Daboo                                                           [Page 7]
 
-                              CalDAV Proxy                      May 2007
+                              CalDAV Proxy                 November 2012
 
 
-5.2.  Privilege Provisioning
+5.3.  New Principal Properties
 
+   Each "regular" principal that is a proxy for other principals MUST
+   have the CS:calendar-proxy-read-for and CS:calendar-proxy-write-for
+   WebDAV properties available on its principal resource, to allow
+   clients to quickly find the "proxy for" information.
+
+5.3.1.  CS:calendar-proxy-read-for Property
+
+   Name:  calendar-proxy-read-for
+
+   Namespace:  http://calendarserver.org/ns/
+
+   Purpose:  Lists principals for whom the current principal is a read-
+      only proxy.
+
+   Protected:  This property MUST be protected.
+
+   PROPFIND behavior:  This property SHOULD NOT be returned by a
+      PROPFIND allprop request (as defined in Section 14.2 of
+      [RFC4918]).
+
+   Description:  This property allows a client to quickly determine the
+      principals for whom the current principal is a read-only proxy.
+      The server MUST account for any group memberships of the current
+      principal that are either direct or indirect members of a proxy
+      group. e.g., if principal "A" assigns a group "G" as a read-only
+      proxy, and principal "B" is a member of group "G", then principal
+      "B" will see principal "A" listed in the CS:calendar-proxy-read-
+      for property on their principal resource.
+
+   Definition:
+
+     <!ELEMENT calendar-proxy-read-for (DAV:href*)>
+
+5.3.2.  CS:calendar-proxy-write-for Property
+
+   Name:  calendar-proxy-write-for
+
+   Namespace:  http://calendarserver.org/ns/
+
+   Purpose:  Lists principals for whom the current principal is a read-
+      write proxy.
+
+   Protected:  This property MUST be protected.
+
+
+
+
+
+
+Daboo                                                           [Page 8]
+
+                              CalDAV Proxy                 November 2012
+
+
+   PROPFIND behavior:  This property SHOULD NOT be returned by a
+      PROPFIND allprop request (as defined in Section 14.2 of
+      [RFC4918]).
+
+   Description:  This property allows a client to quickly determine the
+      principals for whom the current principal is a read-write proxy.
+      The server MUST account for any group memberships of the
+      current principal that are either direct or indirect members of a
+      proxy group. e.g., if principal "A" assigns a group "G" as a read-
+      write proxy, and principal "B" is a member of group "G", then
+      principal "B" will see principal "A" listed in the CS:calendar-
+      proxy-write-for property on their principal resource.
+
+   Definition:
+
+     <!ELEMENT calendar-proxy-write-for (DAV:href*)>
+
+5.4.  Privilege Provisioning
+
    In order for a calendar user proxy to be able to access the calendars
    of the user they are proxying for the server MUST ensure that the
    privileges on the relevant calendars are setup accordingly:
@@ -407,14 +480,31 @@
 
    Additionally, the CalDAV scheduling Inbox and Outbox calendar
    collections for the user allowing proxy access, MUST have the CALDAV:
-   schedule privilege [I-D.desruisseaux-caldav-sched] granted for read-
-   write calendar user proxy principals.
+   schedule privilege [RFC6638] granted for read-write calendar user
+   proxy principals.
 
    Note that with a suitable repository layout, a server may be able to
    grant the appropriate privileges on a parent collection and ensure
    that all the contained collections and resources inherit that.  For
    example, given the following repository layout:
 
+
+
+
+
+
+
+
+
+
+
+
+
+Daboo                                                           [Page 9]
+
+                              CalDAV Proxy                 November 2012
+
+
            + /
              + calendars/
                + users/
@@ -440,15 +530,6 @@
    on the resource /calendars/users/cyrus/ and all children of that
    resource:
 
-
-
-
-
-Daboo                                                           [Page 8]
-
-                              CalDAV Proxy                      May 2007
-
-
    <DAV:ace>
      <DAV:principal>
        <DAV:href>/principals/users/cyrus/calendar-proxy-write</DAV:href>
@@ -471,12 +552,15 @@
 
 8.  Normative References
 
-   [I-D.desruisseaux-caldav-sched]
-              Desruisseaux, B., "Scheduling Extensions to CalDAV",
-              draft-desruisseaux-caldav-sched-03 (work in progress),
-              January 2007.
+   [RFC2119]  Bradner, S., "Key words for use in RFCs to Indicate
 
-   [RFC2119]  Bradner, S., "Key words for use in RFCs to Indicate
+
+
+Daboo                                                          [Page 10]
+
+                              CalDAV Proxy                 November 2012
+
+
               Requirement Levels", BCP 14, RFC 2119, March 1997.
 
    [RFC2518]  Goland, Y., Whitehead, E., Faizi, A., Carter, S., and D.
@@ -484,40 +568,55 @@
               WEBDAV", RFC 2518, February 1999.
 
    [RFC3744]  Clemm, G., Reschke, J., Sedlar, E., and J. Whitehead, "Web
-              Distributed Authoring and Versioning (WebDAV) Access
-              Control Protocol", RFC 3744, May 2004.
+              Distributed Authoring and Versioning (WebDAV)
+              Access Control Protocol", RFC 3744, May 2004.
 
    [RFC4791]  Daboo, C., Desruisseaux, B., and L. Dusseault,
               "Calendaring Extensions to WebDAV (CalDAV)", RFC 4791,
               March 2007.
 
+   [RFC4918]  Dusseault, L., "HTTP Extensions for Web Distributed
+              Authoring and Versioning (WebDAV)", RFC 4918, June 2007.
 
+   [RFC6638]  Daboo, C. and B. Desruisseaux, "Scheduling Extensions to
+              CalDAV", RFC 6638, June 2012.
+
+
 Appendix A.  Acknowledgments
 
    This specification is the result of discussions between the Apple
    calendar server and client teams.
 
 
+Appendix B.  Change History
 
+   Changes in -03:
 
-Daboo                                                           [Page 9]
-
-                              CalDAV Proxy                      May 2007
+   1.  Added OPTIONS DAV header token.
 
+   2.  Added CS:calendar-proxy-read-for and CS:calendar-proxy-write-for
+       properties for faster discovery of proxy relationships.
 
-Appendix B.  Change History
+   Changes in -02:
 
-   Changes from -00:
-
    1.  Updated to RFC 4791 reference.
 
-   Changes from -00:
+   Changes in -01:
 
    1.  Added more details on actual CalDAV protocol changes.
 
    2.  Changed namespace from http://apple.com/ns/calendarserver/ to
        http://calendarserver.org/ns/.
 
+
+
+
+
+Daboo                                                          [Page 11]
+
+                              CalDAV Proxy                 November 2012
+
+
    3.  Made "proxy group" principals child resources of their "owner"
        principals.
 
@@ -527,7 +626,7 @@
 Author's Address
 
    Cyrus Daboo
-   Apple Computer, Inc.
+   Apple, Inc.
    1 Infinite Loop
    Cupertino, CA  95014
    USA
@@ -556,5 +655,18 @@
 
 
 
-Daboo                                                          [Page 10]
+
+
+
+
+
+
+
+
+
+
+
+
+
+Daboo                                                          [Page 12]
 

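The new feature-discovery requirement is easy to exercise from a client; a minimal sketch using Python's httplib (host, port, and principal path are hypothetical):

    import httplib

    conn = httplib.HTTPConnection("calendar.example.com", 8008)  # hypothetical server
    conn.request("OPTIONS", "/principals/users/cyrus/")
    response = conn.getresponse()
    davHeader = response.getheader("DAV", "")
    # Servers supporting this extension advertise a "calendar-proxy" field.
    if "calendar-proxy" in [token.strip() for token in davHeader.split(",")]:
        print "calendar-proxy is supported"
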
Modified: CalendarServer/branches/users/gaya/sharedgroups/doc/Extensions/caldav-proxy.xml
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/doc/Extensions/caldav-proxy.xml	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/doc/Extensions/caldav-proxy.xml	2012-11-17 04:03:40 UTC (rev 10079)
@@ -1,11 +1,12 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="../rfc2629.xslt"?>
 <!DOCTYPE rfc SYSTEM 'rfc2629.dtd' [
 <!ENTITY rfc2119 PUBLIC '' 'bibxml/reference.RFC.2119.xml'>
 <!ENTITY rfc2518 PUBLIC '' 'bibxml/reference.RFC.2518.xml'>
 <!ENTITY rfc3744 PUBLIC '' 'bibxml/reference.RFC.3744.xml'>
 <!ENTITY rfc4791 PUBLIC '' 'bibxml/reference.RFC.4791.xml'>
-<!ENTITY I-D.dusseault-caldav PUBLIC '' 'bibxml3/reference.I-D.dusseault-caldav.xml'>
-<!ENTITY I-D.desruisseaux-caldav-sched PUBLIC '' 'bibxml3/reference.I-D.desruisseaux-caldav-sched.xml'>
+<!ENTITY rfc4918 PUBLIC '' 'bibxml/reference.RFC.4918.xml'>
+<!ENTITY rfc6638 PUBLIC '' 'bibxml/reference.RFC.6638.xml'>
 ]> 
 <?rfc toc="yes"?>
 <?rfc tocdepth="4"?>
@@ -17,12 +18,12 @@
 <?rfc compact="yes"?>
 <?rfc subcompact="no"?>
 <?rfc private="Calendar Server Extension"?>
-<rfc ipr="none" docName='caldav-cu-proxy-02'>
+<rfc ipr="none" docName='caldav-cu-proxy-03'>
     <front>
         <title abbrev="CalDAV Proxy">Calendar User Proxy Functionality in CalDAV</title> 
         <author initials="C." surname="Daboo" fullname="Cyrus Daboo">
-            <organization abbrev="Apple Computer">
-                Apple Computer, Inc.
+            <organization abbrev="Apple Inc.">
+                Apple, Inc.
             </organization>
             <address>
                 <postal>
@@ -36,7 +37,7 @@
                 <uri>http://www.apple.com/</uri>
             </address>
         </author>
-        <date year='2007'/>
+        <date/>
         <abstract>
             <t>
                 This specification defines an extension to CalDAV that makes it easy for clients to setup and manage calendar user proxies, using the WebDAV Access Control List extension as a basis.
@@ -94,10 +95,13 @@
                             Add an ACE to the calendar home collection giving the read-write "proxy group" inheritable read-write access.
                         </t>
                         <t>
-                            Add an ACE to each of the calendar Inbox and Outbox collections giving the <xref target='I-D.desruisseaux-caldav-sched'>CALDAV:schedule privilege</xref> to the read-write "proxy group".
+                            Add an ACE to each of the calendar Inbox and Outbox collections giving the <xref target='RFC6638'>CALDAV:schedule privilege</xref> to the read-write "proxy group".
                         </t>
                     </list>
                 </t>
+                <t>
+                	On each user principal resource, the server maintains two WebDAV properties containing lists of other user principals for which the target principal is a read-only or read-write proxy.
+                </t>
             </section>
             <section title='Client'>
                 <t>
@@ -107,11 +111,8 @@
                     The client can edit the list of proxies for the current principal by editing the DAV:group-member-set property on the relevant "proxy group" principal resource.
                 </t>
                 <t>
-                    The client can find out who the current principal is a proxy for by running a DAV:principal-match REPORT on the principal collection.
+                    The client can find out who the current principal is a proxy for by examining the CS:calendar-proxy-read-for and CS:calendar-proxy-write-for properties, possibly using the DAV:expand-property REPORT to get other useful properties about the principals being proxied for.
                 </t>
-                <t>
-                    Alternatively, the client can find out who the current principal is a proxy for by examining the DAV:group-membership property on the current principal resource looking for membership in other users' "proxy groups".
-                </t>
             </section>
         </section>
 
@@ -135,6 +136,11 @@
         </section>
             
         <section title='New features in CalDAV' anchor='changes'>
+            <section title="Feature Discovery">
+                <t>
+                    A server that supports the features described in this document MUST include "calendar-proxy" as a field in the DAV response header from an OPTIONS request on any resource that supports these features.
+                </t>
+            </section>
             <section title='Proxy Principal Resource'>
                 <t>
                     Each "regular" principal resource that needs to allow calendar user proxy support MUST be a collection resource. i.e. in addition to including the DAV:principal XML element in the DAV:resourcetype property on the resource, it MUST also include the DAV:collection XML element.
@@ -279,6 +285,47 @@
                   </figure>
                 </t>
             </section>
+            <section title="New Principal Properties">
+            	<t>
+            		Each "regular" principal that is a proxy for other principals MUST have the CS:calendar-proxy-read-for and CS:calendar-proxy-write-for WebDAV properties available on its principal resource, to allow clients to quickly find the "proxy for" information.
+            	</t>
+        <section title="CS:calendar-proxy-read-for Property">
+          <t>
+            <list style="hanging">
+              <t hangText="Name:">calendar-proxy-read-for</t>
+              <t hangText="Namespace:">http://calendarserver.org/ns/</t>
+              <t hangText="Purpose:">Lists principals for whom the current principal is a read-only proxy for.</t>
+              <t hangText="Protected:">This property MUST be protected.</t>
+              <t hangText="PROPFIND behavior:">This property SHOULD NOT be returned by a PROPFIND allprop request (as defined in Section 14.2 of <xref target="RFC4918"/>).</t>
+
+              <t hangText="Description:">This property allows a client to quickly determine the principal for whom the current principal is a read-only proxy for. The server MUST account for any group memberships of the current principal that are either direct or indirect members of a proxy group. e.g., if principal "A" assigns a group "G" as a read-only proxy, and principal "B" is a member of group "G", then principal "B" will see principal "A" listed in the CS:calendar-proxy-read-for property on their principal resource.</t>
+              <t hangText="Definition:">
+                <figure><artwork><![CDATA[
+  <!ELEMENT calendar-proxy-read-for (DAV:href*)>
+]]></artwork></figure>
+              </t>
+            </list>
+          </t>
+        </section>
+        <section title="CS:calendar-proxy-write-for Property">
+          <t>
+            <list style="hanging">
+              <t hangText="Name:">calendar-proxy-write-for</t>
+              <t hangText="Namespace:">http://calendarserver.org/ns/</t>
+              <t hangText="Purpose:">Lists principals for whom the current principal is a read-write proxy for.</t>
+              <t hangText="Protected:">This property MUST be protected.</t>
+              <t hangText="PROPFIND behavior:">This property SHOULD NOT be returned by a PROPFIND allprop request (as defined in Section 14.2 of <xref target="RFC4918"/>).</t>
+
+              <t hangText="Description:">This property allows a client to quickly determine the principal for whom the current principal is a read-write proxy for. The server MUST account for any group memberships of the current principal that are either direct or indirect members of a proxy group. e.g., if principal "A" assigns a group "G" as a read-write proxy, and principal "B" is a member of group "G", then principal "B" will see principal "A" listed in the CS:calendar-proxy-write-for property on their principal resource.</t>
+              <t hangText="Definition:">
+                <figure><artwork><![CDATA[
+  <!ELEMENT calendar-proxy-write-for (DAV:href*)>
+]]></artwork></figure>
+              </t>
+            </list>
+          </t>
+        </section>
+            </section>
             <section title='Privilege Provisioning'>
                 <t>
                     In order for a calendar user proxy to be able to access the calendars of the user they are proxying for the server MUST ensure that the privileges on the relevant calendars are setup accordingly:
@@ -286,7 +333,7 @@
                         <t>The DAV:read privilege MUST be granted for read-only and read-write calendar user proxy principals</t>
                         <t>The DAV:write privilege MUST be granted for read-write calendar user proxy principals.</t>
                     </list>
-                    Additionally, the  CalDAV scheduling Inbox and Outbox calendar collections for the user allowing proxy access, MUST have the <xref target='I-D.desruisseaux-caldav-sched'>CALDAV:schedule privilege</xref> granted for read-write calendar user proxy principals.
+                    Additionally, the  CalDAV scheduling Inbox and Outbox calendar collections for the user allowing proxy access, MUST have the <xref target='RFC6638'>CALDAV:schedule privilege</xref> granted for read-write calendar user proxy principals.
                 </t>
                 <t>
                     Note that with a suitable repository layout, a server may be able to grant the appropriate privileges on a parent collection  and ensure that all the contained collections and resources inherit that. For example, given the following repository layout:
@@ -348,7 +395,8 @@
             &rfc2518;
             &rfc3744;
             &rfc4791;
-            &I-D.desruisseaux-caldav-sched; 
+            &rfc4918;
+            &rfc6638; 
         </references>
 <!--
 <references title='Informative References'>
@@ -360,14 +408,24 @@
             </t>
         </section>
         <section title='Change History'>
-            <t>Changes from -00:
+            <t>Changes in -03:
                 <list style='numbers'>
                     <t>
+                        Added OPTIONS DAV header token.
+                    </t>
+                    <t>
+                    	Added CS:calendar-proxy-read-for and CS:calendar-proxy-write-for properties for faster discovery of proxy relationships.
+                    </t>
+                </list>
+            </t>
+            <t>Changes in -02:
+                <list style='numbers'>
+                    <t>
                         Updated to RFC 4791 reference.
                     </t>
                 </list>
             </t>
-            <t>Changes from -00:
+            <t>Changes in -01:
                 <list style='numbers'>
                     <t>
                         Added more details on actual CalDAV protocol changes.

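Similarly, the two new principal properties can be fetched with a plain PROPFIND; a sketch, again with hypothetical host and principal (the namespace is the CS namespace defined by this extension):

    import httplib

    PROPFIND_BODY = (
        '<?xml version="1.0" encoding="utf-8"?>'
        '<D:propfind xmlns:D="DAV:" xmlns:CS="http://calendarserver.org/ns/">'
        '<D:prop>'
        '<CS:calendar-proxy-read-for/>'
        '<CS:calendar-proxy-write-for/>'
        '</D:prop>'
        '</D:propfind>'
    )

    conn = httplib.HTTPConnection("calendar.example.com", 8008)  # hypothetical server
    conn.request("PROPFIND", "/principals/users/red/", PROPFIND_BODY,
                 {"Depth": "0", "Content-Type": "text/xml; charset=utf-8"})
    print conn.getresponse().read()
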
Modified: CalendarServer/branches/users/gaya/sharedgroups/doc/calendarserver_manage_principals.8
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/doc/calendarserver_manage_principals.8	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/doc/calendarserver_manage_principals.8	2012-11-17 04:03:40 UTC (rev 10079)
@@ -38,6 +38,8 @@
 .Op Fl -get-auto-schedule
 .Op Fl -set-auto-schedule-mode Ar none|accept-always|decline-always|accept-if-free|decline-if-busy|automatic
 .Op Fl -get-auto-schedule-mode
+.Op Fl -set-auto-accept-group Ar group
+.Op Fl -get-auto-accept-group
 .Op Fl -add Ar locations|resources full-name [record-name] [GUID]
 .Op Fl -remove
 .Ar principal
@@ -123,6 +125,11 @@
 Enable or disable automatic scheduling.
 .It Fl -get-auto-schedule
 Get the automatic scheduling state.
+.It Fl -set-auto-accept-group Ar group
+The principal will auto-accept any invites from any member of the group (as long
+as there are no conflicts).
+.It Fl -get-auto-accept-group
+Get the currently assigned auto-accept group for the principal.
 .It Fl -add Ar locations|resources full-name [record-name] [GUID]
 Add a new location or resource. Record name and GUID are optional.  If
 GUID is not specified, one will be generated.  If record name is not

Deleted: CalendarServer/branches/users/gaya/sharedgroups/lib-patches/pycrypto/__init__.py.patch
===================================================================
--- CalendarServer/trunk/lib-patches/pycrypto/__init__.py.patch	2012-11-17 00:37:58 UTC (rev 10076)
+++ CalendarServer/branches/users/gaya/sharedgroups/lib-patches/pycrypto/__init__.py.patch	2012-11-17 04:03:40 UTC (rev 10079)
@@ -1,6 +0,0 @@
-Index: lib/Crypto/Random/Fortuna/__init__.py
-===================================================================
---- lib/Crypto/Random/Fortuna/__init__.py
-+++ lib/Crypto/Random/Fortuna/__init__.py
-@@ -0,0 +1 @@
-+#

Copied: CalendarServer/branches/users/gaya/sharedgroups/lib-patches/pycrypto/__init__.py.patch (from rev 10076, CalendarServer/trunk/lib-patches/pycrypto/__init__.py.patch)
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/lib-patches/pycrypto/__init__.py.patch	                        (rev 0)
+++ CalendarServer/branches/users/gaya/sharedgroups/lib-patches/pycrypto/__init__.py.patch	2012-11-17 04:03:40 UTC (rev 10079)
@@ -0,0 +1,6 @@
+Index: lib/Crypto/Random/Fortuna/__init__.py
+===================================================================
+--- lib/Crypto/Random/Fortuna/__init__.py
++++ lib/Crypto/Random/Fortuna/__init__.py
+@@ -0,0 +1 @@
++#

Modified: CalendarServer/branches/users/gaya/sharedgroups/support/Makefile.Apple
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/support/Makefile.Apple	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/support/Makefile.Apple	2012-11-17 04:03:40 UTC (rev 10079)
@@ -60,16 +60,17 @@
 sqlparse-0.1.2::        $(BuildDirectory)/sqlparse-0.1.2
 setproctitle-1.1.6::	$(BuildDirectory)/setproctitle-1.1.6
 psutil-0.6.1::		$(BuildDirectory)/psutil-0.6.1
+pycrypto-2.5::		$(BuildDirectory)/pycrypto-2.5
 $(Project)::            $(BuildDirectory)/$(Project)
 
-build:: PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 setproctitle-1.1.6 psutil-0.6.1 $(Project)
+build:: PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 setproctitle-1.1.6 psutil-0.6.1 pycrypto-2.5 $(Project)
 
 setup:
 	$(_v) ./run -g
 
-prep:: setup CalDAVTester.tgz PyKerberos.tgz pycalendar.tgz PyGreSQL-4.0.tgz sqlparse-0.1.2.tgz setproctitle-1.1.6.tgz psutil-0.6.1.tgz
+prep:: setup CalDAVTester.tgz PyKerberos.tgz pycalendar.tgz PyGreSQL-4.0.tgz sqlparse-0.1.2.tgz setproctitle-1.1.6.tgz psutil-0.6.1.tgz pycrypto-2.5.tgz
 
-PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 setproctitle-1.1.6 psutil-0.6.1 $(Project)::
+PyKerberos pycalendar PyGreSQL-4.0 sqlparse-0.1.2 setproctitle-1.1.6 psutil-0.6.1 pycrypto-2.5 $(Project)::
 	@echo "Building $@..."
 	$(_v) cd $(BuildDirectory)/$@ && $(Environment) $(PYTHON) setup.py build
 
@@ -81,6 +82,7 @@
 	$(_v) cd $(BuildDirectory)/sqlparse-0.1.2     && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
 	$(_v) cd $(BuildDirectory)/setproctitle-1.1.6 && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
 	$(_v) cd $(BuildDirectory)/psutil-0.6.1       && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
+	$(_v) cd $(BuildDirectory)/pycrypto-2.5       && $(Environment) $(PYTHON) setup.py install $(PY_INSTALL_FLAGS)
 	$(_v) for so in $$(find "$(DSTROOT)$(PY_HOME)/lib" -type f -name '*.so'); do $(STRIP) -Sx "$${so}"; done 
 	$(_v) $(INSTALL_DIRECTORY) "$(DSTROOT)$(SIPP)$(ETCDIR)$(CALDAVDSUBDIR)"
 	$(_v) $(INSTALL_FILE) "$(Sources)/conf/caldavd-apple.plist" "$(DSTROOT)$(SIPP)$(ETCDIR)$(CALDAVDSUBDIR)/caldavd.plist"
@@ -110,23 +112,23 @@
 	$(_v) $(INSTALL_DIRECTORY) -o "$(CS_USER)" -g "$(CS_GROUP)" -m 0755 "$(DSTROOT)$(VARDIR)/log$(CALDAVDSUBDIR)"
 	$(_v) $(INSTALL_DIRECTORY) "$(DSTROOT)$(SIPP)$(NSLIBRARYDIR)/LaunchDaemons"
 	$(_v) $(INSTALL_FILE) "$(Sources)/contrib/launchd/calendarserver.plist" "$(DSTROOT)$(SIPP)$(NSLIBRARYDIR)/LaunchDaemons/org.calendarserver.calendarserver.plist"
-	@echo "Installing migration config..."
+	@echo "Installing migration extras script..."
 	$(_v) $(INSTALL_DIRECTORY) "$(DSTROOT)$(SERVERSETUP)/MigrationExtras"
 	$(_v) $(INSTALL_FILE) "$(Sources)/contrib/migration/calendarmigrator.py" "$(DSTROOT)$(SERVERSETUP)/MigrationExtras/70_calendarmigrator.py"
 	$(_v) chmod ugo+x "$(DSTROOT)$(SERVERSETUP)/MigrationExtras/70_calendarmigrator.py"
-	@echo "Installing server promotion config..."
+	@echo "Installing common extras script..."
+	$(_v) $(INSTALL_DIRECTORY) "$(DSTROOT)$(SERVERSETUP)/CommonExtras"
+	$(_v) $(INSTALL_FILE) "$(Sources)/contrib/migration/calendarcommonextra.py" "$(DSTROOT)$(SERVERSETUP)/CommonExtras/70_calendarcommonextra.py"
+	$(_v) chmod ugo+x "$(DSTROOT)$(SERVERSETUP)/CommonExtras/70_calendarcommonextra.py"
+	@echo "Installing server promotion extras script..."
 	$(_v) $(INSTALL_DIRECTORY) "$(DSTROOT)$(SERVERSETUP)/PromotionExtras"
 	$(_v) $(INSTALL_FILE) "$(Sources)/contrib/migration/calendarpromotion.py" "$(DSTROOT)$(SERVERSETUP)/PromotionExtras/59_calendarpromotion.py"
 	$(_v) chmod ugo+x "$(DSTROOT)$(SERVERSETUP)/PromotionExtras/59_calendarpromotion.py"
-	@echo "Installing server demotion config..."
+	@echo "Installing server uninstall extras script..."
 	$(_v) $(INSTALL_DIRECTORY) "$(DSTROOT)$(SERVERSETUP)/UninstallExtras"
 	$(_v) $(INSTALL_FILE) "$(Sources)/contrib/migration/calendardemotion.py" "$(DSTROOT)$(SERVERSETUP)/UninstallExtras/59_calendardemotion.py"
 	$(_v) chmod ugo+x "$(DSTROOT)$(SERVERSETUP)/UninstallExtras/59_calendardemotion.py"
-	@echo "Installing database configuration scripts..."
-	$(_v) $(INSTALL_DIRECTORY) "$(DSTROOT)$(SERVERSETUP)/CommonExtras/PostgreSQLExtras"
-	$(_v) $(INSTALL_FILE) "$(Sources)/contrib/create_caldavd_db.sh" "$(DSTROOT)$(SERVERSETUP)/CommonExtras/PostgreSQLExtras/create_caldavd_db.sh"
-	$(_v) chmod ugo+x "$(DSTROOT)$(SERVERSETUP)/CommonExtras/PostgreSQLExtras/create_caldavd_db.sh"
-	@echo "Installing changeip config..."
+	@echo "Installing changeip script..."
 	$(_v) $(INSTALL_DIRECTORY) "$(DSTROOT)$(SIPP)$(LIBEXECDIR)/changeip"
 	$(_v) $(INSTALL_FILE) "$(Sources)/calendarserver/tools/changeip_calendar.py" "$(DSTROOT)$(SIPP)$(LIBEXECDIR)/changeip/changeip_calendar.py"
 	$(_v) chmod ugo+x "$(DSTROOT)$(SIPP)$(LIBEXECDIR)/changeip/changeip_calendar.py"

Modified: CalendarServer/branches/users/gaya/sharedgroups/support/build.sh
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/support/build.sh	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/support/build.sh	2012-11-17 04:03:40 UTC (rev 10079)
@@ -93,16 +93,34 @@
 
   patches="${caldav}/lib-patches";
 
-  # Find a command that can hash up a string for us
-  if type -t openssl > /dev/null; then
-    hash="md5";
-    hash () { openssl dgst -md5 "$@"; }
-  elif type -t md5 > /dev/null; then
-    hash="md5";
+  # Find some hashing commands
+  # sha1() = sha1 hash, if available
+  # md5()  = md5 hash, if available
+  # hash() = default hash function
+  # $hash  = name of the type of hash used by hash()
+
+  hash="";
+
+  if type -ft openssl > /dev/null; then
+    if [ -z "${hash}" ]; then hash="md5"; fi;
+    md5 () { "$(type -p openssl)" dgst -md5 "$@"; }
+  elif type -ft md5 > /dev/null; then
+    if [ -z "${hash}" ]; then hash="md5"; fi;
+    md5 () { "$(type -p md5)" "$@"; }
+  elif type -ft md5sum > /dev/null; then
+    if [ -z "${hash}" ]; then hash="md5"; fi;
+    md5 () { "$(type -p md5sum)" "$@"; }
+  fi;
+
+  if type -ft shasum > /dev/null; then
+    if [ -z "${hash}" ]; then hash="sha1"; fi;
+    sha1 () { "$(type -p shasum)" "$@"; }
+  fi;
+
+  if [ "${hash}" == "sha1" ]; then
+    hash () { sha1 "$@"; }
+  elif [ "${hash}" == "md5" ]; then
     hash () { md5 "$@"; }
-  elif type -t md5sum > /dev/null; then
-    hash="md5";
-    hash () { md5sum "$@"; }
   elif type -t cksum > /dev/null; then
     hash="hash";
     hash () { cksum "$@" | cut -f 1 -d " "; }
@@ -110,7 +128,6 @@
     hash="hash";
     hash () { sum "$@" | cut -f 1 -d " "; }
   else
-    hash="";
     hash () { echo "INTERNAL ERROR: No hash function."; exit 1; }
   fi;
 
@@ -173,12 +190,14 @@
 www_get () {
   if ! "${do_get}"; then return 0; fi;
 
-  local md5="";
+  local  md5="";
+  local sha1="";
 
   OPTIND=1;
-  while getopts "m:" option; do
+  while getopts "m:s:" option; do
     case "${option}" in
-      'm') md5="${OPTARG}"; ;;
+      'm')  md5="${OPTARG}"; ;;
+      's') sha1="${OPTARG}"; ;;
     esac;
   done;
   shift $((${OPTIND} - 1));
@@ -211,18 +230,27 @@
       check_hash () {
         local file="$1"; shift;
 
-        if [ "${hash}" == "md5" ]; then
-          local sum="$(hash "${file}" | perl -pe 's|^.*([0-9a-f]{32}).*$|\1|')";
-          if [ -n "${md5}" ]; then
-            echo "Checking MD5 sum for ${name}...";
-            if [ "${md5}" != "${sum}" ]; then
-              echo "ERROR: MD5 sum for downloaded file is wrong: ${sum} != ${md5}";
-              return 1;
-            fi;
-          else
-            echo "MD5 sum for ${name} is ${sum}";
+        local sum="$(md5 "${file}" | perl -pe 's|^.*([0-9a-f]{32}).*$|\1|')";
+        if [ -n "${md5}" ]; then
+          echo "Checking MD5 sum for ${name}...";
+          if [ "${md5}" != "${sum}" ]; then
+            echo "ERROR: MD5 sum for downloaded file is wrong: ${sum} != ${md5}";
+            return 1;
           fi;
+        else
+          echo "MD5 sum for ${name} is ${sum}";
         fi;
+
+        local sum="$(sha1 "${file}" | perl -pe 's|^.*([0-9a-f]{40}).*$|\1|')";
+        if [ -n "${sha1}" ]; then
+          echo "Checking SHA1 sum for ${name}...";
+          if [ "${sha1}" != "${sum}" ]; then
+            echo "ERROR: SHA1 sum for downloaded file is wrong: ${sum} != ${sha1}";
+            return 1;
+          fi;
+        else
+          echo "SHA1 sum for ${name} is ${sum}";
+        fi;
       }
 
       if [ ! -f "${cache_file}" ]; then
@@ -264,7 +292,7 @@
 
           if egrep "^${pkg_host}" "${HOME}/.ssh/known_hosts" > /dev/null 2>&1; then
             echo "Copying cache file up to ${pkg_host}.";
-            if ! scp "${tmp}" "${pkg_host}:/www/hosts/${pkg_host}${pkg_path}/${cache_basename}"; then
+            if ! scp "${tmp}" "${pkg_host}:/var/www/static${pkg_path}/${cache_basename}"; then
               echo "Failed to copy cache file up to ${pkg_host}.";
             fi;
             echo ""
@@ -441,10 +469,10 @@
   local revision="0";     # Revision (if svn)
   local get_type="www";   # Protocol to use
   local  version="";      # Minimum version required
-  local   f_hash="";      # Checksum
+  local   f_hash="";      # Checksum flag
 
   OPTIND=1;
-  while getopts "ofi:er:v:m:" option; do
+  while getopts "ofi:er:v:m:s:" option; do
     case "${option}" in
       'o') optional="true"; ;;
       'f') override="true"; ;;
@@ -452,6 +480,7 @@
       'r') get_type="svn"; revision="${OPTARG}"; ;;
       'v')  version="-v ${OPTARG}"; ;;
       'm')   f_hash="-m ${OPTARG}"; ;;
+      's')   f_hash="-s ${OPTARG}"; ;;
       'i')
         if [ -z "${OPTARG}" ]; then
           inplace=".";
@@ -535,9 +564,10 @@
   local f_hash="";
 
   OPTIND=1;
-  while getopts "m:" option; do
+  while getopts "m:s:" option; do
     case "${option}" in
       'm') f_hash="-m ${OPTARG}"; ;;
+      's') f_hash="-s ${OPTARG}"; ;;
     esac;
   done;
   shift $((${OPTIND} - 1));
@@ -703,12 +733,6 @@
       "${svn_uri_base}/PyKerberos/trunk";
   fi;
 
-  if [ "$(uname -s)" == "Darwin" ]; then
-    py_dependency -r 6656 \
-      "PyOpenDirectory" "opendirectory" "PyOpenDirectory" \
-      "${svn_uri_base}/PyOpenDirectory/trunk";
-  fi;
-
   py_dependency -v 0.5 -r 1038 \
     "xattr" "xattr" "xattr" \
     "http://svn.red-bean.com/bob/xattr/releases/xattr-0.6.1/";
@@ -759,7 +783,7 @@
 
   local sv="0.1.2";
   local sq="sqlparse-${sv}";
-  py_dependency -o -v "${sv}" -m "aa9852ad81822723adcd9f96838de14e" \
+  py_dependency -o -v "${sv}" -s "978874e5ebbd78e6d419e8182ce4fb3c30379642" \
     "SQLParse" "sqlparse" "${sq}" \
     "http://python-sqlparse.googlecode.com/files/${sq}.tar.gz";
 

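The new -s option mirrors the existing -m (MD5) flow with a SHA1 sum; the equivalent check in Python, using the sqlparse sum from the hunk above, would look roughly like:

    import hashlib

    def sha1OfFile(path, blockSize=65536):
        # Stream the file so large downloads need not fit in memory.
        digest = hashlib.sha1()
        f = open(path, "rb")
        try:
            block = f.read(blockSize)
            while block:
                digest.update(block)
                block = f.read(blockSize)
        finally:
            f.close()
        return digest.hexdigest()

    expected = "978874e5ebbd78e6d419e8182ce4fb3c30379642"  # sqlparse-0.1.2
    if sha1OfFile("sqlparse-0.1.2.tar.gz") != expected:
        raise ValueError("SHA1 sum for downloaded file is wrong")
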
Modified: CalendarServer/branches/users/gaya/sharedgroups/twext/enterprise/dal/test/test_parseschema.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twext/enterprise/dal/test/test_parseschema.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twext/enterprise/dal/test/test_parseschema.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -216,9 +216,9 @@
         """
         for identicalSchema in [
                 "create table sample (example integer unique);",
-                "create table sample (example integer, unique(example));",
+                "create table sample (example integer, unique (example));",
                 "create table sample "
-                "(example integer, constraint unique_example unique(example))"]:
+                "(example integer, constraint unique_example unique (example))"]:
             s = self.schemaFromString(identicalSchema)
             table = s.tableNamed('sample')
             column = table.columnNamed('example')
@@ -242,14 +242,14 @@
             self.assertEqual(expr.op, '>')
             self.assertEqual(constraint.name, checkName)
         checkOneConstraint(
-            "create table sample (example integer check(example >  5));"
+            "create table sample (example integer check (example >  5));"
         )
         checkOneConstraint(
-            "create table sample (example integer, check(example  > 5));"
+            "create table sample (example integer, check (example  > 5));"
         )
         checkOneConstraint(
             "create table sample "
-            "(example integer, constraint gt_5 check(example>5))", "gt_5"
+            "(example integer, constraint gt_5 check (example>5))", "gt_5"
         )
 
 
@@ -273,7 +273,7 @@
             )
         checkOneConstraint(
             "create table sample "
-            "(example integer check(example = lower(example)));"
+            "(example integer check (example = lower (example)));"
         )
 
 
@@ -283,7 +283,7 @@
         listing that column as a unique set.
         """
         s = self.schemaFromString(
-            "create table a (b integer, c integer, unique(b, c), unique(c));"
+            "create table a (b integer, c integer, unique (b, c), unique (c));"
         )
         a = s.tableNamed('a')
         b = a.columnNamed('b')
@@ -310,7 +310,7 @@
         C{primaryKey} attribute on the Table object.
         """
         s = self.schemaFromString(
-            "create table a (b integer, c integer, primary key(b, c))"
+            "create table a (b integer, c integer, primary key (b, c))"
         )
         a = s.tableNamed("a")
         self.assertEquals(

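The test strings above change only by adding a space between the constraint keyword and its column list (unique (example), check (...), primary key (b, c)); every spelling remains equivalent DDL, which the tests assert via the parsed schema. A quick standalone sanity check of that equivalence with the standard sqlite3 module (a sketch, not part of the test suite):

    import sqlite3

    # All three forms should yield the same unique constraint.
    forms = [
        "create table sample (example integer unique)",
        "create table sample (example integer, unique (example))",
        "create table sample "
        "(example integer, constraint unique_example unique (example))",
    ]
    for ddl in forms:
        con = sqlite3.connect(":memory:")
        con.execute(ddl)
        con.execute("insert into sample values (1)")
        try:
            con.execute("insert into sample values (1)")
        except sqlite3.IntegrityError:
            pass  # duplicate rejected: the constraint is in effect
        else:
            raise AssertionError("unique constraint missing for: " + ddl)
        con.close()
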
Modified: CalendarServer/branches/users/gaya/sharedgroups/twext/web2/dav/resource.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twext/web2/dav/resource.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twext/web2/dav/resource.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -136,29 +136,29 @@
     # use them.  This (generic) class does not.
 
     def liveProperties(self):
-
+    
         return (
-            (dav_namespace, "resourcetype"),
-            (dav_namespace, "getetag"),
-            (dav_namespace, "getcontenttype"),
-            (dav_namespace, "getcontentlength"),
-            (dav_namespace, "getlastmodified"),
-            (dav_namespace, "creationdate"),
-            (dav_namespace, "displayname"),
-            (dav_namespace, "supportedlock"),
-            (dav_namespace, "supported-report-set"), # RFC 3253, section 3.1.5
+            (dav_namespace, "resourcetype"              ),
+            (dav_namespace, "getetag"                   ),
+            (dav_namespace, "getcontenttype"            ),
+            (dav_namespace, "getcontentlength"          ),
+            (dav_namespace, "getlastmodified"           ),
+            (dav_namespace, "creationdate"              ),
+            (dav_namespace, "displayname"               ),
+            (dav_namespace, "supportedlock"             ),
+            (dav_namespace, "supported-report-set"      ), # RFC 3253, section 3.1.5
            #(dav_namespace, "owner"                     ), # RFC 3744, section 5.1
            #(dav_namespace, "group"                     ), # RFC 3744, section 5.2
-            (dav_namespace, "supported-privilege-set"), # RFC 3744, section 5.3
+            (dav_namespace, "supported-privilege-set"   ), # RFC 3744, section 5.3
             (dav_namespace, "current-user-privilege-set"), # RFC 3744, section 5.4
-            (dav_namespace, "current-user-principal"), # RFC 5397, Section 3
-            (dav_namespace, "acl"), # RFC 3744, section 5.5
-            (dav_namespace, "acl-restrictions"), # RFC 3744, section 5.6
-            (dav_namespace, "inherited-acl-set"), # RFC 3744, section 5.7
-            (dav_namespace, "principal-collection-set"), # RFC 3744, section 5.8
-            (dav_namespace, "quota-available-bytes"), # RFC 4331, section 3
-            (dav_namespace, "quota-used-bytes"), # RFC 4331, section 4
-
+            (dav_namespace, "current-user-principal"    ), # RFC 5397, Section 3
+            (dav_namespace, "acl"                       ), # RFC 3744, section 5.5
+            (dav_namespace, "acl-restrictions"          ), # RFC 3744, section 5.6
+            (dav_namespace, "inherited-acl-set"         ), # RFC 3744, section 5.7
+            (dav_namespace, "principal-collection-set"  ), # RFC 3744, section 5.8
+            (dav_namespace, "quota-available-bytes"     ), # RFC 4331, section 3
+            (dav_namespace, "quota-used-bytes"          ), # RFC 4331, section 4
+    
             (twisted_dav_namespace, "resource-class"),
         )
 
@@ -204,7 +204,7 @@
                 d = self.hasQuota(request)
                 d.addCallback(lambda result: result)
                 return d
-
+        
         return succeed(
             qname in self.liveProperties() or
             self.deadProperties().contains(qname)
@@ -450,7 +450,7 @@
         # Add dynamic live properties that exist
         dynamicLiveProperties = (
             (dav_namespace, "quota-available-bytes"),
-            (dav_namespace, "quota-used-bytes"),
+            (dav_namespace, "quota-used-bytes"     ),
         )
         for dqname in dynamicLiveProperties:
             has = (yield self.hasProperty(dqname, request))
@@ -635,7 +635,7 @@
 
             if privileges is None:
                 return child
-
+   
             d = child.checkPrivileges(
                 request, privileges,
                 inherited_aces=inherited_aces
@@ -716,7 +716,7 @@
             returnValue(None)
 
         # First find all depth 1 children
-        names1 = []
+        names1= []
         namesDeep = []
         collections1 = []
         if names:
@@ -791,14 +791,14 @@
                         child_collections[collection_name] if names else None, privileges,
                         inherited_aces=collection_inherited_aces
                     )
-
+                
         returnValue(None)
 
     @inlineCallbacks
     def checkACLPrivilege(
         self, request, acl, privyset, privileges, inherited_aces
     ):
-
+        
         if acl is None:
             returnValue(False)
 
@@ -875,7 +875,7 @@
         acl = element.ACL(*aces)
 
         return acl
-
+    
     def supportedReports(self):
         """
         See L{IDAVResource.supportedReports}.
@@ -1120,7 +1120,7 @@
         This implementation stores the ACL in the private property
         """
         # C{(L{twisted_private_namespace}, "acl")}.
-
+        
         # Steps for ACL evaluation:
         #  1. Check that ace's on incoming do not match a protected ace
         #  2. Check that ace's on incoming do not match an inherited ace
@@ -1332,7 +1332,7 @@
         if recurse:
             yield self.findChildren(
                 "infinity", request,
-                lambda x, y: resources.append((x, y))
+                lambda x, y: resources.append((x,y))
             )
 
         for resource, uri in resources:
@@ -1377,12 +1377,12 @@
 
             denied += pending # If no matching ACE, then denied
 
-            if denied:
+            if denied: 
                 errors.append((uri, denied))
 
         if errors:
             raise AccessDeniedError(errors,)
-
+        
         returnValue(None)
 
     def supportedPrivileges(self, request):
@@ -1473,20 +1473,20 @@
             if inherited_aces is None:
                 if myURL != "/":
                     parentURL = parentForURL(myURL)
-
+    
                     parent = (yield request.locateResource(parentURL))
-
+    
                     if parent:
                         parent_acl = (yield
                             parent.accessControlList(
                                 request, inheritance=True, expanding=True
                             )
                         )
-
+    
                         # Check disabled
                         if parent_acl is None:
                             returnValue(None)
-
+    
                         for ace in parent_acl.children:
                             if ace.inherited:
                                 aces.append(ace)
@@ -1527,7 +1527,7 @@
         @return: a C{list} of L{Ace}s that child resources of this one
             will inherit.
         """
-
+        
         # Get the parent ACLs with inheritance and preserve the
         # <inheritable> element.
 
@@ -1632,7 +1632,7 @@
             and URI respectively.
         """
         return succeed(authnPrincipal)
-
+        
     def samePrincipal(self, principal1, principal2):
         """
         Check whether the two principals are exactly the same in terms of
@@ -1662,7 +1662,7 @@
                 return True
         else:
             return False
-
+                
     def matchPrincipal(self, principal1, principal2, request):
         """
         Check whether the principal1 is a principal in the set defined
@@ -1758,7 +1758,7 @@
             returnValue(isContained)
         returnValue(False)
 
-
+        
     def validPrincipal(self, ace_principal, request):
         """
         Check whether the supplied principal is valid for this resource.
@@ -1808,7 +1808,7 @@
 
         # Must have the principal resource type and must match the
         # principal-URL
-
+        
         def _matchPrincipalURL(resource):
             return (
                 isPrincipalResource(resource) and
@@ -1963,9 +1963,9 @@
         for a in acl.children:
             if self.samePrincipal(a.principal, ace.principal):
                 return a
-
+        
         return None
-
+    
     def principalSearchPropertySet(self):
         """
         @return: a L{element.PrincipalSearchPropertySet} element describing the
@@ -1980,7 +1980,7 @@
     ##
     # Quota
     ##
-
+    
     """
     The basic policy here is to define a private 'quota-root' property
     on a collection.  That property will contain the maximum allowed
@@ -2012,7 +2012,7 @@
             quota-used-bytes, or C{None} if quota is not defined on
             the resource.
         """
-
+        
         # See if already cached
         if hasattr(request, "quota"):
             if request.quota.has_key(self):
@@ -2031,7 +2031,7 @@
                             available = 0
                         request.quota[self] = (available, used)
                         return (available, used)
-
+    
                     d = qroot_resource.currentQuotaUse(request)
                     d.addCallback(gotUsage)
                     return d
@@ -2039,11 +2039,11 @@
             request.quota[self] = None
             return None
 
-
+            
         d = self.quotaRootResource(request)
         d.addCallback(gotQuotaRootResource)
         return d
-
+    
     def hasQuota(self, request):
         """
         Check whether this resource is under quota control by checking
@@ -2053,9 +2053,9 @@
         """
 
         def gotQuotaRootResource(qroot_resource):
-
+            
             return qroot_resource is not None
-
+        
         d = self.quotaRootResource(request)
         d.addCallback(gotQuotaRootResource)
         return d
@@ -2065,7 +2065,7 @@
         @return: a C{True} if this resource has quota root, C{False} otherwise.
         """
         return self.hasDeadProperty(TwistedQuotaRootProperty)
-
+    
     def quotaRoot(self, request):
         """
         @return: a C{int} containing the maximum allowed bytes if this
@@ -2076,7 +2076,7 @@
             return int(str(self.readDeadProperty(TwistedQuotaRootProperty)))
         else:
             return None
-
+    
     @inlineCallbacks
     def quotaRootResource(self, request):
         """
@@ -2115,14 +2115,14 @@
         assert maxsize is None or isinstance(maxsize, int), (
             "maxsize must be an int or None"
         )
-
+        
         if maxsize is not None:
             self.writeDeadProperty(TwistedQuotaRootProperty(str(maxsize)))
         else:
             # Remove both the root and the cached used value
             self.removeDeadProperty(TwistedQuotaRootProperty)
             self.removeDeadProperty(TwistedQuotaUsedProperty)
-
+    
     def quotaSize(self, request):
         """
         Get the size of this resource (if its a collection get total
@@ -2145,7 +2145,7 @@
         @return: C{True} if there is sufficient quota remaining on all
             quota roots, C{False} otherwise.
         """
-
+        
         def _defer(quotaroot):
             if quotaroot:
                 # Check quota on this root (if it has one)
@@ -2153,7 +2153,7 @@
                 if quota is not None:
                     if available > quota[0]:
                         return False
-
+    
             return True
 
         d = self.quotaRootResource(request)
@@ -2169,8 +2169,8 @@
             (positive) or removed (negative) that should be used to
             adjust the cached total.
         """
-
-
+        
+        
         def _defer(quotaroot):
             if quotaroot:
                 # Check quota on this root (if it has one)
@@ -2193,7 +2193,7 @@
         assert self.hasQuotaRoot(request), (
             "Quota use only on quota root collection"
         )
-
+        
         # Try to get the cached value property
         if self.hasDeadProperty(TwistedQuotaUsedProperty):
             return succeed(
@@ -2221,11 +2221,11 @@
             or C{None} if not quota controlled.
         """
         assert self.isCollection(), "Only collections can have a quota root"
-
+        
         # Get current value
         def _defer(size):
             size += adjust
-
+            
             # Sanity check the resulting size
             if size >= 0:
                 self.writeDeadProperty(TwistedQuotaUsedProperty(str(size)))
@@ -2241,7 +2241,7 @@
         d = self.currentQuotaUse(request)
         d.addCallback(_defer)
         return d
-
+        
     ##
     # HTTP
     ##
@@ -2259,7 +2259,7 @@
                 request.unparseURL(
                     path=urllib.quote(
                         urllib.unquote(request.path),
-                        safe=':/') + '/'
+                        safe=':/')+'/'
                 )
             )
 
@@ -2311,12 +2311,12 @@
     ##
 
     def liveProperties(self):
-
+        
         return super(DAVPrincipalResource, self).liveProperties() + (
             (dav_namespace, "alternate-URI-set"),
-            (dav_namespace, "principal-URL"),
-            (dav_namespace, "group-member-set"),
-            (dav_namespace, "group-membership"),
+            (dav_namespace, "principal-URL"    ),
+            (dav_namespace, "group-member-set" ),
+            (dav_namespace, "group-membership" ),
         )
 
     def davComplianceClasses(self):
@@ -2351,7 +2351,7 @@
                             element.HRef(p.principalURL())
                             for p in members
                         ])
-
+                    
                     d = self.groupMembers()
                     d.addCallback(callback)
                     return d
@@ -2362,7 +2362,7 @@
                             element.HRef(g.principalURL())
                             for g in memberships
                         ])
-
+                    
                     d = self.groupMemberships()
                     d.addCallback(callback)
                     return d

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/aggregate.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/aggregate.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/aggregate.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -67,6 +67,8 @@
                     )
                 recordTypes[recordType] = service
 
+            service.aggregateService = self
+
         self.realmName = realmName
         self._recordTypes = recordTypes
 

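Each wrapped service now carries an aggregateService back-reference, so a record whose own service cannot resolve a GUID (for example, a resource whose auto-accept group lives in a different directory) can fall back to the aggregate. A minimal sketch of the pattern with hypothetical classes:

    # Sketch: sub-services point back at the aggregate so cross-service
    # GUID lookups can use the widest directory available.
    class SubService(object):
        aggregateService = None  # set by the aggregate below

        def __init__(self, records):
            self.records = records  # dict: guid -> record

        def recordWithGUID(self, guid):
            return self.records.get(guid)

    class AggregateService(object):
        def __init__(self, services):
            self.services = services
            for service in services:
                service.aggregateService = self

        def recordWithGUID(self, guid):
            for service in self.services:
                record = service.recordWithGUID(guid)
                if record is not None:
                    return record
            return None

This mirrors the lookup in DirectoryRecord.autoAcceptMembers() below, which uses self.service.aggregateService or self.service.
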
Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/augment.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/augment.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/augment.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -60,6 +60,7 @@
         enabledForCalendaring=False,
         autoSchedule=False,
         autoScheduleMode="default",
+        autoAcceptGroup="",
         enabledForAddressBooks=False,
         enabledForLogin=True,
     ):
@@ -72,6 +73,7 @@
         self.enabledForLogin = enabledForLogin
         self.autoSchedule = autoSchedule
         self.autoScheduleMode = autoScheduleMode if autoScheduleMode in allowedAutoScheduleModes else "default"
+        self.autoAcceptGroup = autoAcceptGroup
         self.clonedFromDefault = False
 
 recordTypesMap = {
@@ -459,6 +461,8 @@
         addSubElement(recordNode, xmlaugmentsparser.ELEMENT_AUTOSCHEDULE, "true" if record.autoSchedule else "false")
         if record.autoScheduleMode:
             addSubElement(recordNode, xmlaugmentsparser.ELEMENT_AUTOSCHEDULE_MODE, record.autoScheduleMode)
+        if record.autoAcceptGroup:
+            addSubElement(recordNode, xmlaugmentsparser.ELEMENT_AUTOACCEPTGROUP, record.autoAcceptGroup)
 
     def refresh(self):
         """
@@ -570,11 +574,11 @@
         """
         
         # Query for the record information
-        results = (yield self.query("select UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, LOGINENABLED from AUGMENTS where UID = :1", (uid,)))
+        results = (yield self.query("select UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED from AUGMENTS where UID = :1", (uid,)))
         if not results:
             returnValue(None)
         else:
-            uid, enabled, serverid, partitionid, enabledForCalendaring, enabledForAddressBooks, autoSchedule, autoScheduleMode, enabledForLogin = results[0]
+            uid, enabled, serverid, partitionid, enabledForCalendaring, enabledForAddressBooks, autoSchedule, autoScheduleMode, autoAcceptGroup, enabledForLogin = results[0]
             
             record = AugmentRecord(
                 uid = uid,
@@ -586,6 +590,7 @@
                 enabledForLogin = enabledForLogin == "T",
                 autoSchedule = autoSchedule == "T",
                 autoScheduleMode = autoScheduleMode,
+                autoAcceptGroup = autoAcceptGroup,
             )
             
             returnValue(record)
@@ -648,6 +653,7 @@
                 ("ADDRESSBOOKS",     "text(1)"),
                 ("AUTOSCHEDULE",     "text(1)"),
                 ("AUTOSCHEDULEMODE", "text"),
+                ("AUTOACCEPTGROUP",  "text"),
                 ("LOGINENABLED",     "text(1)"),
             ),
             ifnotexists=True,
@@ -671,8 +677,8 @@
     def _addRecord(self, record):
         yield self.execute(
             """insert or replace into AUGMENTS
-            (UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, LOGINENABLED)
-            values (:1, :2, :3, :4, :5, :6, :7, :8, :9)""",
+            (UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED)
+            values (:1, :2, :3, :4, :5, :6, :7, :8, :9, :10)""",
             (
                 record.uid,
                 "T" if record.enabled else "F",
@@ -682,6 +688,7 @@
                 "T" if record.enabledForAddressBooks else "F",
                 "T" if record.autoSchedule else "F",
                 record.autoScheduleMode if record.autoScheduleMode else "",
+                record.autoAcceptGroup,
                 "T" if record.enabledForLogin else "F",
             )
         )
@@ -703,8 +710,8 @@
     def _addRecord(self, record):
         yield self.execute(
             """insert into AUGMENTS
-            (UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, LOGINENABLED)
-            values (:1, :2, :3, :4, :5, :6, :7, :8, :9)""",
+            (UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED)
+            values (:1, :2, :3, :4, :5, :6, :7, :8, :9, :10)""",
             (
                 record.uid,
                 "T" if record.enabled else "F",
@@ -714,6 +721,7 @@
                 "T" if record.enabledForAddressBooks else "F",
                 "T" if record.autoSchedule else "F",
                 record.autoScheduleMode if record.autoScheduleMode else "",
+                record.autoAcceptGroup,
                 "T" if record.enabledForLogin else "F",
             )
         )
@@ -722,8 +730,8 @@
     def _modifyRecord(self, record):
         yield self.execute(
             """update AUGMENTS set
-            (UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, LOGINENABLED) =
-            (:1, :2, :3, :4, :5, :6, :7, :8, :9) where UID = :10""",
+            (UID, ENABLED, SERVERID, PARTITIONID, CALENDARING, ADDRESSBOOKS, AUTOSCHEDULE, AUTOSCHEDULEMODE, AUTOACCEPTGROUP, LOGINENABLED) =
+            (:1, :2, :3, :4, :5, :6, :7, :8, :9, :10) where UID = :11""",
             (
                 record.uid,
                 "T" if record.enabled else "F",
@@ -733,6 +741,7 @@
                 "T" if record.enabledForAddressBooks else "F",
                 "T" if record.autoSchedule else "F",
                 record.autoScheduleMode if record.autoScheduleMode else "",
+                record.autoAcceptGroup,
                 "T" if record.enabledForLogin else "F",
                 record.uid,
             )

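The AUTOACCEPTGROUP column is threaded through the record constructor, the XML writer, and the select/insert/update statements. Since the table is created with ifnotexists=True, a database created before this change presumably keeps its old column set; a sketch of a one-off migration for a plain SQLite augments database (hypothetical, not part of the commit):

    import sqlite3

    def ensure_auto_accept_group_column(db_path):
        """Add the new column to an existing augments database if absent."""
        con = sqlite3.connect(db_path)
        columns = [row[1] for row in con.execute("pragma table_info(AUGMENTS)")]
        if "AUTOACCEPTGROUP" not in columns:
            # Old rows default to "", i.e. no auto-accept group assigned.
            con.execute(
                "alter table AUGMENTS add column AUTOACCEPTGROUP text default ''")
            con.commit()
        con.close()
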
Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/directory.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/directory.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -82,6 +82,8 @@
     searchContext_location = "location"
     searchContext_attendee = "attendee"
 
+    aggregateService = None
+
     def _generatedGUID(self):
         if not hasattr(self, "_guid"):
             realmName = self.realmName
@@ -477,6 +479,7 @@
             autoaccept = wpframework.get("AutoAcceptsInvitation", False)
             proxy = wpframework.get("CalendaringDelegate", None)
             read_only_proxy = wpframework.get("ReadOnlyCalendaringDelegate", None)
+            autoAcceptGroup = wpframework.get("AutoAcceptGroup", "")
         except (ExpatError, AttributeError), e:
             self.log_error(
                 "Failed to parse ResourceInfo attribute of record (%s)%s (guid=%s): %s\n%s" %
@@ -484,7 +487,7 @@
             )
             raise ValueError("Invalid ResourceInfo")
 
-        return (autoaccept, proxy, read_only_proxy,)
+        return (autoaccept, proxy, read_only_proxy, autoAcceptGroup)
 
 
     def getExternalProxyAssignments(self):
@@ -1245,6 +1248,7 @@
         firstName=None, lastName=None, emailAddresses=set(),
         calendarUserAddresses=set(),
         autoSchedule=False, autoScheduleMode=None,
+        autoAcceptGroup="",
         enabledForCalendaring=None,
         enabledForAddressBooks=None,
         uid=None,
@@ -1280,6 +1284,7 @@
         self.enabledForCalendaring = enabledForCalendaring
         self.autoSchedule = autoSchedule
         self.autoScheduleMode = autoScheduleMode
+        self.autoAcceptGroup = autoAcceptGroup
         self.enabledForAddressBooks = enabledForAddressBooks
         self.enabledForLogin = enabledForLogin
         self.extProxies = extProxies
@@ -1353,6 +1358,7 @@
             self.enabledForAddressBooks = augment.enabledForAddressBooks
             self.autoSchedule = augment.autoSchedule
             self.autoScheduleMode = augment.autoScheduleMode
+            self.autoAcceptGroup = augment.autoAcceptGroup
             self.enabledForLogin = augment.enabledForLogin
 
             if (self.enabledForCalendaring or self.enabledForAddressBooks) and self.recordType == self.service.recordType_groups:
@@ -1556,7 +1562,28 @@
         return True
 
 
+    def autoAcceptMembers(self):
+        """
+        Return the list of GUIDs from which this record will automatically accept
+        invites (assuming no conflicts).  This list is based on the group
+        assigned to record.autoAcceptGroup.  Cache the expanded group membership
+        within the record.
 
+        @return: the list of members of the autoAcceptGroup, or an empty list if
+            not assigned
+        @rtype: C{list} of GUID C{str}
+        """
+        if not hasattr(self, "_cachedAutoAcceptMembers"):
+            self._cachedAutoAcceptMembers = []
+            if self.autoAcceptGroup:
+                service = self.service.aggregateService or self.service
+                groupRecord = service.recordWithGUID(self.autoAcceptGroup)
+                if groupRecord is not None:
+                    self._cachedAutoAcceptMembers = [m.guid for m in groupRecord.expandedMembers()]
+
+        return self._cachedAutoAcceptMembers
+
+
 class DirectoryError(RuntimeError):
     """
     Generic directory error.

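autoAcceptMembers() expands the assigned group once and memoizes the result on the record, so repeated scheduling checks avoid re-walking nested group membership; the cache lives as long as the record does. Callers only need a membership test, roughly (a sketch):

    # Sketch: consumers of autoAcceptMembers() only test membership.
    def effective_mode(record, organizer_guid):
        # organizer_guid: GUID of the scheduling organizer (hypothetical arg).
        if organizer_guid in record.autoAcceptMembers():
            return "automatic"  # auto-accept group member
        return record.autoScheduleMode
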
Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/idirectory.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/idirectory.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/idirectory.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -102,20 +102,21 @@
         """
         @param tokens: The tokens to search on
         @type tokens: C{list} of C{str} (utf-8 bytes)
-        @param context: An indication of what the end user is searching
-            for; "attendee", "location", or None
+
+        @param context: An indication of what the end user is searching for;
+            "attendee", "location", or None
         @type context: C{str}
-        @return: a deferred sequence of L{IDirectoryRecord}s which
-            match the given tokens and optional context.
 
-        Each token is searched for within each record's full name and
-        email address; if each token is found within a record that
-        record is returned in the results.
+        @return: a deferred sequence of L{IDirectoryRecord}s which match the
+            given tokens and optional context.
 
-        If context is None, all record types are considered.  If
-        context is "location", only locations are considered.  If
-        context is "attendee", only users, groups, and resources
-        are considered.
+            Each token is searched for within each record's full name and email
+            address; if each token is found within a record that record is
+            returned in the results.
+
+            If context is None, all record types are considered.  If context is
+            "location", only locations are considered.  If context is
+            "attendee", only users, groups, and resources are considered.
         """
 
 

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/ldapdirectory.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/ldapdirectory.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/ldapdirectory.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -182,6 +182,7 @@
                 "autoScheduleEnabledValue": "yes",
                 "proxyAttr": None, # list of GUIDs
                 "readOnlyProxyAttr": None, # list of GUIDs
+                "autoAcceptGroupAttr": None, # single group GUID
             },
             "partitionSchema": {
                 "serverIdAttr": None, # maps to augments server-id
@@ -261,6 +262,8 @@
             attrSet.add(self.resourceSchema["resourceInfoAttr"])
         if self.resourceSchema["autoScheduleAttr"]:
             attrSet.add(self.resourceSchema["autoScheduleAttr"])
+        if self.resourceSchema["autoAcceptGroupAttr"]:
+            attrSet.add(self.resourceSchema["autoAcceptGroupAttr"])
         if self.resourceSchema["proxyAttr"]:
             attrSet.add(self.resourceSchema["proxyAttr"])
         if self.resourceSchema["readOnlyProxyAttr"]:
@@ -787,6 +790,7 @@
         proxyGUIDs = ()
         readOnlyProxyGUIDs = ()
         autoSchedule = False
+        autoAcceptGroup = ""
         memberGUIDs = []
 
         # LDAP attribute -> principal matchings
@@ -836,7 +840,8 @@
                         (
                             autoSchedule,
                             proxy,
-                            readOnlyProxy
+                            readOnlyProxy,
+                            autoAcceptGroup
                         ) = self.parseResourceInfo(
                             resourceInfo,
                             guid,
@@ -861,6 +866,9 @@
                 if self.resourceSchema["readOnlyProxyAttr"]:
                     readOnlyProxyGUIDs = set(self._getMultipleLdapAttributes(attrs,
                         self.resourceSchema["readOnlyProxyAttr"]))
+                if self.resourceSchema["autoAcceptGroupAttr"]:
+                    autoAcceptGroup = self._getUniqueLdapAttribute(attrs,
+                        self.resourceSchema["autoAcceptGroupAttr"])
 
         serverID = partitionID = None
         if self.partitionSchema["serverIdAttr"]:
@@ -906,6 +914,7 @@
                 partitionID=partitionID,
                 enabledForCalendaring=enabledForCalendaring,
                 autoSchedule=autoSchedule,
+                autoAcceptGroup=autoAcceptGroup,
                 enabledForAddressBooks=enabledForAddressBooks, # TODO: add to LDAP?
                 enabledForLogin=enabledForLogin,
             )

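For LDAP, the group GUID can come either from the AutoAcceptGroup key inside the ResourceInfo plist or, when individual attributes are used instead, from the attribute named by the new autoAcceptGroupAttr key (a single group GUID). A minimal resourceSchema fragment using the individual attributes, with the same attribute names the tests below use; real deployments would substitute their own schema names:

    # Sketch: per-attribute resource schema including the new key.
    resourceSchema = {
        "resourceInfoAttr": None,                    # plist blob not used
        "autoScheduleAttr": "auto-schedule",
        "autoScheduleEnabledValue": "yes",
        "proxyAttr": "proxy",                        # list of GUIDs
        "readOnlyProxyAttr": "read-only-proxy",      # list of GUIDs
        "autoAcceptGroupAttr": "auto-accept-group",  # single group GUID
    }
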
Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/principal.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/principal.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/principal.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -994,14 +994,20 @@
     def getAutoSchedule(self):
         return self.record.autoSchedule
 
-    def canAutoSchedule(self):
+    def canAutoSchedule(self, organizer=None):
         """
         Determine the auto-schedule state based on record state, type and config settings.
+
+        @param organizer: the CUA of the organizer trying to schedule this principal
+        @type organizer: C{str}
         """
-        
+
         if config.Scheduling.Options.AutoSchedule.Enabled:
-            if config.Scheduling.Options.AutoSchedule.Always or self.getAutoSchedule():
-                if self.getCUType() != "INDIVIDUAL" or config.Scheduling.Options.AutoSchedule.AllowUsers:
+            if (config.Scheduling.Options.AutoSchedule.Always or
+                self.getAutoSchedule() or
+                self.autoAcceptFromOrganizer(organizer)):
+                if (self.getCUType() != "INDIVIDUAL" or
+                    config.Scheduling.Options.AutoSchedule.AllowUsers):
                     return True
         return False
 
@@ -1012,9 +1018,65 @@
         augmentRecord.autoScheduleMode = autoScheduleMode
         (yield self.record.service.augmentService.addAugmentRecords([augmentRecord]))
 
-    def getAutoScheduleMode(self):
-        return self.record.autoScheduleMode
+    def getAutoScheduleMode(self, organizer=None):
+        """
+        Return the auto schedule mode value for the principal.  If the optional
+        organizer is provided, and that organizer is a member of the principal's
+        auto-accept group, return "automatic" instead; this allows specifying a
+        privileged group whose scheduling requests are automatically accepted or
+        declined, regardless of whether the principal is normally managed by a
+        delegate.
 
+        @param organizer: the CUA of the organizer scheduling this principal
+        @type organizer: C{str}
+        @return: auto schedule mode; one of: none, accept-always, decline-always,
+            accept-if-free, decline-if-busy, automatic (see stdconfig.py)
+        @rtype: C{str}
+        """
+        autoScheduleMode = self.record.autoScheduleMode
+        if self.autoAcceptFromOrganizer(organizer):
+            autoScheduleMode = "automatic"
+        return autoScheduleMode
+
+
+    @inlineCallbacks
+    def setAutoAcceptGroup(self, autoAcceptGroup):
+        """
+        Sets the group whose members can automatically schedule with this principal
+        even if this principal's auto-schedule is False (assuming no conflicts).
+
+        @param autoAcceptGroup:  GUID of the group
+        @type autoAcceptGroup: C{str}
+        """
+        self.record.autoAcceptGroup = autoAcceptGroup
+        augmentRecord = (yield self.record.service.augmentService.getAugmentRecord(self.record.guid, self.record.recordType))
+        augmentRecord.autoAcceptGroup = autoAcceptGroup
+        (yield self.record.service.augmentService.addAugmentRecords([augmentRecord]))
+
+    def getAutoAcceptGroup(self):
+        """
+        Returns the GUID of the auto-accept group assigned to this principal, or the
+        empty string if not assigned.
+        """
+        return self.record.autoAcceptGroup
+
+    def autoAcceptFromOrganizer(self, organizer):
+        """
+        Is the organizer a member of this principal's autoAcceptGroup?
+
+        @param organizer: CUA of the organizer
+        @type organizer: C{str}
+        @return: True if the autoAcceptGroup is assigned, and the organizer is a member
+            of that group.  False otherwise.
+        @rtype: C{bool}
+        """
+        if organizer is not None and self.record.autoAcceptGroup is not None:
+            organizerPrincipal = self.parent.principalForCalendarUserAddress(organizer)
+            if organizerPrincipal is not None:
+                if organizerPrincipal.record.guid in self.record.autoAcceptMembers():
+                    return True
+        return False
+
     def getCUType(self):
         return self.record.getCUType()
 

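Taken together, the organizer is consulted at two points: canAutoSchedule(organizer) decides whether auto-scheduling applies at all, and getAutoScheduleMode(organizer) upgrades the mode to "automatic" when the organizer belongs to the auto-accept group. The expected call sequence is roughly (a sketch with a hypothetical wrapper, not the actual scheduler code):

    # Sketch: how a scheduler would consult the organizer-aware methods.
    def auto_schedule_mode_for(principal, organizer):
        # organizer: CUA string from the incoming scheduling message.
        if principal.canAutoSchedule(organizer=organizer):
            # "automatic" if the organizer is in the auto-accept group,
            # otherwise the mode configured in the augment record.
            return principal.getAutoScheduleMode(organizer=organizer)
        return None  # no auto-scheduling; deliver to the inbox as usual
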
Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/augments.xml
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/augments.xml	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/augments.xml	2012-11-17 04:03:40 UTC (rev 10079)
@@ -120,6 +120,7 @@
     <enable>true</enable>
     <enable-calendar>true</enable-calendar>
     <enable-addressbook>true</enable-addressbook>
+    <auto-accept-group>both_coasts</auto-accept-group>
   </record>
   <record>
     <uid>orion</uid>

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_directory.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_directory.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_directory.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -757,6 +757,30 @@
             }
         )
 
+    def test_autoAcceptMembers(self):
+        """
+        autoAcceptMembers() returns an empty list if no autoAcceptGroup is
+        assigned, or the expanded membership if assigned.
+        """
+
+        # No auto-accept-group for "orion" in augments.xml
+        orion = self.directoryService.recordWithGUID("orion")
+        self.assertEquals(orion.autoAcceptMembers(), [])
+
+        # "both_coasts" group assigned to "apollo" in augments.xml
+        apollo = self.directoryService.recordWithGUID("apollo")
+        self.assertEquals(
+            set(apollo.autoAcceptMembers()),
+            set([
+                "8B4288F6-CC82-491D-8EF9-642EF4F3E7D0",
+                 "5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1",
+                 "5A985493-EE2C-4665-94CF-4DFEA3A89500",
+                 "6423F94A-6B76-4A3A-815B-D52CFD77935D",
+                 "right_coast",
+                 "left_coast",
+            ])
+        )
+
 class RecordsMatchingTokensTests(TestCase):
 
     @inlineCallbacks

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_ldapdirectory.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_ldapdirectory.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_ldapdirectory.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -547,6 +547,7 @@
                     "autoScheduleAttr": None,
                     "proxyAttr": None,
                     "readOnlyProxyAttr": None,
+                    "autoAcceptGroupAttr": None,
                 },
                 "partitionSchema": {
                     "serverIdAttr": "server-id", # maps to augments server-id
@@ -762,6 +763,7 @@
                     "autoScheduleAttr": None,
                     "proxyAttr": None,
                     "readOnlyProxyAttr": None,
+                    "autoAcceptGroupAttr": None,
                 },
                 "partitionSchema": {
                     "serverIdAttr": "server-id", # maps to augments server-id
@@ -979,6 +981,7 @@
                     "autoScheduleAttr": None,
                     "proxyAttr": None,
                     "readOnlyProxyAttr": None,
+                    "autoAcceptGroupAttr": None,
                 },
                 "partitionSchema": {
                     "serverIdAttr": "server-id", # maps to augments server-id
@@ -1192,6 +1195,7 @@
                     "autoScheduleAttr": None,
                     "proxyAttr": None,
                     "readOnlyProxyAttr": None,
+                    "autoAcceptGroupAttr": None,
                 },
                 "partitionSchema": {
                     "serverIdAttr": "server-id", # maps to augments server-id
@@ -1363,7 +1367,7 @@
                      ])
             )
 
-            # Resource with delegates and autoSchedule = True
+            # Resource with delegates, autoSchedule = True, and autoAcceptGroup
 
             dn = "cn=odtestresource,cn=resources,dc=example,dc=com"
             guid = 'D3094652-344B-4633-8DB8-09639FA00FB6'
@@ -1382,6 +1386,8 @@
 <string>6C6CD280-E6E3-11DF-9492-0800200C9A66</string>
 <key>ReadOnlyCalendaringDelegate</key>
 <string>6AA1AE12-592F-4190-A069-547CD83C47C0</string>
+<key>AutoAcceptGroup</key>
+<string>77A8EB52-AA2A-42ED-8843-B2BEE863AC70</string>
 </dict>
 </dict>
 </plist>"""]
@@ -1394,6 +1400,8 @@
             self.assertEquals(record.externalReadOnlyProxies(),
                 set(['6AA1AE12-592F-4190-A069-547CD83C47C0']))
             self.assertTrue(record.autoSchedule)
+            self.assertEquals(record.autoAcceptGroup,
+                '77A8EB52-AA2A-42ED-8843-B2BEE863AC70')
 
             # Resource with no delegates and autoSchedule = False
 
@@ -1422,6 +1430,7 @@
             self.assertEquals(record.externalReadOnlyProxies(),
                 set())
             self.assertFalse(record.autoSchedule)
+            self.assertEquals(record.autoAcceptGroup, "")
 
 
             # Now switch off the resourceInfoAttr and switch to individual
@@ -1432,6 +1441,7 @@
                 "autoScheduleEnabledValue" : "yes",
                 "proxyAttr" : "proxy",
                 "readOnlyProxyAttr" : "read-only-proxy",
+                "autoAcceptGroupAttr" : "auto-accept-group",
             }
 
             # Resource with delegates and autoSchedule = True
@@ -1444,6 +1454,7 @@
                 'auto-schedule' : ['yes'],
                 'proxy' : ['6C6CD280-E6E3-11DF-9492-0800200C9A66'],
                 'read-only-proxy' : ['6AA1AE12-592F-4190-A069-547CD83C47C0'],
+                'auto-accept-group' : ['77A8EB52-AA2A-42ED-8843-B2BEE863AC70'],
             }
             record = self.service._ldapResultToRecord(dn, attrs,
                 self.service.recordType_resources)
@@ -1453,6 +1464,8 @@
             self.assertEquals(record.externalReadOnlyProxies(),
                 set(['6AA1AE12-592F-4190-A069-547CD83C47C0']))
             self.assertTrue(record.autoSchedule)
+            self.assertEquals(record.autoAcceptGroup,
+                '77A8EB52-AA2A-42ED-8843-B2BEE863AC70')
 
         def test_listRecords(self):
             """

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_principal.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_principal.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_principal.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -560,7 +560,7 @@
         """
         DirectoryPrincipalResource.canAutoSchedule()
         """
-        
+
         # Set all resources and locations to auto-schedule, plus one user
         for provisioningResource, recordType, recordResource, record in self._allRecords():
             if record.enabledForCalendaring:
@@ -590,6 +590,27 @@
             if record.enabledForCalendaring:
                 self.assertFalse(recordResource.canAutoSchedule())
 
+
+    def test_canAutoScheduleAutoAcceptGroup(self):
+        """
+        DirectoryPrincipalResource.canAutoSchedule(organizer)
+        """
+
+        # Location "apollo" has an auto-accept group ("both_coasts") set in augments.xml,
+        # therefore any organizer in that group should be able to auto schedule
+
+        for provisioningResource, recordType, recordResource, record in self._allRecords():
+            if record.uid == "apollo":
+
+                # No organizer
+                self.assertFalse(recordResource.canAutoSchedule())
+
+                # Organizer in auto-accept group
+                self.assertTrue(recordResource.canAutoSchedule(organizer="mailto:wsanchez at example.com"))
+                # Organizer not in auto-accept group
+                self.assertFalse(recordResource.canAutoSchedule(organizer="mailto:a at example.com"))
+
+
     @inlineCallbacks
     def test_defaultAccessControlList_principals(self):
         """

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_xmlfile.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_xmlfile.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/test/test_xmlfile.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -25,6 +25,11 @@
 # FIXME: Add tests for GUID hooey, once we figure out what that means here
 
 class XMLFileBase(object):
+    """
+    L{XMLFileBase} is a base/mix-in object for testing L{XMLDirectoryService}
+    (or things that depend on L{IDirectoryService} and need a simple
+    implementation to use).
+    """
     recordTypes = set((
         DirectoryService.recordType_users,
         DirectoryService.recordType_groups,
@@ -44,30 +49,30 @@
     }
 
     groups = {
-        "admin"      : { "password": "admin",       "guid": None, "addresses": (), "members": ((DirectoryService.recordType_groups, "managers"),)                                      },
-        "managers"   : { "password": "managers",    "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "lecroy"),)                                         },
+        "admin"      : { "password": "admin",       "guid": None, "addresses": (), "members": ((DirectoryService.recordType_groups, "managers"),)},
+        "managers"   : { "password": "managers",    "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "lecroy"),)},
         "grunts"     : { "password": "grunts",      "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "wsanchez"),
                                                                                                (DirectoryService.recordType_users , "cdaboo"),
-                                                                                               (DirectoryService.recordType_users , "dreid")) },
-        "right_coast": { "password": "right_coast", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "cdaboo"),)                                         },
+                                                                                               (DirectoryService.recordType_users , "dreid"))},
+        "right_coast": { "password": "right_coast", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "cdaboo"),)},
         "left_coast" : { "password": "left_coast",  "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "wsanchez"),
                                                                                                (DirectoryService.recordType_users , "dreid"),
-                                                                                               (DirectoryService.recordType_users , "lecroy")) },
+                                                                                               (DirectoryService.recordType_users , "lecroy"))},
         "both_coasts": { "password": "both_coasts", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_groups, "right_coast"),
-                                                                                               (DirectoryService.recordType_groups, "left_coast"))           },
+                                                                                               (DirectoryService.recordType_groups, "left_coast"))},
         "recursive1_coasts":  { "password": "recursive1_coasts",  "guid": None, "addresses": (), "members": ((DirectoryService.recordType_groups, "recursive2_coasts"),
-                                                                                               (DirectoryService.recordType_users, "wsanchez"))           },
+                                                                                               (DirectoryService.recordType_users, "wsanchez"))},
         "recursive2_coasts":  { "password": "recursive2_coasts",  "guid": None, "addresses": (), "members": ((DirectoryService.recordType_groups, "recursive1_coasts"),
-                                                                                               (DirectoryService.recordType_users, "cdaboo"))           },
+                                                                                               (DirectoryService.recordType_users, "cdaboo"))},
         "non_calendar_group": { "password": "non_calendar_group", "guid": None, "addresses": (), "members": ((DirectoryService.recordType_users , "cdaboo"),
-                                                                                               (DirectoryService.recordType_users , "lecroy"))           },
+                                                                                               (DirectoryService.recordType_users , "lecroy"))},
     }
 
     locations = {
         "mercury": { "password": "mercury", "guid": None, "addresses": ("mailto:mercury at example.com",) },
         "gemini" : { "password": "gemini",  "guid": None, "addresses": ("mailto:gemini at example.com",)  },
         "apollo" : { "password": "apollo",  "guid": None, "addresses": ("mailto:apollo at example.com",)  },
-        "orion"  : { "password": "orion",   "guid": None, "addresses": ("mailto:orion at example.com",)  },
+        "orion"  : { "password": "orion",   "guid": None, "addresses": ("mailto:orion at example.com",)   },
     }
 
     resources = {
@@ -77,17 +82,53 @@
     }
 
     def xmlFile(self):
+        """
+        Create a L{FilePath} that points to a temporary file containing a copy
+        of C{twistedcaldav/directory/test/accounts.xml}.
+
+        @see: L{xmlFile}
+
+        @rtype: L{FilePath}
+        """
         if not hasattr(self, "_xmlFile"):
             self._xmlFile = FilePath(self.mktemp())
             xmlFile.copyTo(self._xmlFile)
         return self._xmlFile
 
+
     def augmentsFile(self):
+        """
+        Create a L{FilePath} that points to a temporary file containing a copy
+        of C{twistedcaldav/directory/test/augments.xml}.
+
+        @see: L{augmentsFile}
+
+        @rtype: L{FilePath}
+        """
         if not hasattr(self, "_augmentsFile"):
             self._augmentsFile = FilePath(self.mktemp())
             augmentsFile.copyTo(self._augmentsFile)
         return self._augmentsFile
 
+
+    def service(self):
+        """
+        Create an L{XMLDirectoryService} based on the contents of the paths
+        returned by L{XMLFileBase.augmentsFile} and L{XMLFileBase.xmlFile}.
+
+        @rtype: L{XMLDirectoryService}
+        """
+        return XMLDirectoryService(
+            {
+                'xmlFile': self.xmlFile(),
+                'augmentService':
+                    augment.AugmentXMLDB(xmlFiles=(self.augmentsFile().path,)),
+            },
+            alwaysStat=True
+        )
+
+
+
 class XMLFile (
     XMLFileBase,
     twistedcaldav.directory.test.util.BasicTestCase,
@@ -96,16 +137,6 @@
     """
     Test XML file based directory implementation.
     """
-    def service(self):
-        directory = XMLDirectoryService(
-            {
-                'xmlFile' : self.xmlFile(),
-                'augmentService' :
-                   augment.AugmentXMLDB(xmlFiles=(self.augmentsFile().path,)),
-            },
-            alwaysStat=True
-        )
-        return directory
 
     def test_changedXML(self):
         service = self.service()

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/xmlaugmentsparser.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/xmlaugmentsparser.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/directory/xmlaugmentsparser.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -43,6 +43,7 @@
 ELEMENT_ENABLELOGIN       = "enable-login"
 ELEMENT_AUTOSCHEDULE      = "auto-schedule"
 ELEMENT_AUTOSCHEDULE_MODE = "auto-schedule-mode"
+ELEMENT_AUTOACCEPTGROUP   = "auto-accept-group"
 
 ATTRIBUTE_REPEAT          = "repeat"
 
@@ -60,6 +61,7 @@
     ELEMENT_ENABLELOGIN:       "enabledForLogin",
     ELEMENT_AUTOSCHEDULE:      "autoSchedule",
     ELEMENT_AUTOSCHEDULE_MODE: "autoScheduleMode",
+    ELEMENT_AUTOACCEPTGROUP:   "autoAcceptGroup",
 }
 
 class XMLAugmentsParser(object):
@@ -103,6 +105,7 @@
                     ELEMENT_PARTITIONID,
                     ELEMENT_HOSTEDAT,
                     ELEMENT_AUTOSCHEDULE_MODE,
+                    ELEMENT_AUTOACCEPTGROUP,
                 ):
                     fields[node.tag] = node.text if node.text else ""
                 elif node.tag in (

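With the element registered in both the tag tuple and attrMapping, the parser copies the element text onto the record's autoAcceptGroup field. A standalone sketch of that mapping using the standard ElementTree module (a reduced subset of the real parser):

    import xml.etree.ElementTree as ET

    ATTR_MAP = {"auto-accept-group": "autoAcceptGroup"}  # subset of attrMapping

    xml_record = """
    <record>
      <uid>apollo</uid>
      <auto-accept-group>both_coasts</auto-accept-group>
    </record>
    """

    fields = {}
    for node in ET.fromstring(xml_record):
        if node.tag in ATTR_MAP:
            fields[ATTR_MAP[node.tag]] = node.text if node.text else ""

    assert fields == {"autoAcceptGroup": "both_coasts"}
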
Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_addressbook_query.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_addressbook_query.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_addressbook_query.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -63,11 +63,11 @@
 
     xmlfilter = addressbook_query.filter
     filter = addressbookqueryfilter.Filter(xmlfilter)
-    query = addressbook_query.props
+    query  = addressbook_query.props
     limit = addressbook_query.limit
 
     assert query is not None
-
+    
     if query.qname() == ("DAV:", "allprop"):
         propertiesForResource = report_common.allPropertiesForResource
         generate_address_data = False
@@ -78,7 +78,7 @@
 
     elif query.qname() == ("DAV:", "prop"):
         propertiesForResource = report_common.propertyListForResource
-
+       
         # Verify that any address-data element matches what we can handle
         result, message, generate_address_data = report_common.validPropertyListAddressDataTypeVersion(query)
         if not result:
@@ -88,7 +88,7 @@
                 (carddav_namespace, "supported-address-data"),
                 "Invalid address-data",
             ))
-
+        
     else:
         raise AssertionError("We shouldn't be here")
 
@@ -101,10 +101,10 @@
             "Invalid filter element",
         ))
 
-    matchcount = [0, ]
-    max_number_of_results = [config.MaxQueryWithDataResults if generate_address_data else None, ]
-    limited = [False, ]
-
+    matchcount = [0,]
+    max_number_of_results = [config.MaxQueryWithDataResults if generate_address_data else None,]
+    limited = [False,]
+    
     if limit:
         clientLimit = int(str(limit.childOfType(NResults)))
         if max_number_of_results[0] is None or clientLimit < max_number_of_results[0]:
@@ -118,15 +118,15 @@
         @param addrresource: the L{CalDAVResource} for an address book collection.
         @param uri: the uri for the address book collecton resource.
         """
-
+        
         def checkMaxResults():
             matchcount[0] += 1
             if max_number_of_results[0] is not None and matchcount[0] > max_number_of_results[0]:
                 raise NumberOfMatchesWithinLimits(max_number_of_results[0])
-
-
+           
+        
         @inlineCallbacks
-        def queryAddressBookObjectResource(resource, uri, name, vcard, query_ok=False):
+        def queryAddressBookObjectResource(resource, uri, name, vcard, query_ok = False):
             """
             Run a query on the specified vcard.
             @param resource: the L{CalDAVResource} for the vcard.
@@ -134,7 +134,7 @@
             @param name: the name of the resource.
             @param vcard: the L{Component} vcard read from the resource.
             """
-
+            
             if query_ok or filter.match(vcard):
                 # Check size of results is within limit
                 checkMaxResults()
@@ -143,7 +143,7 @@
                     href = davxml.HRef.fromString(joinURL(uri, name))
                 else:
                     href = davxml.HRef.fromString(uri)
-
+            
                 try:
                     yield report_common.responseForHref(request, responses, href, resource, propertiesForResource, query, vcard=vcard)
                 except ConcurrentModification:
@@ -153,21 +153,21 @@
                     # case, we ignore the now missing resource rather
                     # than raise an error for the entire report.
                     log.err("Missing resource during sync: %s" % (href,))
-
-
+              
+            
         @inlineCallbacks
         def queryDirectoryBackedAddressBook(directoryBackedAddressBook, addressBookFilter):
             """
             """
-            records, limited[0] = (yield directoryBackedAddressBook.directory.vCardRecordsForAddressBookQuery(addressBookFilter, query, max_number_of_results[0]))
+            records, limited[0] = (yield directoryBackedAddressBook.directory.vCardRecordsForAddressBookQuery( addressBookFilter, query, max_number_of_results[0] ))
             for vCardRecord in records:
-
+                
                 # match against original filter
                 if filter.match((yield vCardRecord.vCard())):
-
+ 
                     # Check size of results is within limit
                     checkMaxResults()
-
+                   
                     try:
                         yield report_common.responseForHref(request, responses, vCardRecord.hRef(), vCardRecord, propertiesForResource, query, vcard=(yield vCardRecord.vCard()))
                     except ConcurrentModification:
@@ -177,70 +177,70 @@
                         # case, we ignore the now missing resource rather
                         # than raise an error for the entire report.
                         log.err("Missing resource during sync: %s" % (vCardRecord.hRef(),))
-
-
+ 
+            
         directoryAddressBookLock = None
-        try:
+        try:                
 
             if addrresource.isDirectoryBackedAddressBookCollection() and addrresource.directory.cacheQuery:
-
+                
                 directory = addrresource.directory
                 if directory.liveQuery:
                     # if liveQuery and cacheQuery, get vCards into the directory address book on disk
-                    directoryAddressBookLock, limited[0] = (yield  directory.cacheVCardsForAddressBookQuery(filter, query, max_number_of_results[0]))
-
+                    directoryAddressBookLock, limited[0] = (yield  directory.cacheVCardsForAddressBookQuery( filter, query, max_number_of_results[0] ) )
+ 
                 elif directory.maxDSQueryRecords and directory.maxDSQueryRecords < max_number_of_results[0]:
                     max_number_of_results[0] = directory.maxDSQueryRecords
-
-
+                   
+                
             elif not addrresource.isAddressBookCollection():
-
+ 
                 #do UID lookup on last part of uri
                 resource_name = urllib.unquote(uri[uri.rfind("/") + 1:])
                 if resource_name.endswith(".vcf") and len(resource_name) > 4:
-
+    
                     # see if parent is directory backed address book
-                    parent = (yield  addrresource.locateParent(request, uri))
-
+                    parent = (yield  addrresource.locateParent( request, uri ) )
+    
                     if parent.isDirectoryBackedAddressBookCollection() and parent.directory.cacheQuery:
-
+                        
                         directory = parent.directory
                         if directory.liveQuery:
-                            vCardFilter = carddavxml.Filter(*[carddavxml.PropertyFilter(
-                                                        carddavxml.TextMatch.fromString(resource_name[:-4]),
+                            vCardFilter = carddavxml.Filter( *[carddavxml.PropertyFilter(
+                                                        carddavxml.TextMatch.fromString(resource_name[:-4]), 
                                                         name="UID", # attributes
-                                                        ), ])
+                                                        ),] )
                             vCardFilter = addressbookqueryfilter.Filter(vCardFilter)
+                            
+                            directoryAddressBookLock, limited[0] = (yield  directory.cacheVCardsForAddressBookQuery( vCardFilter, query, max_number_of_results[0] ) )
 
-                            directoryAddressBookLock, limited[0] = (yield  directory.cacheVCardsForAddressBookQuery(vCardFilter, query, max_number_of_results[0]))
-
                         elif directory.maxDSQueryRecords and directory.maxDSQueryRecords < max_number_of_results[0]:
                             max_number_of_results[0] = directory.maxDSQueryRecords
-
-
-
+   
+    
+        
             # Check whether supplied resource is an address book or an address book object resource
             if addrresource.isAddressBookCollection():
-
+    
                 if addrresource.isDirectoryBackedAddressBookCollection() and addrresource.directory.liveQuery and not addrresource.directory.cacheQuery:
-                    yield  maybeDeferred(queryDirectoryBackedAddressBook, addrresource, filter)
-
+                    yield  maybeDeferred( queryDirectoryBackedAddressBook, addrresource, filter )
+                
                 else:
 
                     # Do some optimisation of access control calculation by determining any inherited ACLs outside of
                     # the child resource loop and supply those to the checkPrivileges on each child.
                     filteredaces = (yield addrresource.inheritedACEsforChildren(request))
-
+                
                     # Check for disabled access
                     if filteredaces is not None:
                         # See whether the filter is valid for an index only query
                         index_query_ok = addrresource.index().searchValid(filter)
-
+                    
                         # Get list of children that match the search and have read access
                         names = [name for name, ignore_uid in (yield addrresource.index().search(filter))] #@UnusedVariable
                         if not names:
                             return
-
+                          
                         # Now determine which valid resources are readable and which are not
                         ok_resources = []
                         yield addrresource.findChildrenFaster(
@@ -255,47 +255,47 @@
                         )
                         for child, child_uri in ok_resources:
                             child_uri_name = child_uri[child_uri.rfind("/") + 1:]
-
+                            
                             if generate_address_data or not index_query_ok:
                                 vcard = yield child.vCard()
                                 assert vcard is not None, "vCard %s is missing from address book collection %r" % (child_uri_name, self)
                             else:
                                 vcard = None
-
-                            yield queryAddressBookObjectResource(child, uri, child_uri_name, vcard, query_ok=index_query_ok)
-
+                            
+                            yield queryAddressBookObjectResource(child, uri, child_uri_name, vcard, query_ok = index_query_ok)
+                        
             else:
-
+                
                 handled = False
                 resource_name = urllib.unquote(uri[uri.rfind("/") + 1:])
                 if resource_name.endswith(".vcf") and len(resource_name) > 4:
-
+                    
                     # see if parent is directory backed address book
-                    parent = (yield  addrresource.locateParent(request, uri))
-
+                    parent = (yield  addrresource.locateParent( request, uri ) )
+    
                     if parent.isDirectoryBackedAddressBookCollection() and parent.directory.liveQuery and not parent.directory.cacheQuery:
-
-                        vCardFilter = carddavxml.Filter(*[carddavxml.PropertyFilter(
-                                                    carddavxml.TextMatch.fromString(resource_name[:-4]),
+ 
+                        vCardFilter = carddavxml.Filter( *[carddavxml.PropertyFilter(
+                                                    carddavxml.TextMatch.fromString(resource_name[:-4]), 
                                                     name="UID", # attributes
-                                                    ), ])
+                                                    ),] )
                         vCardFilter = addressbookqueryfilter.Filter(vCardFilter)
-
-                        yield  maybeDeferred(queryDirectoryBackedAddressBook, parent, vCardFilter)
+                        
+                        yield  maybeDeferred( queryDirectoryBackedAddressBook, parent, vCardFilter )
                         handled = True
 
                 if not handled:
                     vcard = yield addrresource.vCard()
                     yield queryAddressBookObjectResource(addrresource, uri, None, vcard)
-
+        
             if limited[0]:
                 raise NumberOfMatchesWithinLimits(matchcount[0])
 
         finally:
             if directoryAddressBookLock:
                 yield directoryAddressBookLock.release()
-
-
+                
+    
     # Run report taking depth into account
     try:
         depth = request.headers.getHeader("depth", "0")
@@ -309,7 +309,7 @@
                         #davxml.ResponseDescription("Results limited by %s at %d" % resultsWereLimited),
                         davxml.ResponseDescription("Results limited to %d items" % e.maxLimit()),
                     ))
-
+    
     if not hasattr(request, "extendedLogItems"):
         request.extendedLogItems = {}
     request.extendedLogItems["responses"] = len(responses)

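Note on the directory-backed branches above: both build the same single-UID
CardDAV filter from a resource name ending in ".vcf". Condensed into a
standalone sketch (imports assumed to match this module's):

    from twistedcaldav import carddavxml
    from twistedcaldav.query import addressbookqueryfilter

    resource_name = "ABC-123.vcf"
    uid = resource_name[:-4]  # strip the ".vcf" suffix

    # match vCards whose UID property equals the resource name's stem
    vCardFilter = carddavxml.Filter(*[
        carddavxml.PropertyFilter(
            carddavxml.TextMatch.fromString(uid),
            name="UID",
        ),
    ])
    vCardFilter = addressbookqueryfilter.Filter(vCardFilter)
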
Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_multiget_common.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_multiget_common.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_multiget_common.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -38,7 +38,7 @@
 from twistedcaldav.config import config
 from twistedcaldav.method import report_common
 from txdav.common.icommondatastore import ConcurrentModification
-from twistedcaldav.method.report_common import COLLECTION_TYPE_CALENDAR, \
+from twistedcaldav.method.report_common import COLLECTION_TYPE_CALENDAR,\
     COLLECTION_TYPE_ADDRESSBOOK
 from twistedcaldav.query import addressbookqueryfilter
 
@@ -53,7 +53,7 @@
     # Make sure target resource is of the right type
     if not self.isCollection():
         parent = (yield self.locateParent(request, request.uri))
-
+        
         if collection_type == COLLECTION_TYPE_CALENDAR:
             if not parent.isPseudoCalendarCollection():
                 log.err("calendar-multiget report is not allowed on a resource outside of a calendar collection %s" % (self,))
@@ -66,12 +66,12 @@
     responses = []
 
     propertyreq = multiget.property
-    resources = multiget.resources
+    resources  = multiget.resources
 
     if not hasattr(request, "extendedLogItems"):
         request.extendedLogItems = {}
     request.extendedLogItems["rcount"] = len(resources)
-
+    
     hasData = False
     if propertyreq.qname() == ("DAV:", "allprop"):
         propertiesForResource = report_common.allPropertiesForResource
@@ -81,7 +81,7 @@
 
     elif propertyreq.qname() == ("DAV:", "prop"):
         propertiesForResource = report_common.propertyListForResource
-
+        
         if collection_type == COLLECTION_TYPE_CALENDAR:
             # Verify that any calendar-data element matches what we can handle
             result, message, hasData = report_common.validPropertyListCalendarDataTypeVersion(propertyreq)
@@ -133,11 +133,11 @@
         # Do some optimisation of access control calculation by determining any inherited ACLs outside of
         # the child resource loop and supply those to the checkPrivileges on each child.
         filteredaces = (yield self.inheritedACEsforChildren(request))
-
+    
         # Check for disabled access
         if filteredaces is None:
             disabled = True
-
+            
         # Check private events access status
         isowner = (yield self.isOwner(request))
 
@@ -147,7 +147,7 @@
         # Do some optimisation of access control calculation by determining any inherited ACLs outside of
         # the child resource loop and supply those to the checkPrivileges on each child.
         filteredaces = (yield self.inheritedACEsforChildren(request))
-
+    
         # Check for disabled access
         if filteredaces is None:
             disabled = True
@@ -167,7 +167,7 @@
 
         @inlineCallbacks
         def doResponse():
-
+            
             # Special for addressbooks
             if collection_type == COLLECTION_TYPE_ADDRESSBOOK:
                 if self.isDirectoryBackedAddressBookCollection() and self.directory.liveQuery:
@@ -185,7 +185,7 @@
                     valid_names.append(name)
             if not valid_names:
                 returnValue(None)
-
+        
             # Now determine which valid resources are readable and which are not
             ok_resources = []
             bad_resources = []
@@ -234,12 +234,12 @@
             # Indicate error for all missing resources
             for href in missing_resources:
                 responses.append(davxml.StatusResponse(davxml.HRef.fromString(href), davxml.Status.fromResponseCode(responsecode.NOT_FOUND)))
-
+    
         @inlineCallbacks
         def doDirectoryAddressBookResponse():
-
+            
             directoryAddressBookLock = None
-            try:
+            try: 
                 # Verify that requested resources are immediate children of the request-URI
                 # and get vCardFilters; similar to the "normal" case below, but do not call getChild()
                 vCardFilters = []
@@ -250,22 +250,22 @@
                     if self._isChildURI(request, resource_uri) and resource_name.endswith(".vcf") and len(resource_name) > 4:
                         valid_hrefs.append(href)
                         vCardFilters.append(carddavxml.PropertyFilter(
-                                                carddavxml.TextMatch.fromString(resource_name[:-4]),
+                                                carddavxml.TextMatch.fromString(resource_name[:-4]), 
                                                 name="UID", # attributes
                                             ))
                     elif not self.directory.cacheQuery:
                         responses.append(davxml.StatusResponse(href, davxml.Status.fromResponseCode(responsecode.NOT_FOUND)))
-
+                       
                 # exit if not valid           
                 if not vCardFilters or not valid_hrefs:
-                    returnValue(None)
-
-                addressBookFilter = carddavxml.Filter(*vCardFilters)
+                    returnValue( None )
+                     
+                addressBookFilter = carddavxml.Filter( *vCardFilters )
                 addressBookFilter = addressbookqueryfilter.Filter(addressBookFilter)
                 if self.directory.cacheQuery:
                     # add vcards to directory address book and run "normal case" below
                     limit = config.DirectoryAddressBook.MaxQueryResults
-                    directoryAddressBookLock, limited = (yield  self.directory.cacheVCardsForAddressBookQuery(addressBookFilter, propertyreq, limit))
+                    directoryAddressBookLock, limited = (yield  self.directory.cacheVCardsForAddressBookQuery(addressBookFilter, propertyreq, limit) )
                     if limited:
                         log.err("Too many results in multiget report: %d" % len(resources))
                         raise HTTPError(ErrorResponse(
@@ -276,7 +276,7 @@
                 else:
                     # get vCards and filter
                     limit = config.DirectoryAddressBook.MaxQueryResults
-                    vCardRecords, limited = (yield self.directory.vCardRecordsForAddressBookQuery(addressBookFilter, propertyreq, limit))
+                    vCardRecords, limited = (yield self.directory.vCardRecordsForAddressBookQuery( addressBookFilter, propertyreq, limit ))
                     if limited:
                         log.err("Too many results in multiget report: %d" % len(resources))
                         raise HTTPError(ErrorResponse(
@@ -284,7 +284,7 @@
                             (dav_namespace, "number-of-matches-within-limits"),
                             "Too many results",
                         ))
-
+                   
                     for href in valid_hrefs:
                         matchingRecord = None
                         for vCardRecord in vCardRecords:
@@ -304,30 +304,30 @@
             yield doResponse()
         else:
             for href in resources:
-
+    
                 resource_uri = str(href)
-
+    
                 # Do href checks
                 if requestURIis == "calendar":
                     pass
                 elif requestURIis == "addressbook":
                     pass
-
+        
                 # TODO: we can optimize this one in a similar manner to the calendar case
                 elif requestURIis == "collection":
                     name = unquote(resource_uri[resource_uri.rfind("/") + 1:])
                     if not self._isChildURI(request, resource_uri, False):
                         responses.append(davxml.StatusResponse(href, davxml.Status.fromResponseCode(responsecode.NOT_FOUND)))
                         continue
-
+     
                     child = (yield request.locateResource(resource_uri))
-
+    
                     if not child or not child.exists():
                         responses.append(davxml.StatusResponse(href, davxml.Status.fromResponseCode(responsecode.NOT_FOUND)))
                         continue
-
+    
                     parent = (yield child.locateParent(request, resource_uri))
-
+    
                     if collection_type == COLLECTION_TYPE_CALENDAR:
                         if not parent.isCalendarCollection() or not (yield parent.index().resourceExists(name)):
                             responses.append(davxml.StatusResponse(href, davxml.Status.fromResponseCode(responsecode.FORBIDDEN)))
@@ -336,18 +336,18 @@
                         if not parent.isAddressBookCollection() or not (yield parent.index().resourceExists(name)):
                             responses.append(davxml.StatusResponse(href, davxml.Status.fromResponseCode(responsecode.FORBIDDEN)))
                             continue
-
+                    
                     # Check privileges on parent - must have at least DAV:read
                     try:
                         yield parent.checkPrivileges(request, (davxml.Read(),))
                     except AccessDeniedError:
                         responses.append(davxml.StatusResponse(href, davxml.Status.fromResponseCode(responsecode.FORBIDDEN)))
                         continue
-
+                    
                     # Cache the last parent's inherited aces for checkPrivileges optimization
                     if lastParent != parent:
                         lastParent = parent
-
+                
                         # Do some optimisation of access control calculation by determining any inherited ACLs outside of
                         # the child resource loop and supply those to the checkPrivileges on each child.
                         filteredaces = (yield parent.inheritedACEsforChildren(request))
@@ -359,9 +359,9 @@
                     if (resource_uri != request.uri) or not self.exists():
                         responses.append(davxml.StatusResponse(href, davxml.Status.fromResponseCode(responsecode.NOT_FOUND)))
                         continue
-
+    
                     parent = (yield self.locateParent(request, resource_uri))
-
+    
                     if collection_type == COLLECTION_TYPE_CALENDAR:
                         if not parent.isPseudoCalendarCollection() or not (yield parent.index().resourceExists(name)):
                             responses.append(davxml.StatusResponse(href, davxml.Status.fromResponseCode(responsecode.FORBIDDEN)))
@@ -371,21 +371,21 @@
                             responses.append(davxml.StatusResponse(href, davxml.Status.fromResponseCode(responsecode.FORBIDDEN)))
                             continue
                     child = self
-
+            
                     # Do some optimisation of access control calculation by determining any inherited ACLs outside of
                     # the child resource loop and supply those to the checkPrivileges on each child.
                     filteredaces = (yield parent.inheritedACEsforChildren(request))
 
                     # Check private events access status
                     isowner = (yield parent.isOwner(request))
-
+        
                 # Check privileges - must have at least DAV:read
                 try:
                     yield child.checkPrivileges(request, (davxml.Read(),), inherited_aces=filteredaces)
                 except AccessDeniedError:
                     responses.append(davxml.StatusResponse(href, davxml.Status.fromResponseCode(responsecode.FORBIDDEN)))
                     continue
-
+        
                 yield report_common.responseForHref(request, responses, href, child, propertiesForResource, propertyreq, isowner=isowner)
 
     returnValue(MultiStatusResponse(responses))

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_sync_collection.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_sync_collection.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/method/report_sync_collection.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -46,7 +46,7 @@
     """
     Generate a sync-collection REPORT.
     """
-
+    
     # These resources support the report
     if not config.EnableSyncReport or element.Report(element.SyncCollection(),) not in self.supportedReports():
         log.err("sync-collection report is only allowed on calendar/inbox/addressbook/notification collection resources %s" % (self,))
@@ -55,7 +55,7 @@
             element.SupportedReport(),
             "Report not supported on this resource",
         ))
-
+   
     responses = []
 
     # Process Depth and sync-level for backwards compatibility
@@ -66,13 +66,13 @@
     else:
         depth = request.headers.getHeader("depth", None)
         descriptor = "Depth header without DAV:sync-level"
-
+    
     if depth not in ("1", "infinity"):
         log.err("sync-collection report with invalid depth header: %s" % (depth,))
         raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "Invalid %s value" % (descriptor,)))
-
-    propertyreq = sync_collection.property.children if sync_collection.property else None
-
+        
+    propertyreq = sync_collection.property.children if sync_collection.property else None 
+    
     @inlineCallbacks
     def _namedPropertiesForResource(request, props, resource, forbidden=False):
         """
@@ -87,7 +87,7 @@
             responsecode.FORBIDDEN : [],
             responsecode.NOT_FOUND : [],
         }
-
+        
         for property in props:
             if isinstance(property, element.WebDAVElement):
                 qname = property.qname()
@@ -110,9 +110,9 @@
                         properties_by_status[status].append(propertyName(qname))
                 else:
                     properties_by_status[responsecode.NOT_FOUND].append(propertyName(qname))
-
+        
         returnValue(properties_by_status)
-
+    
     # Do some optimization of access control calculation by determining any inherited ACLs outside of
     # the child resource loop and supply those to the checkPrivileges on each child.
     filteredaces = (yield self.inheritedACEsforChildren(request))
@@ -173,11 +173,11 @@
     for name in removed:
         href = element.HRef.fromString(joinURL(request.uri, name))
         responses.append(element.StatusResponse(element.HRef.fromString(href), element.Status.fromResponseCode(responsecode.NOT_FOUND)))
-
+    
     for name in notallowed:
         href = element.HRef.fromString(joinURL(request.uri, name))
         responses.append(element.StatusResponse(element.HRef.fromString(href), element.Status.fromResponseCode(responsecode.NOT_ALLOWED)))
-
+    
     if not hasattr(request, "extendedLogItems"):
         request.extendedLogItems = {}
     request.extendedLogItems["responses"] = len(responses)

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/scheduling/processing.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/scheduling/processing.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/scheduling/processing.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -500,8 +500,12 @@
             new_calendar = iTipProcessing.processNewRequest(self.message, self.recipient.cuaddr, creating=True)
 
             # Handle auto-reply behavior
-            if self.recipient.principal.canAutoSchedule():
-                send_reply, store_inbox, partstat = (yield self.checkAttendeeAutoReply(new_calendar, self.recipient.principal.getAutoScheduleMode()))
+            organizer = normalizeCUAddr(self.message.getOrganizer())
+            if self.recipient.principal.canAutoSchedule(organizer=organizer):
+                # auto schedule mode can depend on who the organizer is
+                mode = self.recipient.principal.getAutoScheduleMode(organizer=organizer)
+                send_reply, store_inbox, partstat = (yield self.checkAttendeeAutoReply(new_calendar,
+                    mode))
 
                 # Only store inbox item when reply is not sent or always for users
                 store_inbox = store_inbox or self.recipient.principal.getCUType() == "INDIVIDUAL"
@@ -533,8 +537,12 @@
             if new_calendar:
 
                 # Handle auto-reply behavior
-                if self.recipient.principal.canAutoSchedule():
-                    send_reply, store_inbox, partstat = (yield self.checkAttendeeAutoReply(new_calendar, self.recipient.principal.getAutoScheduleMode()))
+                organizer = normalizeCUAddr(self.message.getOrganizer())
+                if self.recipient.principal.canAutoSchedule(organizer=organizer):
+                    # auto schedule mode can depend on who the organizer is
+                    mode = self.recipient.principal.getAutoScheduleMode(organizer=organizer)
+                    send_reply, store_inbox, partstat = (yield self.checkAttendeeAutoReply(new_calendar,
+                        mode))
 
                     # Only store inbox item when reply is not sent or always for users
                     store_inbox = store_inbox or self.recipient.principal.getCUType() == "INDIVIDUAL"

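Note on the change above: the normalized organizer address is now passed to
both canAutoSchedule() and getAutoScheduleMode(), so a principal's auto-reply
behavior can vary by organizer. A hypothetical per-organizer lookup, purely to
illustrate the shape of such a policy (the attribute names are invented, not
the real directory record fields):

    def getAutoScheduleMode(self, organizer=None):
        # per-organizer override first, then the record-wide default
        overrides = getattr(self, "autoScheduleModeByOrganizer", {})
        return overrides.get(organizer, self.defaultAutoScheduleMode)
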
Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/stdconfig.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/stdconfig.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -285,6 +285,8 @@
 
     "SpawnedDBUser" : "caldav", # The username to use when DBType is empty
 
+    "DBImportFile" : "", # File path to SQL file to import at startup (includes schema)
+
     "DSN"          : "", # Data Source Name.  Used to connect to an external
                            # database if DBType is non-empty.  Format varies
                            # depending on database type.

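Note: the new DBImportFile setting names a SQL dump (schema plus data) to be
executed when the database is first created; see the subpostgres.py change
below. A hypothetical caldavd.plist fragment (the path is invented):

    <key>DBImportFile</key>
    <string>/var/db/caldavd/previous-dump.sql</string>
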
Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/test/test_xmlutil.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/test/test_xmlutil.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/test/test_xmlutil.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -17,7 +17,7 @@
 import twistedcaldav.test.util
 from cStringIO import StringIO
 from twistedcaldav.xmlutil import readXML, writeXML, addSubElement,\
-    changeSubElementText
+    changeSubElementText, createElement, elementToXML, readXMLString
 
 class XMLUtil(twistedcaldav.test.util.TestCase):
     """
@@ -139,3 +139,14 @@
         changeSubElementText(root, "new", "new text")
         self._checkXML(root, XMLUtil.data6)
 
+
+    def test_emoji(self):
+        """
+        Verify we can serialize and parse unicode values above 0xFFFF
+        """
+        name = u"Emoji \U0001F604"
+        elem = createElement("test", text=name)
+        xmlString1 = elementToXML(elem)
+        parsed = readXMLString(xmlString1)[1]
+        xmlString2 = elementToXML(parsed)
+        self.assertEquals(xmlString1, xmlString2)

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/test/util.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/test/util.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/test/util.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -45,7 +45,11 @@
 from txdav.common.datastore.test.util import deriveQuota
 from txdav.common.datastore.file import CommonDataStore
 
+from twext.python.log import Logger
 
+log = Logger()
+
+
 __all__ = [
     "featureUnimplemented",
     "testUnimplemented",
@@ -633,6 +637,7 @@
         self.input = inputData
         self.output = []
         self.error = []
+        self.terminated = False
 
 
     def connectionMade(self):
@@ -655,14 +660,18 @@
         """
         Some output was received on stderr.
         """
+        # Ignore the Postgres "NOTICE" output
+        if "NOTICE" in data:
+            return
+
         self.error.append(data)
+
         # Attempt to exit promptly if a traceback is displayed, so we don't
         # deal with timeouts.
-        lines = ''.join(self.error).split("\n")
-        if len(lines) > 1:
-            errorReportLine = lines[-2].split(": ", 1)
-            if len(errorReportLine) == 2 and ' ' not in errorReportLine[0] and '\t' not in errorReportLine[0]:
-                self.transport.signalProcess("TERM")
+        if "Traceback" in data and not self.terminated:
+            log.error("Terminating process due to output: %s" % (data,))
+            self.terminated = True
+            self.transport.signalProcess("TERM")
 
 
     def processEnded(self, why):

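Note on the errReceived() rewrite above: the fragile last-line parsing is
replaced by two plain substring checks, namely drop Postgres NOTICE chatter,
and send TERM exactly once when a traceback appears. The policy in isolation
(a minimal sketch, not the actual test harness):

    def isNoise(data):
        # Postgres prints NOTICE lines on stderr during schema creation
        return "NOTICE" in data

    def shouldTerminate(data, alreadyTerminated):
        # kill the child process the first time a traceback shows up
        return "Traceback" in data and not alreadyTerminated

    assert isNoise("NOTICE:  CREATE TABLE will create implicit sequence")
    assert shouldTerminate("Traceback (most recent call last):", False)
    assert not shouldTerminate("Traceback (most recent call last):", True)
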
Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/upgrade.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/upgrade.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/upgrade.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -756,6 +756,10 @@
 
     docRoot = config.DocumentRoot
 
+    if not os.path.exists(docRoot):
+        log.info("DocumentRoot (%s) doesn't exist; skipping migration" % (docRoot,))
+        return
+
     versionFilePath = os.path.join(docRoot, ".calendarserver_version")
 
     onDiskVersion = 0

Modified: CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/xmlutil.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/xmlutil.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/twistedcaldav/xmlutil.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -56,7 +56,7 @@
     return etree, etree.getroot()
 
 def elementToXML(element):
-    return XML.tostring(element)
+    return XML.tostring(element, "utf-8")
 
 def writeXML(xmlfile, root):
     

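Note: passing an explicit encoding makes ElementTree emit UTF-8 bytes rather
than an ASCII string with numeric character references; on narrow Python 2
builds the references for characters above 0xFFFF come out as surrogate
halves that do not re-parse, which is presumably what the test_emoji
round-trip above guards against. A minimal sketch of the fixed behavior
(plain ElementTree; the module's actual import may differ):

    from xml.etree.ElementTree import Element, fromstring, tostring

    elem = Element("test")
    elem.text = u"Emoji \U0001F604"

    data = tostring(elem, "utf-8")  # UTF-8 bytes, emoji preserved as-is
    assert fromstring(data).text == elem.text
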
Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/subpostgres.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/subpostgres.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/subpostgres.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -165,7 +165,8 @@
                  maxConnections=20, options=[],
                  testMode=False,
                  uid=None, gid=None,
-                 spawnedDBUser="caldav"):
+                 spawnedDBUser="caldav",
+                 importFileName=None):
         """
         Initialize a L{PostgresService} pointed at a data store directory.
 
@@ -175,6 +176,11 @@
         @param subServiceFactory: a 1-arg callable that will be called with a
             1-arg callable which returns a DB-API cursor.
         @type subServiceFactory: C{callable}
+
+        @param spawnedDBUser: the PostgreSQL user name to connect as
+        @type spawnedDBUser: C{str}
+        @param importFileName: path to a SQL dump file to import when the
+            database is first created
+        @type importFileName: C{str}
         """
 
         # FIXME: By default there is very little (4MB) shared memory available,
@@ -225,6 +231,7 @@
         self.uid = uid
         self.gid = gid
         self.spawnedDBUser = spawnedDBUser
+        self.importFileName = importFileName
         self.schema = schema
         self.monitor = None
         self.openConnections = []
@@ -281,6 +288,8 @@
     def ready(self):
         """
         Subprocess is ready.  Time to initialize the subservice.
+        If the database has not been created and there is a dump file,
+        then the dump file is imported.
         """
         createDatabaseConn = self.produceConnection(
             'schema creation', 'postgres'
@@ -301,20 +310,29 @@
                 "create database %s with encoding 'UTF8'" % (self.databaseName)
             )
         except:
-            execSchema = False
+            # database already exists
+            executeSQL = False
         else:
-            execSchema = True
+            # database does not yet exist; if dump file exists, execute it, otherwise
+            # execute schema
+            executeSQL = True
+            sqlToExecute = self.schema
+            if self.importFileName:
+                importFilePath = CachingFilePath(self.importFileName)
+                if importFilePath.exists():
+                    sqlToExecute = importFilePath.getContent()
 
         createDatabaseCursor.close()
         createDatabaseConn.close()
 
-        if execSchema:
+        if executeSQL:
             connection = self.produceConnection()
             cursor = connection.cursor()
-            cursor.execute(self.schema)
+            cursor.execute(sqlToExecute)
             connection.commit()
             connection.close()
 
+        # TODO: anyone know why these two lines are here?
         connection = self.produceConnection()
         cursor = connection.cursor()
 

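Note on ready() above: the import file only matters on the very first
startup, when the "create database" statement succeeds; an existing dump,
which carries its own schema, then replaces the built-in schema. The choice
as a standalone sketch (assuming the CachingFilePath already used by this
module):

    from twext.python.filepath import CachingFilePath

    def sqlForFirstRun(schema, importFileName):
        # prefer the dump file when one is configured and present on disk;
        # otherwise fall back to executing the built-in schema
        if importFileName:
            importFilePath = CachingFilePath(importFileName)
            if importFilePath.exists():
                return importFilePath.getContent()
        return schema
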
Copied: CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/test/importFile.sql (from rev 10076, CalendarServer/trunk/txdav/base/datastore/test/importFile.sql)
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/test/importFile.sql	                        (rev 0)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/test/importFile.sql	2012-11-17 04:03:40 UTC (rev 10079)
@@ -0,0 +1,2 @@
+CREATE TABLE import_test_table (stub varchar);
+INSERT INTO import_test_table values ('value1');

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/test/test_subpostgres.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/test/test_subpostgres.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/base/datastore/test/test_subpostgres.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -132,3 +132,56 @@
         values = cursor.fetchall()
         self.assertEquals(values, [["dummy"]])
 
+    @inlineCallbacks
+    def test_startService_withDumpFile(self):
+        """
+        Assuming a properly configured environment ($PATH points at an 'initdb'
+        and 'postgres', $PYTHONPATH includes pgdb), starting a
+        L{PostgresService} will start the service passed to it, after importing
+        an existing dump file.
+        """
+
+        test = self
+        class SimpleService1(Service):
+
+            instances = []
+            ready = Deferred()
+
+            def __init__(self, connectionFactory):
+                self.connection = connectionFactory()
+                test.addCleanup(self.connection.close)
+                self.instances.append(self)
+
+
+            def startService(self):
+                cursor = self.connection.cursor()
+                try:
+                    cursor.execute(
+                        "insert into import_test_table values ('value2')"
+                    )
+                except:
+                    self.ready.errback()
+                else:
+                    self.ready.callback(None)
+                finally:
+                    cursor.close()
+
+        # The SQL in importFile.sql will get executed, including the insertion of "value1"
+        importFileName = CachingFilePath(__file__).parent().child("importFile.sql").path
+        svc = PostgresService(
+            CachingFilePath("postgres_3.pgdb"),
+            SimpleService1,
+            "",
+            databaseName="dummy_db",
+            testMode=True,
+            importFileName=importFileName
+        )
+        svc.startService()
+        self.addCleanup(svc.stopService)
+        yield SimpleService1.ready
+        connection = SimpleService1.instances[0].connection
+        cursor = connection.cursor()
+        cursor.execute("select * from import_test_table")
+        values = cursor.fetchall()
+        self.assertEquals(values, [["value1"],["value2"]])
+

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/sql.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/sql.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -674,39 +674,6 @@
         return super(Calendar, self).unshare(ECALENDARTYPE)
 
 
-    @classproperty
-    def _insertHomeChild(cls): #@NoSelf
-        """
-        DAL statement to create a home child with all default values.
-        """
-        child = cls._homeChildSchema
-        return Insert({child.RESOURCE_ID: schema.RESOURCE_ID_SEQ},
-                      Return=(child.RESOURCE_ID))
-
-
-    @classproperty
-    def _insertHomeChildMetaData(cls): #@NoSelf
-        """
-        DAL statement to create a home child with all default values.
-        """
-        child = cls._homeChildMetaDataSchema
-        return Insert({child.RESOURCE_ID: Parameter("resourceID")},
-                      Return=(child.CREATED, child.MODIFIED))
-
-
-    @classmethod
-    @inlineCallbacks
-    def _createChild(cls, home, name):
-        # Create this object
-        resourceID = (
-            yield cls._insertHomeChild.on(home._txn))[0][0]
-
-        created, modified = (
-            yield cls._insertHomeChildMetaData.on(home._txn,
-                                                  resourceID=resourceID))[0]
-        returnValue((resourceID, created, modified))
-
-
 icalfbtype_to_indexfbtype = {
     "UNKNOWN"         : 0,
     "FREE"            : 1,

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/common.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/common.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/common.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -49,12 +49,12 @@
     ICalendarObject, ICalendarHome,
     ICalendar, IAttachment, ICalendarTransaction)
 
-
 from twistedcaldav.customxml import InviteNotification, InviteSummary
 from txdav.caldav.icalendarstore import IAttachmentStorageTransport
 from txdav.caldav.icalendarstore import QuotaExceeded
-from txdav.common.datastore.test.util import deriveQuota
-from txdav.common.datastore.test.util import withSpecialQuota
+from txdav.common.datastore.test.util import (
+    deriveQuota, withSpecialQuota, transactionClean
+)
 from txdav.common.icommondatastore import ConcurrentModification
 from twistedcaldav.ical import Component
 from twistedcaldav.config import config
@@ -593,23 +593,6 @@
 
 
     @inlineCallbacks
-    def test_calendarHomes(self):
-        """
-        Finding all existing calendar homes.
-        """
-        calendarHomes = (yield self.transactionUnderTest().calendarHomes())
-        self.assertEquals(
-            [home.name() for home in calendarHomes],
-            [
-                "home1",
-                "home_no_splits",
-                "home_splits",
-                "home_splits_shared",
-            ]
-        )
-
-
-    @inlineCallbacks
     def test_displayNameNone(self):
         """
         L{ICalendarHome.calendarWithName} returns C{None} for calendars which
@@ -2272,31 +2255,64 @@
 
 
     @inlineCallbacks
-    def test_eachCalendarHome(self):
+    def test_withEachCalendarHomeDo(self):
         """
-        L{ICalendarTransaction.eachCalendarHome} returns an iterator that
-        yields 2-tuples of (transaction, home).
+        L{ICalendarStore.withEachCalendarHomeDo} executes its C{action}
+        argument repeatedly with all homes that have been created.
         """
-        # create some additional calendar homes
         additionalUIDs = set('alpha-uid home2 home3 beta-uid'.split())
         txn = self.transactionUnderTest()
         for name in additionalUIDs:
-            # maybe it's not actually necessary to yield (i.e. wait) for each
-            # one?  commit() should wait for all of them.
             yield txn.calendarHomeWithUID(name, create=True)
         yield self.commit()
-        foundUIDs = set([])
-        lastTxn = None
-        for txn, home in (yield self.storeUnderTest().eachCalendarHome()):
-            self.addCleanup(txn.commit)
-            foundUIDs.add(home.uid())
-            self.assertNotIdentical(lastTxn, txn)
-            lastTxn = txn
-        requiredUIDs = set([
-            uid for uid in self.requirements
-            if self.requirements[uid] is not None
-        ])
-        additionalUIDs.add("home_bad")
-        additionalUIDs.add("home_attachments")
-        expectedUIDs = additionalUIDs.union(requiredUIDs)
-        self.assertEquals(foundUIDs, expectedUIDs)
+        store = yield self.storeUnderTest()
+        def toEachCalendarHome(txn, eachHome):
+            return eachHome.createCalendarWithName("a-new-calendar")
+        result = yield store.withEachCalendarHomeDo(toEachCalendarHome)
+        self.assertEquals(result, None)
+        txn2 = self.transactionUnderTest()
+        for uid in additionalUIDs:
+            home = yield txn2.calendarHomeWithUID(uid)
+            self.assertNotIdentical(
+                None, (yield home.calendarWithName("a-new-calendar"))
+            )
+
+
+    @transactionClean
+    @inlineCallbacks
+    def test_withEachCalendarHomeDont(self):
+        """
+        When the function passed to L{ICalendarStore.withEachCalendarHomeDo}
+        raises an exception, processing is halted and the transaction is
+        aborted.  The exception is re-raised.
+        """
+        # create some calendar homes.
+        additionalUIDs = set('home2 home3'.split())
+        txn = self.transactionUnderTest()
+        for uid in additionalUIDs:
+            yield txn.calendarHomeWithUID(uid, create=True)
+        yield self.commit()
+        # try to create a calendar in all of them, then fail.
+        class AnException(Exception): pass
+        caught = []
+        @inlineCallbacks
+        def toEachCalendarHome(txn, eachHome):
+            caught.append(eachHome.uid())
+            yield eachHome.createCalendarWithName("wont-be-created")
+            raise AnException()
+        store = self.storeUnderTest()
+        yield self.failUnlessFailure(
+            store.withEachCalendarHomeDo(toEachCalendarHome), AnException
+        )
+        self.assertEquals(len(caught), 1)
+        @inlineCallbacks
+        def noNewCalendar(uid):
+            home = yield txn.calendarHomeWithUID(uid, create=False)
+            self.assertIdentical(
+                (yield home.calendarWithName("wont-be-created")), None
+            )
+        txn = self.transactionUnderTest()
+        yield noNewCalendar(caught[0])
+        yield noNewCalendar('home2')
+        yield noNewCalendar('home3')
+

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/test_file.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/test_file.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/test_file.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -502,25 +502,6 @@
 
 
     @inlineCallbacks
-    def test_calendarHomes(self):
-        """
-        Finding all existing calendar homes.
-        """
-        calendarHomes = (yield self.transactionUnderTest().calendarHomes())
-        self.assertEquals(
-            [home.name() for home in calendarHomes],
-            [
-                "home1",
-                "home_attachments",
-                "home_bad",
-                "home_no_splits",
-                "home_splits",
-                "home_splits_shared",
-            ]
-        )
-
-
-    @inlineCallbacks
     def test_calendarObjectsWithDotFile(self):
         """
         Adding a dotfile to the calendar home should not increase the number of

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/test_sql.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/test_sql.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/datastore/test/test_sql.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -518,19 +518,6 @@
         self.assertEqual(int(homeVersion), version)
 
 
-
-    def test_eachCalendarHome(self):
-        """
-        L{ICalendarStore.eachCalendarHome} is currently stubbed out by
-        L{txdav.common.datastore.sql.CommonDataStore}.
-        """
-        return super(CalendarSQLStorageTests, self).test_eachCalendarHome()
-
-
-    test_eachCalendarHome.todo = (
-        "stubbed out, as migration only needs to go from file->sql currently")
-
-
     @inlineCallbacks
     def test_homeProvisioningConcurrency(self):
         """

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/icalendarstore.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/icalendarstore.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/caldav/icalendarstore.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -88,14 +88,6 @@
     Transaction functionality required to be implemented by calendar stores.
     """
 
-    def calendarHomes():
-        """
-        Retrieve each calendar home in the store.
-
-        @return: a L{Deferred} which fires with a list of L{ICalendarHome}.
-        """
-
-
     def calendarHomeWithUID(uid, create=False):
         """
         Retrieve the calendar home for the principal with the given C{uid}.
@@ -114,14 +106,40 @@
     API root for calendar data storage.
     """
 
-    def eachCalendarHome(self):
+    def withEachCalendarHomeDo(action, batchSize=None):
         """
-        Enumerate all calendar homes in this store, with each one in an
-        accompanying transaction.
+        Execute a given action with each calendar home present in this store,
+        in serial, committing after each batch of homes of a given size.
 
-        @return: an iterator of 2-tuples of C{(transaction, calendar home)}
-            where C{transaction} is an L{ITransaction} provider and C{calendar
-            home} is an L{ICalendarHome} provider.
+        @note: This does not execute an action with each directory principal
+            for which there might be a calendar home; it works only on calendar
+            homes which have already been provisioned.  To execute an action on
+            every possible calendar user, you will need to inspect the
+            directory API instead.
+
+        @note: The list of calendar homes is loaded incrementally, so this will
+            not necessarily present a consistent snapshot of the entire
+            database at a particular moment.  (If this behavior is desired,
+            pass a C{batchSize} greater than the number of homes in the
+            database.)
+
+        @param action: a 2-argument callable, taking an L{ICalendarTransaction}
+            and an L{ICalendarHome}, and returning a L{Deferred} that fires
+            with C{None} when complete.  Note that C{action} should not commit
+            or abort the given L{ICalendarTransaction}.  If C{action} completes
+            normally, then it will be called again with the next
+            L{ICalendarHome}.  If it raises an exception or returns a
+            L{Deferred} that fails, processing will stop and the L{Deferred}
+            returned from C{withEachCalendarHomeDo} will fail with that same
+            L{Failure}.
+        @type action: L{callable}
+
+        @param batchSize: The maximum count of calendar homes to include in a
+            single transaction.
+        @type batchSize: L{int}
+
+        @return: a L{Deferred} which fires with L{None} when all homes have
+            completed processing, or fails with the traceback.
         """
 
 

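A hypothetical caller of the new interface (the store variable and the
printing are illustrative; only the two-argument action signature, the
no-commit/no-abort rule, and batchSize come from the docstring above):

    from twisted.internet.defer import succeed

    def logHome(txn, home):
        # called serially for each provisioned home; per the contract above,
        # the action must not commit or abort the transaction it is handed
        print "visiting calendar home:", home.uid()
        return succeed(None)

    d = store.withEachCalendarHomeDo(logHome, batchSize=50)
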
Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/sql.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/sql.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -403,6 +403,11 @@
         if ownerGroup:
             pass  # convert delete in shared group to remove of member only part 2
         else:
+
+            #the object to be deleted still exists so updateDatabase could have left it around on a
+
+
+
             yield super(AddressBookObject, self).remove()
             self._kind = None
             self._ownerAddressBookResourceID = None

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/common.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/common.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/common.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -234,20 +234,6 @@
 
 
     @inlineCallbacks
-    def test_addressbookHomes(self):
-        """
-        Finding all existing addressbook homes.
-        """
-        addressbookHomes = (yield self.transactionUnderTest().addressbookHomes())
-        self.assertEquals(
-            [home.name() for home in addressbookHomes],
-            [
-                "home1",
-            ]
-        )
-
-
-    @inlineCallbacks
     def test_addressbookHomeWithUID_exists(self):
         """
         Finding an existing addressbook home by UID results in an object that
@@ -967,29 +953,3 @@
                 (yield addressbook2.addressbookObjectWithUID(obj.uid())), None)
 
 
-    @inlineCallbacks
-    def test_eachAddressbookHome(self):
-        """
-        L{IAddressbookTransaction.eachAddressbookHome} returns an iterator that
-        yields 2-tuples of (transaction, home).
-        """
-        # create some additional addressbook homes
-        additionalUIDs = set('alpha-uid home2 home3 beta-uid'.split())
-        txn = self.transactionUnderTest()
-        for name in additionalUIDs:
-            yield txn.addressbookHomeWithUID(name, create=True)
-        yield self.commit()
-        foundUIDs = set([])
-        lastTxn = None
-        for txn, home in (yield self.storeUnderTest().eachAddressbookHome()):
-            self.addCleanup(txn.commit)
-            foundUIDs.add(home.uid())
-            self.assertNotIdentical(lastTxn, txn)
-            lastTxn = txn
-        requiredUIDs = set([
-            uid for uid in self.requirements
-            if self.requirements[uid] is not None
-        ])
-        additionalUIDs.add("home_bad")
-        expectedUIDs = additionalUIDs.union(requiredUIDs)
-        self.assertEquals(foundUIDs, expectedUIDs)

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/test_file.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/test_file.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/test_file.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -472,21 +472,6 @@
 
 
     @inlineCallbacks
-    def test_addressbookHomes(self):
-        """
-        Finding all existing addressbook homes.
-        """
-        addressbookHomes = (yield self.transactionUnderTest().addressbookHomes())
-        self.assertEquals(
-            [home.name() for home in addressbookHomes],
-            [
-                "home1",
-                "home_bad",
-            ]
-        )
-
-
-    @inlineCallbacks
     def test_addressbookObjectsWithDotFile(self):
         """
         Adding a dotfile to the addressbook home should not create a new

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/test_sql.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/test_sql.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/datastore/test/test_sql.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -223,19 +223,6 @@
         self.assertEqual(int(homeVersion), version)
 
 
-
-    def test_eachAddressbookHome(self):
-        """
-        L{IAddressbookStore.eachAddressbookHome} is currently stubbed out by
-        L{txdav.common.datastore.sql.CommonDataStore}.
-        """
-        return super(AddressBookSQLStorageTests, self).test_eachAddressbookHome()
-
-
-    test_eachAddressbookHome.todo = (
-        "stubbed out, as migration only needs to go from file->sql currently")
-
-
     @inlineCallbacks
     def test_putConcurrency(self):
         """

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/iaddressbookstore.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/iaddressbookstore.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/carddav/iaddressbookstore.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -48,14 +48,6 @@
     Transaction interface that addressbook stores must provide.
     """
 
-    def addressbookHomes():
-        """
-        Retrieve each addressbook home in the store.
-
-        @return: a L{Deferred} which fires with a list of L{ICalendarHome}.
-        """
-
-
     def addressbookHomeWithUID(uid, create=False):
         """
         Retrieve the addressbook home for the principal with the given C{uid}.

Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/file.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/file.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/file.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -19,6 +19,7 @@
 Common utility functions for a file based datastore.
 """
 
+import sys
 from twext.internet.decorate import memoizedKey
 from twext.python.log import LoggingMixIn
 from txdav.xml.rfc2518 import ResourceType, GETContentType, HRef
@@ -132,6 +133,38 @@
         )
 
 
+    @inlineCallbacks
+    def _withEachHomeDo(self, enumerator, action, batchSize):
+        """
+        Implementation of L{ICalendarStore.withEachCalendarHomeDo} and
+        L{IAddressBookStore.withEachAddressbookHomeDo}.
+        """
+        for txn, home in enumerator():
+            try:
+                yield action(txn, home)
+            except:
+                a, b, c = sys.exc_info()
+                yield txn.abort()
+                raise a, b, c
+            else:
+                yield txn.commit()
+
+
+    def withEachCalendarHomeDo(self, action, batchSize=None):
+        """
+        Implementation of L{ICalendarStore.withEachCalendarHomeDo}.
+        """
+        return self._withEachHomeDo(self._eachCalendarHome, action, batchSize)
+
+
+    def withEachAddressbookHomeDo(self, action, batchSize=None):
+        """
+        Implementation of L{IAddressBookStore.withEachAddressbookHomeDo}.
+        """
+        return self._withEachHomeDo(self._eachAddressbookHome, action,
+                                    batchSize)
+
+
     def setMigrating(self, state):
         """
         Set the "migrating" state
@@ -149,9 +182,9 @@
 
     def _homesOfType(self, storeType):
         """
-        Common implementation of L{ICalendarStore.eachCalendarHome} and
-        L{IAddressBookStore.eachAddressbookHome}; see those for a description
-        of the return type.
+        Common implementation of L{_eachCalendarHome} and
+        L{_eachAddressbookHome}; see those for a description of the return
+        type.
 
         @param storeType: one of L{EADDRESSBOOKTYPE} or L{ECALENDARTYPE}.
         """
@@ -172,11 +205,11 @@
                         yield (txn, home)
 
 
-    def eachCalendarHome(self):
+    def _eachCalendarHome(self):
         return self._homesOfType(ECALENDARTYPE)
 
 
-    def eachAddressbookHome(self):
+    def _eachAddressbookHome(self):
         return self._homesOfType(EADDRESSBOOKTYPE)
 
 
@@ -228,18 +261,10 @@
         CommonStoreTransaction._homeClass[EADDRESSBOOKTYPE] = AddressBookHome
 
 
-    def calendarHomes(self):
-        return self.homes(ECALENDARTYPE)
-
-
     def calendarHomeWithUID(self, uid, create=False):
         return self.homeWithUID(ECALENDARTYPE, uid, create=create)
 
 
-    def addressbookHomes(self):
-        return self.homes(EADDRESSBOOKTYPE)
-
-
     def addressbookHomeWithUID(self, uid, create=False):
         return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create)
 

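The file-store half of this change replaces the pull-style enumerators (eachCalendarHome()/eachAddressbookHome(), now private) with the callback-driven withEachCalendarHomeDo()/withEachAddressbookHomeDo(), which commit each home's transaction after a successful action and abort it on failure; note that batchSize is accepted by both new methods but not yet consulted by either implementation. A minimal caller sketch, assuming an already-built store; the reporting function and `out` writable are hypothetical stand-ins:

    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def summarizeHomes(store, out):
        # Raising inside `action` aborts that home's transaction instead of
        # committing it; `out` is any writable object supplied by the caller.
        @inlineCallbacks
        def action(txn, home):
            calendars = yield home.calendars()
            out.write("%s: %d calendars\n" % (home.uid(), len(calendars)))
        yield store.withEachCalendarHomeDo(action)
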
Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -25,6 +25,8 @@
     "CommonHome",
 ]
 
+import sys
+
 from uuid import uuid4, UUID
 
 from zope.interface import implements, directlyProvides
@@ -178,20 +180,48 @@
             self.queryCacher = None
 
 
-    def eachCalendarHome(self):
+    @inlineCallbacks
+    def _withEachHomeDo(self, homeTable, homeFromTxn, action, batchSize):
         """
-        @see: L{ICalendarStore.eachCalendarHome}
+        Implementation of L{ICalendarStore.withEachCalendarHomeDo} and
+        L{IAddressBookStore.withEachAddressbookHomeDo}.
         """
-        return []
+        txn = yield self.newTransaction()
+        try:
+            allUIDs = yield (Select([homeTable.OWNER_UID], From=homeTable)
+                             .on(txn))
+            for [uid] in allUIDs:
+                yield action(txn, (yield homeFromTxn(txn, uid)))
+        except:
+            a, b, c = sys.exc_info()
+            yield txn.abort()
+            raise a, b, c
+        else:
+            yield txn.commit()
 
 
-    def eachAddressbookHome(self):
+    def withEachCalendarHomeDo(self, action, batchSize=None):
         """
-        @see: L{IAddressbookStore.eachAddressbookHome}
+        Implementation of L{ICalendarStore.withEachCalendarHomeDo}.
         """
-        return []
+        return self._withEachHomeDo(
+            schema.CALENDAR_HOME,
+            lambda txn, uid: txn.calendarHomeWithUID(uid),
+            action, batchSize
+        )
 
 
+    def withEachAddressbookHomeDo(self, action, batchSize=None):
+        """
+        Implementation of L{IAddressBookStore.withEachAddressbookHomeDo}.
+        """
+        return self._withEachHomeDo(
+            schema.ADDRESSBOOK_HOME,
+            lambda txn, uid: txn.addressbookHomeWithUID(uid),
+            action, batchSize
+        )
+
+
     def newTransaction(self, label="unlabeled", disableCache=False):
         """
         @see: L{IDataStore.newTransaction}
@@ -465,18 +495,10 @@
         raise RuntimeError("Database key %s cannot be determined." % (key,))
 
 
-    def calendarHomes(self):
-        return self.homes(ECALENDARTYPE)
-
-
     def calendarHomeWithUID(self, uid, create=False):
         return self.homeWithUID(ECALENDARTYPE, uid, create=create)
 
 
-    def addressbookHomes(self):
-        return self.homes(EADDRESSBOOKTYPE)
-
-
     def addressbookHomeWithUID(self, uid, create=False):
         return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create)
 
@@ -2819,8 +2841,44 @@
         returnValue(child)
 
 
+    # TODO: move to Calendar
+    @classproperty
+    def _insertHomeChild(cls): #@NoSelf
+        """
+        DAL statement to create a home child with all default values.
+        """
+        child = cls._homeChildSchema
+        return Insert({child.RESOURCE_ID: schema.RESOURCE_ID_SEQ},
+                      Return=(child.RESOURCE_ID))
+
+
+    # TODO: move to Calendar
+    @classproperty
+    def _insertHomeChildMetaData(cls): #@NoSelf
+        """
+        DAL statement to create the metadata entry for a home child with all default values.
+        """
+        child = cls._homeChildMetaDataSchema
+        return Insert({child.RESOURCE_ID: Parameter("resourceID")},
+                      Return=(child.CREATED, child.MODIFIED))
+
+
+    # TODO: Make abstract here and move to Calendar
     @classmethod
     @inlineCallbacks
+    def _createChild(cls, home, name):
+        # Create this object
+        resourceID = (
+            yield cls._insertHomeChild.on(home._txn))[0][0]
+
+        created, modified = (
+            yield cls._insertHomeChildMetaData.on(home._txn,
+                                                  resourceID=resourceID))[0]
+        returnValue((resourceID, created, modified))
+
+
+    @classmethod
+    @inlineCallbacks
     def create(cls, home, name):
         child = (yield cls.objectWithName(home, name))
         if child is not None:
@@ -3085,7 +3143,7 @@
                       Where=obj.PARENT_RESOURCE_ID == Parameter('resourceID'))
 
 
-    # TODO: Make abstract here, and and move to Calendar
+    # TODO: Make abstract here and move to Calendar
     @inlineCallbacks
     def listObjectResources(self):
         if self._objectNames is None:
@@ -3106,7 +3164,7 @@
                       Where=obj.PARENT_RESOURCE_ID == Parameter('resourceID'))
 
 
-    # TODO: Make abstract here, and and move to Calendar
+    # TODO: Make abstract here and move to Calendar
     @inlineCallbacks
     def countObjectResources(self):
         if self._objectNames is None:
@@ -3514,7 +3572,7 @@
         return Select(cls._allColumns, From=obj,
                       Where=obj.PARENT_RESOURCE_ID == Parameter("parentID"))
 
-    # TODO: Make abstract here, and and move to CalendarObject
+    # TODO: Make abstract here and move to CalendarObject
     @classmethod
     @inlineCallbacks
     def _allColumnsWithParent(cls, parent):
@@ -3587,7 +3645,7 @@
                           obj.RESOURCE_NAME.In(Parameter("names", len(names)))))
 
 
-    # TODO: Make abstract here, and and move to CalendarObject
+    # TODO: Make abstract here and move to CalendarObject
     @classmethod
     @inlineCallbacks
     def _allColumnsWithParentAndNames(cls, parent, names):

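The SQL store gains the same pair of methods but runs every home through a single transaction: all OWNER_UIDs are selected from the home table, each UID is materialized via the supplied homeFromTxn callable, and one failure aborts the whole sweep. Worth calling out is the shared error idiom: sys.exc_info() must be captured before yielding to txn.abort(), since the intervening Deferred would otherwise clear the active exception, and the Python 2 three-argument raise then rethrows with the original traceback. A standalone sketch of that pattern, with a stub transaction standing in for the real one:

    import sys
    from twisted.internet.defer import inlineCallbacks, succeed

    class StubTxn(object):
        # Stand-in for a real store transaction; just enough for the sketch.
        def abort(self):
            return succeed(None)
        def commit(self):
            return succeed(None)

    @inlineCallbacks
    def runThenCommit(txn, work):
        try:
            yield work(txn)
        except:
            a, b, c = sys.exc_info()   # capture before the next yield
            yield txn.abort()
            raise a, b, c              # Python 2 re-raise with traceback
        else:
            yield txn.commit()

    # e.g. runThenCommit(StubTxn(), lambda txn: succeed(None))
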
Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_legacy.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_legacy.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_legacy.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -39,7 +39,7 @@
     ReservationError, NoSuchObjectResourceError
 
 from txdav.common.datastore.sql_tables import schema
-from twext.enterprise.dal.syntax import Parameter, Select
+from twext.enterprise.dal.syntax import Parameter, Select 
 from twext.python.clsprop import classproperty
 from twext.python.log import Logger, LoggingMixIn
 
@@ -226,7 +226,7 @@
 
         # For SQL data DB we need to restrict the query to just the targeted calendar resource-id if provided
         if self.calendarid:
-
+            
             test = expression.isExpression("CALENDAR_OBJECT.CALENDAR_RESOURCE_ID", str(self.calendarid), True)
 
             # Since timerange expression already have the calendar resource-id test in them, do not
@@ -236,10 +236,10 @@
             # Top-level timerange expression already has calendar resource-id restriction in it
             if isinstance(self.expression, expression.timerangeExpression):
                 pass
-
+            
             # Top-level OR - check each component
             elif isinstance(self.expression, expression.orExpression):
-
+                
                 def _hasTopLevelTimerange(testexpr):
                     if isinstance(testexpr, expression.timerangeExpression):
                         return True
@@ -247,7 +247,7 @@
                         return any([isinstance(expr, expression.timerangeExpression) for expr in testexpr.expressions])
                     else:
                         return False
-
+                        
                 hasTimerange = any([_hasTopLevelTimerange(expr) for expr in self.expression.expressions])
 
                 if hasTimerange:
@@ -255,16 +255,16 @@
                     pass
                 else:
                     # AND the whole thing with calendarid
-                    self.expression = test.andWith(self.expression)
+                    self.expression = test.andWith(self.expression)    
 
-
+            
             # Top-level AND - only add additional expression if timerange not present
             elif isinstance(self.expression, expression.andExpression):
                 hasTimerange = any([isinstance(expr, expression.timerangeExpression) for expr in self.expression.expressions])
                 if not hasTimerange:
                     # AND the whole thing
-                    self.expression = test.andWith(self.expression)
-
+                    self.expression = test.andWith(self.expression)    
+            
             # Just AND the entire thing
             else:
                 self.expression = test.andWith(self.expression)
@@ -472,10 +472,10 @@
         with a longer expansion.
         """
         obj = yield self.calendar.calendarObjectWithName(name)
-
+        
         # Use a new transaction to do this update quickly without locking the row for too long. However, the original
         # transaction may have the row locked, so use wait=False and if that fails, fall back to using the original txn. 
-
+        
         newTxn = obj.transaction().store().newTransaction()
         try:
             yield obj.lock(wait=False, txn=newTxn)
@@ -494,7 +494,7 @@
             else:
                 # We repeat this check because the resource may have been re-expanded by someone else
                 rmin, rmax = (yield obj.recurrenceMinMax(txn=newTxn))
-
+                
                 # If the resource is not fully expanded, see if within the required range or not.
                 # Note that expand_start could be None if no lower limit is applied, but expand_end will
                 # never be None
@@ -585,7 +585,7 @@
                     if minDate < truncateLowerLimit:
                         raise TimeRangeLowerLimit(truncateLowerLimit)
 
-
+                        
                 if maxDate is not None or minDate is not None:
                     yield self.testAndUpdateIndex(minDate, maxDate)
 
@@ -739,10 +739,10 @@
 
         # For SQL data DB we need to restrict the query to just the targeted calendar resource-id if provided
         if self.calendarid:
-
+            
             # AND the whole thing
             test = expression.isExpression("ADDRESSBOOK_OBJECT.ADDRESSBOOK_RESOURCE_ID", str(self.calendarid), True)
-            self.expression = test.andWith(self.expression)
+            self.expression = test.andWith(self.expression)    
 
         # Generate ' where ...' partial statement
         self.sout.write(self.WHERE)

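Most of the sql_legacy.py churn is trailing whitespace reintroduced by the trunk merge, but the surrounding code documents a real rule: timerange expressions already carry the calendar resource-id test, so the extra CALENDAR_RESOURCE_ID comparison is ANDed onto the query only when no top-level timerange is present (directly, or inside a top-level OR's AND branches). A condensed restatement of that decision as a hypothetical helper, not code from this changeset:

    def needsCalendaridTest(expr, expression):
        # `expression` is the query-expression module used above; returns
        # True when the generator should AND the resource-id test onto expr.
        def topLevelTimerange(e):
            if isinstance(e, expression.timerangeExpression):
                return True
            if isinstance(e, expression.andExpression):
                return any(isinstance(s, expression.timerangeExpression)
                           for s in e.expressions)
            return False

        if topLevelTimerange(expr):
            return False
        if isinstance(expr, expression.orExpression):
            return not any(topLevelTimerange(s) for s in expr.expressions)
        return True
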
Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/current-oracle-dialect.sql	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/current-oracle-dialect.sql	2012-11-17 04:03:40 UTC (rev 10079)
@@ -1,6 +1,14 @@
 create sequence RESOURCE_ID_SEQ;
 create sequence INSTANCE_ID_SEQ;
 create sequence REVISION_SEQ;
+create table NODE_INFO (
+    "HOSTNAME" nvarchar2(255),
+    "PID" integer not null,
+    "PORT" integer not null,
+    "TIME" timestamp default CURRENT_TIMESTAMP at time zone 'UTC' not null, 
+    primary key("HOSTNAME", "PORT")
+);
+
 create table CALENDAR_HOME (
     "RESOURCE_ID" integer primary key,
     "OWNER_UID" nvarchar2(255) unique,
@@ -249,9 +257,9 @@
     "TOKEN" nvarchar2(255),
     "RESOURCE_KEY" nvarchar2(255),
     "MODIFIED" integer not null,
-    "SUBSCRIBER_GUID" nvarchar2(255), 
+    "SUBSCRIBER_GUID" nvarchar2(255),
     "USER_AGENT" nvarchar2(255) default null,
-    "IP_ADDR" nvarchar2(255) default null,
+    "IP_ADDR" nvarchar2(255) default null, 
     primary key("TOKEN", "RESOURCE_KEY")
 );
 
@@ -260,8 +268,8 @@
     "VALUE" nvarchar2(255)
 );
 
-insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '10');
-insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '2');
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '12');
+insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '3');
 insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '1');
 create index INVITE_INVITE_UID_9b0902ff on INVITE (
     INVITE_UID

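The Oracle dialect catches up with Postgres here: NODE_INFO is added and the recorded version moves to VERSION 12 / CALENDAR-DATAVERSION 3. As a cross-check, the stored version can be read back through the DAL with the same Select pattern the tests later in this changeset use; a sketch, assuming an open transaction:

    from twisted.internet.defer import inlineCallbacks, returnValue
    from twext.enterprise.dal.syntax import Select
    from txdav.common.datastore.sql_tables import schema

    @inlineCallbacks
    def storedSchemaVersion(txn):
        cs = schema.CALENDARSERVER
        rows = yield Select(
            [cs.VALUE], From=cs, Where=cs.NAME == 'VERSION',
        ).on(txn)
        returnValue(int(rows[0][0]))   # 12 once this schema is applied
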
Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/current.sql
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/current.sql	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/current.sql	2012-11-17 04:03:40 UTC (rev 10079)
@@ -35,7 +35,7 @@
   PORT      integer not null,
   TIME      timestamp not null default timezone('UTC', CURRENT_TIMESTAMP),
 
-  primary key(HOSTNAME, PORT)
+  primary key (HOSTNAME, PORT)
 );
 
 
@@ -178,7 +178,7 @@
   CREATED              timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
   MODIFIED             timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
 
-  unique(CALENDAR_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+  unique (CALENDAR_RESOURCE_ID, RESOURCE_NAME) -- implicit index
 
   -- since the 'inbox' is a 'calendar resource' for the purpose of storing
   -- calendar objects, this constraint has to be selectively enforced by the
@@ -293,7 +293,7 @@
   MODIFIED                    timestamp default timezone('UTC', CURRENT_TIMESTAMP),
   PATH                        varchar(1024) not null,
 
-  primary key(DROPBOX_ID, PATH) --implicit index
+  primary key (DROPBOX_ID, PATH) --implicit index
 );
 
 create index ATTACHMENT_CALENDAR_HOME_RESOURCE_ID on
@@ -309,7 +309,7 @@
   VALUE       text         not null, -- FIXME: xml?
   VIEWER_UID  varchar(255),
 
-  primary key(RESOURCE_ID, NAME, VIEWER_UID) -- implicit index
+  primary key (RESOURCE_ID, NAME, VIEWER_UID) -- implicit index
 );
 
 
@@ -350,8 +350,8 @@
   MODIFIED                timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
   KIND 			  		  integer      not null, -- enum OBJECT_KIND 
   
-  unique(ADDRESSBOOK_RESOURCE_ID, RESOURCE_NAME), -- implicit index
-  unique(ADDRESSBOOK_RESOURCE_ID, VCARD_UID)      -- implicit index
+  unique (ADDRESSBOOK_RESOURCE_ID, RESOURCE_NAME), -- implicit index
+  unique (ADDRESSBOOK_RESOURCE_ID, VCARD_UID)      -- implicit index
 );
 
 -----------------------------
@@ -376,7 +376,7 @@
     GROUP_ID              integer      not null references ADDRESSBOOK_OBJECT on delete cascade,	-- AddressBook Object's (kind=='group') RESOURCE_ID
  	ADDRESSBOOK_ID		  integer      not null references ADDRESSBOOK_OBJECT on delete cascade,	-- only used on insert and whole address book delete
     MEMBER_ID             integer      not null references ADDRESSBOOK_OBJECT,						-- member AddressBook Object's RESOURCE_ID
-    primary key(GROUP_ID, MEMBER_ID) -- implicit index
+    primary key (GROUP_ID, MEMBER_ID) -- implicit index
 );
 
 ------------------------------------------
@@ -387,7 +387,7 @@
     GROUP_ID              integer      not null references ADDRESSBOOK_OBJECT on delete cascade,	-- AddressBook Object's (kind=='group') RESOURCE_ID
  	ADDRESSBOOK_ID		  integer      not null references ADDRESSBOOK_OBJECT on delete cascade,	-- only used on insert and whole address book delete
     MEMBER_ADDRESS  	  varchar(255) not null, 													-- member AddressBook Object's 'calendar' address
-    primary key(GROUP_ID, MEMBER_ADDRESS) -- implicit index
+    primary key (GROUP_ID, MEMBER_ADDRESS) -- implicit index
 );
 
 ----------------------
@@ -404,8 +404,8 @@
   BIND_STATUS                  		integer      not null, 	-- enum CALENDAR_BIND_STATUS
   MESSAGE                      		text,        			-- FIXME: xml?
 
-  primary key(ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_ID), -- implicit index
-  unique(ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_NAME)     -- implicit index
+  primary key (ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_ID), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_NAME)     -- implicit index
 );
 
 create index ADDRESSBOOK_BIND_RESOURCE_ID on
@@ -485,7 +485,7 @@
   USER_AGENT                    varchar(255) default null,
   IP_ADDR                       varchar(255) default null,
 
-  primary key(TOKEN, RESOURCE_KEY) -- implicit index
+  primary key (TOKEN, RESOURCE_KEY) -- implicit index
 );
 
 create index APN_SUBSCRIPTIONS_RESOURCE_KEY

Copied: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/old/oracle-dialect/v11.sql (from rev 10076, CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v11.sql)
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/old/oracle-dialect/v11.sql	                        (rev 0)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/old/oracle-dialect/v11.sql	2012-11-17 04:03:40 UTC (rev 10079)
@@ -0,0 +1,362 @@
+create sequence RESOURCE_ID_SEQ;
+create sequence INSTANCE_ID_SEQ;
+create sequence REVISION_SEQ;
+create table CALENDAR_HOME (
+    "RESOURCE_ID" integer primary key,
+    "OWNER_UID" nvarchar2(255) unique,
+    "DATAVERSION" integer default 0 not null
+);
+
+create table CALENDAR_HOME_METADATA (
+    "RESOURCE_ID" integer primary key references CALENDAR_HOME on delete cascade,
+    "QUOTA_USED_BYTES" integer default 0 not null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR (
+    "RESOURCE_ID" integer primary key
+);
+
+create table CALENDAR_METADATA (
+    "RESOURCE_ID" integer primary key references CALENDAR on delete cascade,
+    "SUPPORTED_COMPONENTS" nvarchar2(255) default null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table INVITE (
+    "INVITE_UID" nvarchar2(255),
+    "NAME" nvarchar2(255),
+    "RECIPIENT_ADDRESS" nvarchar2(255),
+    "HOME_RESOURCE_ID" integer not null,
+    "RESOURCE_ID" integer not null
+);
+
+create table NOTIFICATION_HOME (
+    "RESOURCE_ID" integer primary key,
+    "OWNER_UID" nvarchar2(255) unique
+);
+
+create table NOTIFICATION (
+    "RESOURCE_ID" integer primary key,
+    "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME,
+    "NOTIFICATION_UID" nvarchar2(255),
+    "XML_TYPE" nvarchar2(255),
+    "XML_DATA" nclob,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique("NOTIFICATION_UID", "NOTIFICATION_HOME_RESOURCE_ID")
+);
+
+create table CALENDAR_BIND (
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "CALENDAR_RESOURCE_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "SEEN_BY_OWNER" integer not null,
+    "SEEN_BY_SHAREE" integer not null,
+    "MESSAGE" nclob, 
+    primary key("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_ID"), 
+    unique("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_NAME")
+);
+
+create table CALENDAR_BIND_MODE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('own', 0);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('write', 2);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('direct', 3);
+create table CALENDAR_BIND_STATUS (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invited', 0);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('accepted', 1);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('declined', 2);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invalid', 3);
+create table CALENDAR_OBJECT (
+    "RESOURCE_ID" integer primary key,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "ICALENDAR_TEXT" nclob,
+    "ICALENDAR_UID" nvarchar2(255),
+    "ICALENDAR_TYPE" nvarchar2(255),
+    "ATTACHMENTS_MODE" integer default 0 not null,
+    "DROPBOX_ID" nvarchar2(255),
+    "ORGANIZER" nvarchar2(255),
+    "ORGANIZER_OBJECT" integer references CALENDAR_OBJECT,
+    "RECURRANCE_MIN" date,
+    "RECURRANCE_MAX" date,
+    "ACCESS" integer default 0 not null,
+    "SCHEDULE_OBJECT" integer default 0,
+    "SCHEDULE_TAG" nvarchar2(36) default null,
+    "SCHEDULE_ETAGS" nclob default null,
+    "PRIVATE_COMMENTS" integer default 0 not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique("CALENDAR_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table CALENDAR_OBJECT_ATTACHMENTS_MO (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('none', 0);
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('write', 2);
+create table CALENDAR_ACCESS_TYPE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(32) unique
+);
+
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('', 0);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('public', 1);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('private', 2);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('confidential', 3);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('restricted', 4);
+create table TIME_RANGE (
+    "INSTANCE_ID" integer primary key,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+    "FLOATING" integer not null,
+    "START_DATE" timestamp not null,
+    "END_DATE" timestamp not null,
+    "FBTYPE" integer not null,
+    "TRANSPARENT" integer not null
+);
+
+create table FREE_BUSY_TYPE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('unknown', 0);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('free', 1);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy', 2);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-unavailable', 3);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-tentative', 4);
+create table TRANSPARENCY (
+    "TIME_RANGE_INSTANCE_ID" integer not null references TIME_RANGE on delete cascade,
+    "USER_ID" nvarchar2(255),
+    "TRANSPARENT" integer not null
+);
+
+create table ATTACHMENT (
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "DROPBOX_ID" nvarchar2(255),
+    "CONTENT_TYPE" nvarchar2(255),
+    "SIZE" integer not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "PATH" nvarchar2(1024), 
+    primary key("DROPBOX_ID", "PATH")
+);
+
+create table RESOURCE_PROPERTY (
+    "RESOURCE_ID" integer not null,
+    "NAME" nvarchar2(255),
+    "VALUE" nclob,
+    "VIEWER_UID" nvarchar2(255), 
+    primary key("RESOURCE_ID", "NAME", "VIEWER_UID")
+);
+
+create table ADDRESSBOOK_HOME (
+    "RESOURCE_ID" integer primary key,
+    "OWNER_UID" nvarchar2(255) unique,
+    "DATAVERSION" integer default 0 not null
+);
+
+create table ADDRESSBOOK_HOME_METADATA (
+    "RESOURCE_ID" integer primary key references ADDRESSBOOK_HOME on delete cascade,
+    "QUOTA_USED_BYTES" integer default 0 not null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table ADDRESSBOOK (
+    "RESOURCE_ID" integer primary key
+);
+
+create table ADDRESSBOOK_METADATA (
+    "RESOURCE_ID" integer primary key references ADDRESSBOOK on delete cascade,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table ADDRESSBOOK_BIND (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "ADDRESSBOOK_RESOURCE_ID" integer not null references ADDRESSBOOK on delete cascade,
+    "ADDRESSBOOK_RESOURCE_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "SEEN_BY_OWNER" integer not null,
+    "SEEN_BY_SHAREE" integer not null,
+    "MESSAGE" nclob, 
+    primary key("ADDRESSBOOK_HOME_RESOURCE_ID", "ADDRESSBOOK_RESOURCE_ID"), 
+    unique("ADDRESSBOOK_HOME_RESOURCE_ID", "ADDRESSBOOK_RESOURCE_NAME")
+);
+
+create table ADDRESSBOOK_OBJECT (
+    "RESOURCE_ID" integer primary key,
+    "ADDRESSBOOK_RESOURCE_ID" integer not null references ADDRESSBOOK on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "VCARD_TEXT" nclob,
+    "VCARD_UID" nvarchar2(255),
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique("ADDRESSBOOK_RESOURCE_ID", "RESOURCE_NAME"), 
+    unique("ADDRESSBOOK_RESOURCE_ID", "VCARD_UID")
+);
+
+create table CALENDAR_OBJECT_REVISIONS (
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "CALENDAR_RESOURCE_ID" integer references CALENDAR,
+    "CALENDAR_NAME" nvarchar2(255) default null,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null
+);
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "ADDRESSBOOK_RESOURCE_ID" integer references ADDRESSBOOK,
+    "ADDRESSBOOK_NAME" nvarchar2(255) default null,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null
+);
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+    "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null, 
+    unique("NOTIFICATION_HOME_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table APN_SUBSCRIPTIONS (
+    "TOKEN" nvarchar2(255),
+    "RESOURCE_KEY" nvarchar2(255),
+    "MODIFIED" integer not null,
+    "SUBSCRIBER_GUID" nvarchar2(255),
+    "USER_AGENT" nvarchar2(255) default null,
+    "IP_ADDR" nvarchar2(255) default null, 
+    primary key("TOKEN", "RESOURCE_KEY")
+);
+
+create table CALENDARSERVER (
+    "NAME" nvarchar2(255) primary key,
+    "VALUE" nvarchar2(255)
+);
+
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '11');
+insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '3');
+insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '1');
+create index INVITE_INVITE_UID_9b0902ff on INVITE (
+    INVITE_UID
+);
+
+create index INVITE_RESOURCE_ID_b36ddc23 on INVITE (
+    RESOURCE_ID
+);
+
+create index INVITE_HOME_RESOURCE__e9bdf77e on INVITE (
+    HOME_RESOURCE_ID
+);
+
+create index NOTIFICATION_NOTIFICA_f891f5f9 on NOTIFICATION (
+    NOTIFICATION_HOME_RESOURCE_ID
+);
+
+create index CALENDAR_BIND_RESOURC_e57964d4 on CALENDAR_BIND (
+    CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_CALEN_a9a453a9 on CALENDAR_OBJECT (
+    CALENDAR_RESOURCE_ID,
+    ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_CALEN_96e83b73 on CALENDAR_OBJECT (
+    CALENDAR_RESOURCE_ID,
+    RECURRANCE_MAX
+);
+
+create index CALENDAR_OBJECT_ORGAN_7ce24750 on CALENDAR_OBJECT (
+    ORGANIZER_OBJECT
+);
+
+create index CALENDAR_OBJECT_DROPB_de041d80 on CALENDAR_OBJECT (
+    DROPBOX_ID
+);
+
+create index TIME_RANGE_CALENDAR_R_beb6e7eb on TIME_RANGE (
+    CALENDAR_RESOURCE_ID
+);
+
+create index TIME_RANGE_CALENDAR_O_acf37bd1 on TIME_RANGE (
+    CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index TRANSPARENCY_TIME_RAN_5f34467f on TRANSPARENCY (
+    TIME_RANGE_INSTANCE_ID
+);
+
+create index ATTACHMENT_CALENDAR_H_0078845c on ATTACHMENT (
+    CALENDAR_HOME_RESOURCE_ID
+);
+
+create index ADDRESSBOOK_BIND_RESO_205aa75c on ADDRESSBOOK_BIND (
+    ADDRESSBOOK_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_3a3956c4 on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_HOME_RESOURCE_ID,
+    CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_2643d556 on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    RESOURCE_NAME
+);
+
+create index CALENDAR_OBJECT_REVIS_265c8acf on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_f460d62d on ADDRESSBOOK_OBJECT_REVISIONS (
+    ADDRESSBOOK_HOME_RESOURCE_ID,
+    ADDRESSBOOK_RESOURCE_ID
+);
+
+create index ADDRESSBOOK_OBJECT_RE_9a848f39 on ADDRESSBOOK_OBJECT_REVISIONS (
+    ADDRESSBOOK_RESOURCE_ID,
+    RESOURCE_NAME
+);
+
+create index ADDRESSBOOK_OBJECT_RE_cb101e6b on ADDRESSBOOK_OBJECT_REVISIONS (
+    ADDRESSBOOK_RESOURCE_ID,
+    REVISION
+);
+
+create index NOTIFICATION_OBJECT_R_036a9cee on NOTIFICATION_OBJECT_REVISIONS (
+    NOTIFICATION_HOME_RESOURCE_ID,
+    REVISION
+);
+
+create index APN_SUBSCRIPTIONS_RES_9610d78e on APN_SUBSCRIPTIONS (
+    RESOURCE_KEY
+);
+

Copied: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/old/postgres-dialect/v11.sql (from rev 10076, CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v11.sql)
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/old/postgres-dialect/v11.sql	                        (rev 0)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/sql_schema/old/postgres-dialect/v11.sql	2012-11-17 04:03:40 UTC (rev 10079)
@@ -0,0 +1,502 @@
+-- -*- test-case-name: txdav.caldav.datastore.test.test_sql,txdav.carddav.datastore.test.test_sql -*-
+
+----
+-- Copyright (c) 2010-2012 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+-----------------
+-- Resource ID --
+-----------------
+
+create sequence RESOURCE_ID_SEQ;
+
+
+-------------------
+-- Calendar Home --
+-------------------
+
+create table CALENDAR_HOME (
+  RESOURCE_ID      integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID        varchar(255) not null unique,                                 -- implicit index
+  DATAVERSION	   integer      default 0 not null
+);
+
+----------------------------
+-- Calendar Home Metadata --
+----------------------------
+
+create table CALENDAR_HOME_METADATA (
+  RESOURCE_ID      integer      primary key references CALENDAR_HOME on delete cascade, -- implicit index
+  QUOTA_USED_BYTES integer      default 0 not null,
+  CREATED          timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED         timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+--------------
+-- Calendar --
+--------------
+
+create table CALENDAR (
+  RESOURCE_ID integer   primary key default nextval('RESOURCE_ID_SEQ') -- implicit index
+);
+
+
+-----------------------
+-- Calendar Metadata --
+-----------------------
+
+create table CALENDAR_METADATA (
+  RESOURCE_ID           integer   primary key references CALENDAR on delete cascade, -- implicit index
+  SUPPORTED_COMPONENTS  varchar(255) default null,
+  CREATED               timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED              timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+------------------------
+-- Sharing Invitation --
+------------------------
+
+create table INVITE (
+    INVITE_UID         varchar(255) not null,
+    NAME               varchar(255) not null,
+    RECIPIENT_ADDRESS  varchar(255) not null,
+    HOME_RESOURCE_ID   integer      not null,
+    RESOURCE_ID        integer      not null
+
+    -- Need primary key on (INVITE_UID, NAME, RECIPIENT_ADDRESS)?
+);
+
+create index INVITE_INVITE_UID on INVITE(INVITE_UID);
+create index INVITE_RESOURCE_ID on INVITE(RESOURCE_ID);
+create index INVITE_HOME_RESOURCE_ID on INVITE(HOME_RESOURCE_ID);
+
+---------------------------
+-- Sharing Notifications --
+---------------------------
+
+create table NOTIFICATION_HOME (
+  RESOURCE_ID integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID   varchar(255) not null unique                                 -- implicit index
+);
+
+create table NOTIFICATION (
+  RESOURCE_ID                   integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME,
+  NOTIFICATION_UID              varchar(255) not null,
+  XML_TYPE                      varchar(255) not null,
+  XML_DATA                      text         not null,
+  MD5                           char(32)     not null,
+  CREATED                       timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                      timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique(NOTIFICATION_UID, NOTIFICATION_HOME_RESOURCE_ID) -- implicit index
+);
+
+create index NOTIFICATION_NOTIFICATION_HOME_RESOURCE_ID on
+  NOTIFICATION(NOTIFICATION_HOME_RESOURCE_ID);
+
+-------------------
+-- Calendar Bind --
+-------------------
+
+-- Joins CALENDAR_HOME and CALENDAR
+
+create table CALENDAR_BIND (
+  CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
+  CALENDAR_RESOURCE_ID      integer      not null references CALENDAR on delete cascade,
+
+  -- An invitation which hasn't been accepted yet will not yet have a resource
+  -- name, so this field may be null.
+
+  CALENDAR_RESOURCE_NAME    varchar(255),
+  BIND_MODE                 integer      not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS               integer      not null, -- enum CALENDAR_BIND_STATUS
+  SEEN_BY_OWNER             boolean      not null,
+  SEEN_BY_SHAREE            boolean      not null,
+  MESSAGE                   text,
+
+  primary key(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID), -- implicit index
+  unique(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_NAME)     -- implicit index
+);
+
+create index CALENDAR_BIND_RESOURCE_ID on CALENDAR_BIND(CALENDAR_RESOURCE_ID);
+
+-- Enumeration of calendar bind modes
+
+create table CALENDAR_BIND_MODE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_MODE values (0, 'own'  );
+insert into CALENDAR_BIND_MODE values (1, 'read' );
+insert into CALENDAR_BIND_MODE values (2, 'write');
+insert into CALENDAR_BIND_MODE values (3, 'direct');
+
+-- Enumeration of statuses
+
+create table CALENDAR_BIND_STATUS (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_STATUS values (0, 'invited' );
+insert into CALENDAR_BIND_STATUS values (1, 'accepted');
+insert into CALENDAR_BIND_STATUS values (2, 'declined');
+insert into CALENDAR_BIND_STATUS values (3, 'invalid');
+
+
+---------------------
+-- Calendar Object --
+---------------------
+
+create table CALENDAR_OBJECT (
+  RESOURCE_ID          integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  CALENDAR_RESOURCE_ID integer      not null references CALENDAR on delete cascade,
+  RESOURCE_NAME        varchar(255) not null,
+  ICALENDAR_TEXT       text         not null,
+  ICALENDAR_UID        varchar(255) not null,
+  ICALENDAR_TYPE       varchar(255) not null,
+  ATTACHMENTS_MODE     integer      default 0 not null, -- enum CALENDAR_OBJECT_ATTACHMENTS_MODE
+  DROPBOX_ID           varchar(255),
+  ORGANIZER            varchar(255),
+  ORGANIZER_OBJECT     integer      references CALENDAR_OBJECT,
+  RECURRANCE_MIN       date,        -- minimum date that recurrences have been expanded to.
+  RECURRANCE_MAX       date,        -- maximum date that recurrences have been expanded to.
+  ACCESS               integer      default 0 not null,
+  SCHEDULE_OBJECT      boolean      default false,
+  SCHEDULE_TAG         varchar(36)  default null,
+  SCHEDULE_ETAGS       text         default null,
+  PRIVATE_COMMENTS     boolean      default false not null,
+  MD5                  char(32)     not null,
+  CREATED              timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED             timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique(CALENDAR_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+
+  -- since the 'inbox' is a 'calendar resource' for the purpose of storing
+  -- calendar objects, this constraint has to be selectively enforced by the
+  -- application layer.
+
+  -- unique(CALENDAR_RESOURCE_ID, ICALENDAR_UID)
+);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_AND_ICALENDAR_UID on
+  CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_RECURRANCE_MAX on
+  CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, RECURRANCE_MAX);
+
+create index CALENDAR_OBJECT_ORGANIZER_OBJECT on
+  CALENDAR_OBJECT(ORGANIZER_OBJECT);
+
+create index CALENDAR_OBJECT_DROPBOX_ID on
+  CALENDAR_OBJECT(DROPBOX_ID);
+
+-- Enumeration of attachment modes
+
+create table CALENDAR_OBJECT_ATTACHMENTS_MODE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (0, 'none' );
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (1, 'read' );
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (2, 'write');
+
+
+-- Enumeration of calendar access types
+
+create table CALENDAR_ACCESS_TYPE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(32) not null unique
+);
+
+insert into CALENDAR_ACCESS_TYPE values (0, ''             );
+insert into CALENDAR_ACCESS_TYPE values (1, 'public'       );
+insert into CALENDAR_ACCESS_TYPE values (2, 'private'      );
+insert into CALENDAR_ACCESS_TYPE values (3, 'confidential' );
+insert into CALENDAR_ACCESS_TYPE values (4, 'restricted'   );
+
+-----------------
+-- Instance ID --
+-----------------
+
+create sequence INSTANCE_ID_SEQ;
+
+
+----------------
+-- Time Range --
+----------------
+
+create table TIME_RANGE (
+  INSTANCE_ID                 integer        primary key default nextval('INSTANCE_ID_SEQ'), -- implicit index
+  CALENDAR_RESOURCE_ID        integer        not null references CALENDAR on delete cascade,
+  CALENDAR_OBJECT_RESOURCE_ID integer        not null references CALENDAR_OBJECT on delete cascade,
+  FLOATING                    boolean        not null,
+  START_DATE                  timestamp      not null,
+  END_DATE                    timestamp      not null,
+  FBTYPE                      integer        not null,
+  TRANSPARENT                 boolean        not null
+);
+
+create index TIME_RANGE_CALENDAR_RESOURCE_ID on
+  TIME_RANGE(CALENDAR_RESOURCE_ID);
+create index TIME_RANGE_CALENDAR_OBJECT_RESOURCE_ID on
+  TIME_RANGE(CALENDAR_OBJECT_RESOURCE_ID);
+
+
+-- Enumeration of free/busy types
+
+create table FREE_BUSY_TYPE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into FREE_BUSY_TYPE values (0, 'unknown'         );
+insert into FREE_BUSY_TYPE values (1, 'free'            );
+insert into FREE_BUSY_TYPE values (2, 'busy'            );
+insert into FREE_BUSY_TYPE values (3, 'busy-unavailable');
+insert into FREE_BUSY_TYPE values (4, 'busy-tentative'  );
+
+
+------------------
+-- Transparency --
+------------------
+
+create table TRANSPARENCY (
+  TIME_RANGE_INSTANCE_ID      integer      not null references TIME_RANGE on delete cascade,
+  USER_ID                     varchar(255) not null,
+  TRANSPARENT                 boolean      not null
+);
+
+create index TRANSPARENCY_TIME_RANGE_INSTANCE_ID on
+  TRANSPARENCY(TIME_RANGE_INSTANCE_ID);
+
+----------------
+-- Attachment --
+----------------
+
+create table ATTACHMENT (
+  CALENDAR_HOME_RESOURCE_ID   integer       not null references CALENDAR_HOME,
+  DROPBOX_ID                  varchar(255)  not null,
+  CONTENT_TYPE                varchar(255)  not null,
+  SIZE                        integer       not null,
+  MD5                         char(32)      not null,
+  CREATED                     timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                    timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  PATH                        varchar(1024) not null,
+
+  primary key(DROPBOX_ID, PATH) --implicit index
+);
+
+create index ATTACHMENT_CALENDAR_HOME_RESOURCE_ID on
+  ATTACHMENT(CALENDAR_HOME_RESOURCE_ID);
+
+-----------------------
+-- Resource Property --
+-----------------------
+
+create table RESOURCE_PROPERTY (
+  RESOURCE_ID integer      not null, -- foreign key: *.RESOURCE_ID
+  NAME        varchar(255) not null,
+  VALUE       text         not null, -- FIXME: xml?
+  VIEWER_UID  varchar(255),
+
+  primary key(RESOURCE_ID, NAME, VIEWER_UID) -- implicit index
+);
+
+
+----------------------
+-- AddressBook Home --
+----------------------
+
+create table ADDRESSBOOK_HOME (
+  RESOURCE_ID      integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID        varchar(255) not null unique,                                -- implicit index
+  DATAVERSION	   integer      default 0 not null
+);
+
+-------------------------------
+-- AddressBook Home Metadata --
+-------------------------------
+
+create table ADDRESSBOOK_HOME_METADATA (
+  RESOURCE_ID      integer      primary key references ADDRESSBOOK_HOME on delete cascade, -- implicit index
+  QUOTA_USED_BYTES integer      default 0 not null,
+  CREATED          timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED         timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+-----------------
+-- AddressBook --
+-----------------
+
+create table ADDRESSBOOK (
+  RESOURCE_ID integer   primary key default nextval('RESOURCE_ID_SEQ') -- implicit index
+);
+
+
+--------------------------
+-- AddressBook Metadata --
+--------------------------
+
+create table ADDRESSBOOK_METADATA (
+  RESOURCE_ID integer   primary key references ADDRESSBOOK on delete cascade, -- implicit index
+  CREATED     timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED    timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+----------------------
+-- AddressBook Bind --
+----------------------
+
+-- Joins ADDRESSBOOK_HOME and ADDRESSBOOK
+
+create table ADDRESSBOOK_BIND (
+  ADDRESSBOOK_HOME_RESOURCE_ID integer      not null references ADDRESSBOOK_HOME,
+  ADDRESSBOOK_RESOURCE_ID      integer      not null references ADDRESSBOOK on delete cascade,
+
+  -- An invitation which hasn't been accepted yet will not yet have a resource
+  -- name, so this field may be null.
+
+  ADDRESSBOOK_RESOURCE_NAME    varchar(255),
+  BIND_MODE                    integer      not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS                  integer      not null, -- enum CALENDAR_BIND_STATUS
+  SEEN_BY_OWNER                boolean      not null,
+  SEEN_BY_SHAREE               boolean      not null,
+  MESSAGE                      text,                  -- FIXME: xml?
+
+  primary key(ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_ID), -- implicit index
+  unique(ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_NAME)     -- implicit index
+);
+
+create index ADDRESSBOOK_BIND_RESOURCE_ID on
+  ADDRESSBOOK_BIND(ADDRESSBOOK_RESOURCE_ID);
+
+create table ADDRESSBOOK_OBJECT (
+  RESOURCE_ID             integer      primary key default nextval('RESOURCE_ID_SEQ'),    -- implicit index
+  ADDRESSBOOK_RESOURCE_ID integer      not null references ADDRESSBOOK on delete cascade,
+  RESOURCE_NAME           varchar(255) not null,
+  VCARD_TEXT              text         not null,
+  VCARD_UID               varchar(255) not null,
+  MD5                     char(32)     not null,
+  CREATED                 timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique(ADDRESSBOOK_RESOURCE_ID, RESOURCE_NAME), -- implicit index
+  unique(ADDRESSBOOK_RESOURCE_ID, VCARD_UID)      -- implicit index
+);
+
+-----------------------
+-- Revision Sequence --
+-----------------------
+
+create sequence REVISION_SEQ;
+
+
+---------------
+-- Revisions --
+---------------
+
+create table CALENDAR_OBJECT_REVISIONS (
+  CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
+  CALENDAR_RESOURCE_ID      integer      references CALENDAR,
+  CALENDAR_NAME             varchar(255) default null,
+  RESOURCE_NAME             varchar(255),
+  REVISION                  integer      default nextval('REVISION_SEQ') not null,
+  DELETED                   boolean      not null
+);
+
+create index CALENDAR_OBJECT_REVISIONS_HOME_RESOURCE_ID_CALENDAR_RESOURCE_ID
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, REVISION);
+
+----------------------------------
+-- AddressBook Object Revisions --
+----------------------------------
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+  ADDRESSBOOK_HOME_RESOURCE_ID integer      not null references ADDRESSBOOK_HOME,
+  ADDRESSBOOK_RESOURCE_ID      integer      references ADDRESSBOOK,
+  ADDRESSBOOK_NAME             varchar(255) default null,
+  RESOURCE_NAME                varchar(255),
+  REVISION                     integer      default nextval('REVISION_SEQ') not null,
+  DELETED                      boolean      not null
+);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_HOME_RESOURCE_ID_ADDRESSBOOK_RESOURCE_ID
+  on ADDRESSBOOK_OBJECT_REVISIONS(ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_ID);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME
+  on ADDRESSBOOK_OBJECT_REVISIONS(ADDRESSBOOK_RESOURCE_ID, RESOURCE_NAME);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(ADDRESSBOOK_RESOURCE_ID, REVISION);
+
+-----------------------------------
+-- Notification Object Revisions --
+-----------------------------------
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+  NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME on delete cascade,
+  RESOURCE_NAME                 varchar(255),
+  REVISION                      integer      default nextval('REVISION_SEQ') not null,
+  DELETED                       boolean      not null,
+
+  unique(NOTIFICATION_HOME_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+);
+
+create index NOTIFICATION_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on NOTIFICATION_OBJECT_REVISIONS(NOTIFICATION_HOME_RESOURCE_ID, REVISION);
+
+-------------------------------------------
+-- Apple Push Notification Subscriptions --
+-------------------------------------------
+
+create table APN_SUBSCRIPTIONS (
+  TOKEN                         varchar(255) not null,
+  RESOURCE_KEY                  varchar(255) not null,
+  MODIFIED                      integer not null,
+  SUBSCRIBER_GUID               varchar(255) not null,
+  USER_AGENT                    varchar(255) default null,
+  IP_ADDR                       varchar(255) default null,
+
+  primary key(TOKEN, RESOURCE_KEY) -- implicit index
+);
+
+create index APN_SUBSCRIPTIONS_RESOURCE_KEY
+   on APN_SUBSCRIPTIONS(RESOURCE_KEY);
+
+
+--------------------
+-- Schema Version --
+--------------------
+
+create table CALENDARSERVER (
+  NAME                          varchar(255) primary key, -- implicit index
+  VALUE                         varchar(255)
+);
+
+insert into CALENDARSERVER values ('VERSION', '11');
+insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '3');
+insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '1');

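Both v11 snapshot files preserve the revision machinery the data stores rely on: each row in the *_OBJECT_REVISIONS tables defaults REVISION to nextval('REVISION_SEQ'), so a sync token is effectively a high-water mark against that sequence. A rough sketch (a hypothetical helper, not code from this changeset) of how such rows map onto a changed/deleted sync response:

    def classifySinceToken(rows, clientToken):
        # rows: (RESOURCE_NAME, REVISION, DELETED) tuples for one collection;
        # clientToken: the REVISION high-water mark from the client's token.
        changed, deleted = [], []
        for name, revision, wasDeleted in rows:
            if revision > clientToken:
                (deleted if wasDeleted else changed).append(name)
        return changed, deleted
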
Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/test_sql.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/test_sql.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/test_sql.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -18,22 +18,22 @@
 Tests for L{txdav.common.datastore.sql}.
 """
 
-from twext.enterprise.dal.syntax import Insert, Select
+from twext.enterprise.dal.syntax import Select
+from txdav.xml import element as davxml
 
-from twisted.internet.defer import Deferred, inlineCallbacks, returnValue
+from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet.task import Clock
-
 from twisted.trial.unittest import TestCase
+from twisted.internet.defer import Deferred
 
-from txdav.caldav.datastore.sql import Calendar
-from txdav.carddav.datastore.sql import AddressBook
-from txdav.common.datastore.sql import fixUUIDNormalization, log, \
-    CommonStoreTransactionMonitor, ECALENDARTYPE, EADDRESSBOOKTYPE
-from txdav.common.datastore.sql_tables import schema
-
+from txdav.common.datastore.sql import log, CommonStoreTransactionMonitor,\
+    CommonHome, CommonHomeChild, ECALENDARTYPE
+from txdav.common.datastore.sql_tables import schema, CALENDAR_BIND_TABLE,\
+    CALENDAR_OBJECT_REVISIONS_TABLE
 from txdav.common.datastore.test.util import CommonCommonTests, buildStore
-
 from txdav.common.icommondatastore import AllRetriesFailed
+from twext.enterprise.dal.syntax import Insert
+from txdav.common.datastore.sql import fixUUIDNormalization
 
 class CommonSQLStoreTests(CommonCommonTests, TestCase):
     """
@@ -61,7 +61,7 @@
         """
         txn.execSQL works with all logging options on.
         """
-
+        
         # Patch config to turn on logging then rebuild the store
         self.patch(self._sqlStore, "logLabels", True)
         self.patch(self._sqlStore, "logStats", True)
@@ -70,7 +70,7 @@
         txn = self.transactionUnderTest()
         cs = schema.CALENDARSERVER
         version = (yield Select(
-                [cs.VALUE, ],
+                [cs.VALUE,],
                 From=cs,
                 Where=cs.NAME == 'VERSION',
             ).on(txn))
@@ -82,20 +82,20 @@
         """
         CommonStoreTransactionMonitor logs waiting transactions.
         """
-
+        
         c = Clock()
         self.patch(CommonStoreTransactionMonitor, "callLater", c.callLater)
 
         # Patch config to turn on log waits then rebuild the store
         self.patch(self._sqlStore, "logTransactionWaits", 1)
-
+        
         ctr = [0]
         def counter(_ignore):
             ctr[0] += 1
         self.patch(log, "error", counter)
 
-        txn = self.transactionUnderTest()
-
+        txn = self.transactionUnderTest()        
+ 
         c.advance(2)
         self.assertNotEqual(ctr[0], 0)
         txn.abort()
@@ -105,13 +105,13 @@
         """
         CommonStoreTransactionMonitor terminates long transactions.
         """
-
+        
         c = Clock()
         self.patch(CommonStoreTransactionMonitor, "callLater", c.callLater)
 
         # Patch config to turn on transaction timeouts then rebuild the store
         self.patch(self._sqlStore, "timeoutTransactions", 1)
-
+        
         ctr = [0]
         def counter(_ignore):
             ctr[0] += 1
@@ -128,14 +128,14 @@
         """
         CommonStoreTransactionMonitor logs waiting transactions and terminates long transactions.
         """
-
+        
         c = Clock()
         self.patch(CommonStoreTransactionMonitor, "callLater", c.callLater)
 
         # Patch config to turn on log waits then rebuild the store
         self.patch(self._sqlStore, "logTransactionWaits", 1)
         self.patch(self._sqlStore, "timeoutTransactions", 2)
-
+        
         ctr = [0, 0]
         def counter(logStr):
             if "wait" in logStr:
@@ -145,7 +145,7 @@
         self.patch(log, "error", counter)
 
         txn = self.transactionUnderTest()
-
+        
         c.advance(2)
         self.assertNotEqual(ctr[0], 0)
         self.assertNotEqual(ctr[1], 0)
@@ -156,7 +156,7 @@
         """
         txn.subtransaction runs loop once.
         """
-
+        
         txn = self.transactionUnderTest()
         ctr = [0]
 
@@ -164,11 +164,11 @@
             ctr[0] += 1
             cs = schema.CALENDARSERVER
             return Select(
-                [cs.VALUE, ],
+                [cs.VALUE,],
                 From=cs,
                 Where=cs.NAME == 'VERSION',
             ).on(subtxn)
-
+            
         (yield txn.subtransaction(_test, retries=0))[0][0]
         self.assertEqual(ctr[0], 1)
 
@@ -178,7 +178,7 @@
         """
         txn.subtransaction runs loop twice when one failure.
         """
-
+        
         txn = self.transactionUnderTest()
         ctr = [0]
 
@@ -188,11 +188,11 @@
                 raise ValueError
             cs = schema.CALENDARSERVER
             return Select(
-                [cs.VALUE, ],
+                [cs.VALUE,],
                 From=cs,
                 Where=cs.NAME == 'VERSION',
             ).on(subtxn)
-
+            
         (yield txn.subtransaction(_test, retries=1))[0][0]
         self.assertEqual(ctr[0], 2)
 
@@ -202,7 +202,7 @@
         """
         txn.subtransaction runs loop once when one failure and no retries.
         """
-
+        
         txn = self.transactionUnderTest()
         ctr = [0]
 
@@ -211,11 +211,11 @@
             raise ValueError
             cs = schema.CALENDARSERVER
             return Select(
-                [cs.VALUE, ],
+                [cs.VALUE,],
                 From=cs,
                 Where=cs.NAME == 'VERSION',
             ).on(subtxn)
-
+        
         try:
             (yield txn.subtransaction(_test, retries=0))[0][0]
         except AllRetriesFailed:
@@ -231,7 +231,7 @@
         txn.subtransaction runs loop three times when all fail and two retries
         requested.
         """
-
+        
         txn = self.transactionUnderTest()
         ctr = [0]
 
@@ -240,11 +240,11 @@
             raise ValueError
             cs = schema.CALENDARSERVER
             return Select(
-                [cs.VALUE, ],
+                [cs.VALUE,],
                 From=cs,
                 Where=cs.NAME == 'VERSION',
             ).on(subtxn)
-
+        
         try:
             (yield txn.subtransaction(_test, retries=2))[0][0]
         except AllRetriesFailed:
@@ -282,15 +282,30 @@
 
 
     @inlineCallbacks
-    def test_calendarChangeRevision(self):
+    def test_changeRevision(self):
         """
-        Calendar._changeRevision actions.
+        CommonHomeChild._changeRevision actions.
         """
-
+        
+        class TestCommonHome(CommonHome):
+            _bindTable = CALENDAR_BIND_TABLE
+            _revisionsTable = CALENDAR_OBJECT_REVISIONS_TABLE
+    
+        class TestCommonHomeChild(CommonHomeChild):
+            _homeChildSchema = schema.CALENDAR
+            _homeChildMetaDataSchema = schema.CALENDAR_METADATA
+            _bindSchema = schema.CALENDAR_BIND
+            _revisionsSchema = schema.CALENDAR_OBJECT_REVISIONS
+            _bindTable = CALENDAR_BIND_TABLE
+            _revisionsTable = CALENDAR_OBJECT_REVISIONS_TABLE
+            
+            def resourceType(self):
+                return davxml.ResourceType.calendar
+    
         txn = self.transactionUnderTest()
         home = yield txn.homeWithUID(ECALENDARTYPE, "uid", create=True)
-        homeChild = yield Calendar.create(home, "B")
-
+        homeChild = yield TestCommonHomeChild.create(home, "B")
+        
         # insert test
         token = yield homeChild.syncToken()
         yield homeChild._changeRevision("insert", "C")
@@ -325,49 +340,6 @@
 
 
     @inlineCallbacks
-    def test_addressbookChangeRevision(self):
-        """
-        AddressBook._changeRevision actions.
-        """
-
-        txn = self.transactionUnderTest()
-        home = yield txn.homeWithUID(EADDRESSBOOKTYPE, "uid", create=True)
-        homeChild = yield AddressBook.create(home, "B")
-
-        # insert test
-        token = yield homeChild.syncToken()
-        yield homeChild._changeRevision("insert", "C")
-        changed = yield homeChild.resourceNamesSinceToken(token)
-        self.assertEqual(changed, (["C"], [],))
-
-        # update test
-        token = yield homeChild.syncToken()
-        yield homeChild._changeRevision("update", "C")
-        changed = yield homeChild.resourceNamesSinceToken(token)
-        self.assertEqual(changed, (["C"], [],))
-
-        # delete test
-        token = yield homeChild.syncToken()
-        yield homeChild._changeRevision("delete", "C")
-        changed = yield homeChild.resourceNamesSinceToken(token)
-        self.assertEqual(changed, ([], ["C"],))
-
-        # missing update test
-        token = yield homeChild.syncToken()
-        yield homeChild._changeRevision("update", "D")
-        changed = yield homeChild.resourceNamesSinceToken(token)
-        self.assertEqual(changed, (["D"], [],))
-
-        # missing delete test
-        token = yield homeChild.syncToken()
-        yield homeChild._changeRevision("delete", "E")
-        changed = yield homeChild.resourceNamesSinceToken(token)
-        self.assertEqual(changed, ([], [],))
-
-        yield txn.abort()
-
-
-    @inlineCallbacks
     def test_normalizeColumnUUIDs(self):
         """
         L{_normalizeColumnUUIDs} upper-cases only UUIDs in a given column.

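The whitespace-only churn in the earlier hunks comes from the trunk merge, but those hunks sit inside the tests of txn.subtransaction's retry behaviour, which is worth restating: retries=N permits up to N re-runs after a failure, and exhausting them raises AllRetriesFailed. A minimal sketch of that contract; the import path for AllRetriesFailed is an assumption, not taken from this diff:

    from twisted.internet.defer import inlineCallbacks
    from txdav.common.datastore.sql import AllRetriesFailed  # import path assumed

    @inlineCallbacks
    def demoRetries(txn):
        attempts = [0]

        def alwaysFails(subtxn):
            attempts[0] += 1
            raise ValueError()

        try:
            yield txn.subtransaction(alwaysFails, retries=2)
        except AllRetriesFailed:
            pass
        assert attempts[0] == 3  # the initial attempt plus two retries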
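The two per-backend copies of the revision test (Calendar and AddressBook) are folded into a single test_changeRevision driven through a throwaway CommonHomeChild subclass. The contract being pinned down is small enough to restate table-driven; this sketch uses only the methods already exercised above, with the expected results taken from the deleted addressbook test:

    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def checkRevisionContract(homeChild):
        # Each (action, name) pair should produce the listed
        # (changed, deleted) result from resourceNamesSinceToken().
        cases = [
            ("insert", "C", (["C"], [])),  # new resource reported as changed
            ("update", "C", (["C"], [])),  # modified resource reported as changed
            ("delete", "C", ([], ["C"])),  # removed resource reported as deleted
            ("update", "D", (["D"], [])),  # update of an unknown name still reported
            ("delete", "E", ([], [])),     # delete of an unknown name is a no-op
        ]
        for action, name, expected in cases:
            token = yield homeChild.syncToken()
            yield homeChild._changeRevision(action, name)
            changed = yield homeChild.resourceNamesSinceToken(token)
            assert changed == expected, (action, name, changed)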
Copied: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/test_sql_schema_files.py (from rev 10076, CalendarServer/trunk/txdav/common/datastore/test/test_sql_schema_files.py)
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/test_sql_schema_files.py	                        (rev 0)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/test_sql_schema_files.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -0,0 +1,102 @@
+# #
+# Copyright (c) 2012 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# #
+
+from twisted.python.modules import getModule
+from twisted.trial.unittest import TestCase
+import re
+
+"""
+Tests for the txdav.common.datastore.sql_schema files.
+"""
+
+class SQLSchemaFiles(TestCase):
+    """
+    Tests that txdav.common.datastore.sql_schema carries complete schema information. Note that upgrade files are checked elsewhere.
+    """
+
+    def versionFromSchema(self, filePath):
+        current_schema = filePath.getContent()
+        found = re.search("insert into CALENDARSERVER values \('VERSION', '(\d+)'\);", current_schema)
+        if found is None:
+            found = re.search("insert into CALENDARSERVER \(NAME, VALUE\) values \('VERSION', '(\d+)'\);", current_schema)
+            if found is None:
+                self.fail("Could not find version string in %s" % (filePath.path,))
+
+        return int(found.group(1))
+
+
+    def test_old_files(self):
+        """
+        Make sure txdav.common.datastore.sql_schema.old contains all the appropriate old versions
+        """
+
+        sqlSchema = getModule(__name__).filePath.parent().sibling("sql_schema")
+        currentSchema = sqlSchema.child("current.sql")
+        current_version = self.versionFromSchema(currentSchema)
+        current_set = set([i for i in range(3, current_version)])
+
+        oldDirectory = sqlSchema.child("old")
+
+        for child in oldDirectory.children():
+            if child.basename().startswith("."):
+                continue
+            old_set = set()
+            for oldVersion in child.children():
+                if oldVersion.basename().startswith("."):
+                    continue
+                found = re.search("v(\d+).sql", oldVersion.basename())
+                if found is None:
+                    self.fail("%s is not a valid old sql file" % (oldVersion))
+                old_set.add(int(found.group(1)))
+            self.assertEqual(current_set, old_set, msg="Missing old schema file for dialect: %s" % (child.basename(),))
+
+
+    def test_old_files_consistent(self):
+        """
+        Make sure the name of each old schema file in txdav.common.datastore.sql_schema.old matches the schema version recorded inside it
+        """
+
+        sqlSchema = getModule(__name__).filePath.parent().sibling("sql_schema")
+        oldDirectory = sqlSchema.child("old")
+
+        for child in oldDirectory.children():
+            if child.basename().startswith("."):
+                continue
+            for oldVersion in child.children():
+                if oldVersion.basename().startswith("."):
+                    continue
+                found = re.search("v(\d+).sql", oldVersion.basename())
+                if found is None:
+                    self.fail("%s is not a valid old sql file" % (oldVersion))
+                old_name_version = int(found.group(1))
+                old_version = self.versionFromSchema(oldVersion)
+                self.assertEqual(old_name_version, old_version, "Name of schema file does not match actual schema version: %s" % (oldVersion.path,))
+
+
+    def test_current_oracle(self):
+        """
+        Make sure current-oracle-dialect.sql matches current.sql
+        """
+
+        sqlSchema = getModule(__name__).filePath.parent().sibling("sql_schema")
+
+        currentSchema = sqlSchema.child("current.sql")
+        current_version = self.versionFromSchema(currentSchema)
+
+        currentOracleSchema = sqlSchema.child("current-oracle-dialect.sql")
+        current_oracle_version = self.versionFromSchema(currentOracleSchema)
+
+        self.assertEqual(current_version, current_oracle_version)

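Taken together, the new module pins three invariants: every version from 3 up to (but excluding) the current one has an old schema file for each dialect, each old file's name agrees with the version recorded inside it, and the postgres and oracle current schemas carry the same version. The version-extraction regexes are easy to sanity-check in isolation; the sample lines below are illustrative, not taken from the real schema files:

    import re

    sample = "insert into CALENDARSERVER values ('VERSION', '12');"
    found = re.search(r"insert into CALENDARSERVER values \('VERSION', '(\d+)'\);", sample)
    assert found is not None and int(found.group(1)) == 12

    # The oracle-style variant names the columns explicitly.
    sample = "insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '12');"
    found = re.search(r"insert into CALENDARSERVER \(NAME, VALUE\) values \('VERSION', '(\d+)'\);", sample)
    assert found is not None and int(found.group(1)) == 12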
Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/util.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/util.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/test/util.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -193,7 +193,17 @@
         store.label = currentTestID
         cp.startService()
         def stopIt():
-            return cp.stopService()
+            # active transactions should have been shut down.
+            wasBusy = len(cp._busy)
+            busyText = repr(cp._busy)
+            stop = cp.stopService()
+            def checkWasBusy(ignored):
+                if wasBusy:
+                    testCase.fail("Outstanding Transactions: " + busyText)
+                return ignored
+            if deriveValue(testCase, _SPECIAL_TXN_CLEAN, lambda tc: False):
+                stop.addBoth(checkWasBusy)
+            return stop
         testCase.addCleanup(stopIt)
         yield self.cleanStore(testCase, store)
         returnValue(store)
@@ -250,13 +260,20 @@
     for that test.
 
     @param testCase: the test case instance.
+    @type testCase: L{TestCase}
 
     @param attribute: the name of the attribute (the same name passed to
         L{withSpecialValue}).
+    @type attribute: L{str}
 
     @param computeDefault: A 1-argument callable, which will be called with
         C{testCase} to compute a default value for the attribute for the given
         test if no custom one was specified.
+    @type computeDefault: L{callable}
+
+    @return: the value of the given C{attribute} for the given C{testCase}, as
+        decorated with C{withSpecialValue}.
+    @rtype: same type as the return type of L{computeDefault}
     """
     testID = testCase.id()
     testMethodName = testID.split(".")[-1]
@@ -295,6 +312,7 @@
 
 
 _SPECIAL_QUOTA = "__special_quota__"
+_SPECIAL_TXN_CLEAN = "__special_txn_clean__"
 
 
 
@@ -328,12 +346,29 @@
     Test method decorator that will cause L{deriveQuota} to return a different
     value for test cases that run that test method.
 
-    @see: withSpecialValue
+    @see: L{withSpecialValue}
     """
     return withSpecialValue(_SPECIAL_QUOTA, quotaValue)
 
 
 
+def transactionClean(f=None):
+    """
+    Test method decorator that will cause L{buildStore} to check that no
+    transactions were left outstanding at the end of the test, and fail the
+    test if they are outstanding rather than terminating them by shutting down
+    the connection pool service.
+
+    @see: L{withSpecialValue}
+    """
+    decorator = withSpecialValue(_SPECIAL_TXN_CLEAN, True)
+    if f:
+        return decorator(f)
+    else:
+        return decorator
+
+
+
 @inlineCallbacks
 def populateCalendarsFrom(requirements, store, migrating=False):
     """

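With this change the cleanup hook installed by buildStore can fail any test that leaves transactions open in the connection pool, but only for tests that opt in via the new transactionClean decorator; accepting f as an optional argument lets it be applied with or without parentheses. A hedged usage sketch, in which the base class and test body are hypothetical and only transactionClean itself comes from this patch:

    from twisted.internet.defer import inlineCallbacks
    from txdav.common.datastore.test.util import transactionClean

    class LeakCheckedTests(StoreTestCase):  # hypothetical store-backed TestCase

        @transactionClean  # equivalently: @transactionClean()
        @inlineCallbacks
        def test_leavesNothingOpen(self):
            txn = self.transactionUnderTest()
            # ... exercise the store ...
            yield txn.commit()  # omitting this now fails the test via checkWasBusy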
Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/upgrade/migrate.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/upgrade/migrate.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/upgrade/migrate.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -223,6 +223,9 @@
                 lambda fileHome:
                 self.upgrader.migrateOneHome(fileTxn, homeType, fileHome)
             )
+            .addCallbacks(lambda ignored: fileTxn.commit(),
+                          lambda err: fileTxn.abort()
+                                      .addCallback(lambda ign: err))
             .addCallback(lambda ignored: {})
         )
 
@@ -343,7 +346,6 @@
                 "%s home %r already existed not migrating" % (
                     homeType, uid))
             yield sqlTxn.abort()
-            yield fileTxn.commit()
             returnValue(None)
         try:
             if sqlHome is None:
@@ -351,11 +353,9 @@
             yield migrateFunc(fileHome, sqlHome, merge=self.merge)
         except:
             f = Failure()
-            yield fileTxn.abort()
             yield sqlTxn.abort()
             f.raiseException()
         else:
-            yield fileTxn.commit()
             yield sqlTxn.commit()
             # Remove file home after migration. FIXME: instead, this should be a
             # public remove...HomeWithUID() API for de-provisioning.  (If we had
@@ -402,27 +402,20 @@
             )
             self.log_warn("Upgrade helpers ready.")
             parallelizer = Parallelizer(drivers)
+        else:
+            parallelizer = None
 
         self.log_warn("Beginning filesystem -> database upgrade.")
+
         for homeType, eachFunc in [
-                ("calendar", self.fileStore.eachCalendarHome),
-                ("addressbook", self.fileStore.eachAddressbookHome),
+                ("calendar", self.fileStore.withEachCalendarHomeDo),
+                ("addressbook", self.fileStore.withEachAddressbookHomeDo),
             ]:
-            for fileTxn, fileHome in eachFunc():
-                uid = fileHome.uid()
-                self.log_warn("Migrating %s UID %r" % (homeType, uid))
-                if parallel:
-                    # No-op transaction here: make sure everything's unlocked
-                    # before asking the subprocess to handle it.
-                    yield fileTxn.commit()
-                    @inlineCallbacks
-                    def doOneUpgrade(driver, fileUID=uid, homeType=homeType):
-                        yield driver.oneUpgrade(fileUID, homeType)
-                        self.log_warn("Completed migration of %s uid %r" %
-                                      (homeType, fileUID))
-                    yield parallelizer.do(doOneUpgrade)
-                else:
-                    yield self.migrateOneHome(fileTxn, homeType, fileHome)
+            yield eachFunc(
+                lambda txn, home: self._upgradeAction(
+                    txn, home, homeType, parallel, parallelizer
+                )
+            )
 
         if parallel:
             yield parallelizer.done()
@@ -458,6 +451,23 @@
             reactor.callLater(0, wrapped.setServiceParent, self.parent)
 
 
+    @inlineCallbacks
+    def _upgradeAction(self, fileTxn, fileHome, homeType, parallel,
+                       parallelizer):
+        uid = fileHome.uid()
+        self.log_warn("Migrating %s UID %r" % (homeType, uid))
+        if parallel:
+            @inlineCallbacks
+            def doOneUpgrade(driver, fileUID=uid, homeType=homeType):
+                yield driver.oneUpgrade(fileUID, homeType)
+                self.log_warn("Completed migration of %s uid %r" %
+                              (homeType, fileUID))
+            yield parallelizer.do(doOneUpgrade)
+        else:
+            yield self.migrateOneHome(fileTxn, homeType, fileHome)
+
+
+
     def startService(self):
         """
         Start the service.

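Two related changes here: the per-home file transaction is now committed or aborted by callbacks attached around migrateOneHome, so the explicit fileTxn.commit()/fileTxn.abort() calls inside it go away, and home enumeration switches from the pull-style eachCalendarHome() generators to the push-style withEachCalendarHomeDo(callback) API, letting the store own the transaction lifecycle. The commit-or-abort wrapper is the subtle part; a sketch of the pattern, where doMigration stands in for the migrateOneHome call:

    def migrateThenFinish(fileTxn, doMigration):
        d = doMigration(fileTxn)
        d.addCallbacks(
            # Success: commit the file-store transaction.
            lambda ignored: fileTxn.commit(),
            # Failure: abort, then re-propagate the original error by
            # returning the Failure from the errback.
            lambda err: fileTxn.abort().addCallback(lambda ign: err),
        )
        return d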
Modified: CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/upgrade/test/test_migrate.py
===================================================================
--- CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/upgrade/test/test_migrate.py	2012-11-17 02:37:43 UTC (rev 10078)
+++ CalendarServer/branches/users/gaya/sharedgroups/txdav/common/datastore/upgrade/test/test_migrate.py	2012-11-17 04:03:40 UTC (rev 10079)
@@ -156,7 +156,8 @@
         class StubService(Service, object):
             def startService(self):
                 super(StubService, self).startService()
-                subStarted.callback(None)
+                if not subStarted.called:
+                    subStarted.callback(None)
         from twisted.python import log
         def justOnce(evt):
             if evt.get('isError') and not hasattr(subStarted, 'result'):
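The new guard matters because a Deferred may only be fired once; a restarted stub service would otherwise trip AlreadyCalledError. A self-contained demonstration:

    from twisted.internet.defer import Deferred, AlreadyCalledError

    d = Deferred()
    d.callback(None)
    try:
        d.callback(None)  # what an unguarded second start would do
    except AlreadyCalledError:
        pass  # avoided above by checking `if not subStarted.called:`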