[CalendarServer-changes] [13408] CalendarServer/trunk

source_changes at macosforge.org source_changes at macosforge.org
Fri May 2 13:07:26 PDT 2014


Revision: 13408
          http://trac.calendarserver.org//changeset/13408
Author:   cdaboo at apple.com
Date:     2014-05-02 13:07:26 -0700 (Fri, 02 May 2014)
Log Message:
-----------
Implement background automatic purge of missing users (not enabled by default). This required some API
clean-ups and support for scheduling without a valid directory record.
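
The purge is driven by a new AutomaticPurging configuration group referenced throughout the diff below
(Enabled, PollingIntervalSeconds, CheckStaggerSeconds, PurgeIntervalSeconds, HomePurgeDelaySeconds). As a
rough sketch only, the corresponding defaults in twistedcaldav/stdconfig.py might look like the following;
the key names come from this changeset, but the default values shown here are assumptions since the
stdconfig.py hunk is not included in this excerpt:

    # Sketch of the AutomaticPurging defaults (key names from this changeset;
    # the default values shown here are assumptions).
    AUTOMATIC_PURGING_DEFAULTS = {
        "Enabled": False,                             # background purging is off by default
        "PollingIntervalSeconds": 7 * 24 * 60 * 60,   # weekly re-scan; -1 means one-shot (used in tests)
        "CheckStaggerSeconds": 3,                     # spacing between per-UID directory checks
        "PurgeIntervalSeconds": 7 * 24 * 60 * 60,     # grace period before purging a missing UID
        "HomePurgeDelaySeconds": 60,                  # wait for pending scheduling work before removing the home
    }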

Modified Paths:
--------------
    CalendarServer/trunk/calendarserver/tools/purge.py
    CalendarServer/trunk/calendarserver/tools/test/test_purge.py
    CalendarServer/trunk/calendarserver/tools/test/test_purge_old_events.py
    CalendarServer/trunk/twistedcaldav/directory/principal.py
    CalendarServer/trunk/twistedcaldav/ical.py
    CalendarServer/trunk/twistedcaldav/stdconfig.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/addressmapping.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/caldav/delivery.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/caldav/scheduler.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/cuaddress.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/icaldiff.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/implicit.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/scheduler.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/processing.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/scheduler.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_implicit.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_utils.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/utils.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py
    CalendarServer/trunk/txdav/caldav/datastore/sql.py
    CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py
    CalendarServer/trunk/txdav/caldav/datastore/test/util.py
    CalendarServer/trunk/txdav/carddav/datastore/sql.py
    CalendarServer/trunk/txdav/common/datastore/sql.py
    CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
    CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql
    CalendarServer/trunk/txdav/common/datastore/sql_tables.py
    CalendarServer/trunk/txdav/common/datastore/test/util.py
    CalendarServer/trunk/txdav/who/directory.py
    CalendarServer/trunk/txdav/who/util.py

Added Paths:
-----------
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_cuaddress.py
    CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v40.sql
    CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v40.sql
    CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_40_to_41.sql
    CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_40_to_41.sql
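
The v40-to-v41 schema upgrades added above introduce the work tables that drive the purge chain
(PRINCIPAL_PURGE_POLLING_WORK, PRINCIPAL_PURGE_CHECK_WORK and PRINCIPAL_PURGE_HOME_WORK appear in
purge.py below). A minimal sketch of how the chain is started, based on the enqueue calls in this
changeset; the helper name and transaction handling are illustrative only and roughly mirror what
scheduleNextPrincipalPurgeUpdate() (body not shown in this excerpt) is expected to do:

    import datetime

    from twisted.internet.defer import inlineCallbacks

    from calendarserver.tools.purge import PrincipalPurgePollingWork

    @inlineCallbacks
    def kickOffPurgeScan(store, seconds):
        # PrincipalPurgePollingWork fans out one PrincipalPurgeCheckWork per
        # provisioned home; a UID missing from the directory then leads to
        # PrincipalPurgeWork and finally PrincipalPurgeHomeWork.
        txn = store.newTransaction()
        notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=seconds)
        yield txn.enqueue(PrincipalPurgePollingWork, notBefore=notBefore)
        yield txn.commit()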

Modified: CalendarServer/trunk/calendarserver/tools/purge.py
===================================================================
--- CalendarServer/trunk/calendarserver/tools/purge.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/calendarserver/tools/purge.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -18,22 +18,28 @@
 from __future__ import print_function
 
 import collections
+import datetime
 from getopt import getopt, GetoptError
 import os
 import sys
 
 from calendarserver.tools import tables
 from calendarserver.tools.cmdline import utilityMain, WorkerService
+
 from pycalendar.datetime import DateTime
+
+from twext.enterprise.dal.record import fromTable
+from twext.enterprise.dal.syntax import Delete, Select, Union
+from twext.enterprise.jobqueue import WorkItem
 from twext.python.log import Logger
+
 from twisted.internet.defer import inlineCallbacks, returnValue
+
 from twistedcaldav import caldavxml
+from twistedcaldav.config import config
+
 from txdav.caldav.datastore.query.filter import Filter
-from twext.enterprise.dal.record import fromTable
-from twext.enterprise.dal.syntax import Delete, Select
-from twext.enterprise.jobqueue import WorkItem
-from txdav.common.datastore.sql_tables import schema
-import datetime
+from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_NORMAL
 
 log = Logger()
 
@@ -47,6 +53,11 @@
     WorkItem,
     fromTable(schema.PRINCIPAL_PURGE_POLLING_WORK)
 ):
+    """
+    A work item that scans the existing set of provisioned homes in the
+    store and creates a work item for each to be checked against the
+    directory to see if they need purging.
+    """
 
     group = "principal_purge_polling"
 
@@ -56,36 +67,44 @@
         # Delete all other work items
         yield Delete(From=self.table, Where=None).on(self.transaction)
 
-        # Schedule next update, 7 days out
-        notBefore = (
-            datetime.datetime.utcnow() +
-            datetime.timedelta(days=7)
-        )
-        log.info(
-            "Scheduling next principal purge scan update: {when}", when=notBefore
-        )
-        yield self.transaction.enqueue(
-            PrincipalPurgePollingWork,
-            notBefore=notBefore
-        )
+        # If not enabled, punt here
+        if not config.AutomaticPurging.Enabled:
+            returnValue(None)
 
+        # Schedule next update, 7 days out (default)
+        # Special - for testing it is handy to have this work item not regenerate, so
+        # we use an interval of -1 to signify a one-shot operation
+        if config.AutomaticPurging.PollingIntervalSeconds != -1:
+            notBefore = (
+                datetime.datetime.utcnow() +
+                datetime.timedelta(seconds=config.AutomaticPurging.PollingIntervalSeconds)
+            )
+            log.info(
+                "Scheduling next principal purge scan update: {when}", when=notBefore
+            )
+            yield self.transaction.enqueue(
+                PrincipalPurgePollingWork,
+                notBefore=notBefore
+            )
+
         # Do the scan
         allUIDs = set()
         for home in (schema.CALENDAR_HOME, schema.ADDRESSBOOK_HOME):
             for [uid] in (
                 yield Select(
                     [home.OWNER_UID],
-                    From=home
+                    From=home,
+                    Where=(home.STATUS == _HOME_STATUS_NORMAL),
                 ).on(self.transaction)
             ):
                 allUIDs.add(uid)
 
-        # Spread out the per-uid checks 1 second apart
+        # Spread out the per-uid checks by the configured stagger interval
         seconds = 0
         for uid in allUIDs:
             notBefore = (
                 datetime.datetime.utcnow() +
-                datetime.timedelta(seconds=seconds)
+                datetime.timedelta(seconds=config.AutomaticPurging.CheckStaggerSeconds)
             )
             seconds += 1
             yield self.transaction.enqueue(
@@ -96,13 +115,14 @@
 
 
 
-
 class PrincipalPurgeCheckWork(
     WorkItem,
     fromTable(schema.PRINCIPAL_PURGE_CHECK_WORK)
 ):
     """
-    Work item for checking for the existence of a UID in the directory
+    Work item for checking for the existence of a UID in the directory. This
+    work item is created by L{PrincipalPurgePollingWork} - one for each
+    unique user UID to check.
     """
 
     group = property(lambda self: self.uid)
@@ -116,6 +136,10 @@
             Where=self.table.UID == self.uid
         ).on(self.transaction)
 
+        # If not enabled, punt here
+        if not config.AutomaticPurging.Enabled:
+            returnValue(None)
+
         log.debug("Checking for existence of {uid} in directory", uid=self.uid)
         directory = self.transaction.store().directoryService()
         record = yield directory.recordWithUID(self.uid)
@@ -124,7 +148,7 @@
             # Schedule purge of this UID a week from now
             notBefore = (
                 datetime.datetime.utcnow() +
-                datetime.timedelta(minutes=1)
+                datetime.timedelta(seconds=config.AutomaticPurging.PurgeIntervalSeconds)
             )
             log.warn(
                 "Principal {uid} is no longer in the directory; scheduling clean-up at {when}",
@@ -159,6 +183,10 @@
             Where=self.table.UID == self.uid
         ).on(self.transaction)
 
+        # If not enabled, punt here
+        if not config.AutomaticPurging.Enabled:
+            returnValue(None)
+
         # Check for UID in directory again
         log.debug("One last existence check for {uid}", uid=self.uid)
         directory = self.transaction.store().directoryService()
@@ -166,17 +194,15 @@
 
         if record is None:
             # Time to go
-            service = PurgePrincipalService(self.transaction.store)
+            service = PurgePrincipalService(self.transaction.store())
             log.warn(
                 "Cleaning up future events for principal {uid} since they are no longer in directory",
                 uid=self.uid
             )
             yield service.purgeUIDs(
-                self.transaction.store,
+                self.transaction.store(),
                 directory,
                 [self.uid],
-                completely=False,
-                doimplicit=True,
                 proxies=True,
                 when=None
             )
@@ -184,6 +210,72 @@
             log.debug("{uid} has re-appeared in the directory", uid=self.uid)
 
 
+
+class PrincipalPurgeHomeWork(
+    WorkItem,
+    fromTable(schema.PRINCIPAL_PURGE_HOME_WORK)
+):
+    """
+    Work item for removing a UID's home
+    """
+
+    group = property(lambda self: str(self.homeResourceID))
+
+    @inlineCallbacks
+    def doWork(self):
+
+        # Delete any other work items for this UID
+        yield Delete(
+            From=self.table,
+            Where=self.table.HOME_RESOURCE_ID == self.homeResourceID
+        ).on(self.transaction)
+
+        # NB We do not check config.AutomaticPurging.Enabled here because if this work
+        # item was enqueued we always need to complete it
+
+        # Check for pending scheduling operations
+        sow = schema.SCHEDULE_ORGANIZER_WORK
+        srw = schema.SCHEDULE_REPLY_WORK
+        srcw = schema.SCHEDULE_REPLY_CANCEL_WORK
+        rows = yield Select(
+            [sow.HOME_RESOURCE_ID],
+            From=sow,
+            Where=(sow.HOME_RESOURCE_ID == self.homeResourceID),
+            SetExpression=Union(
+                Select(
+                    [srw.HOME_RESOURCE_ID],
+                    From=srw,
+                    Where=(srw.HOME_RESOURCE_ID == self.homeResourceID),
+                    SetExpression=Union(
+                        Select(
+                            [srcw.HOME_RESOURCE_ID],
+                            From=srcw,
+                            Where=(srcw.HOME_RESOURCE_ID == self.homeResourceID),
+                        )
+                    ),
+                )
+            ),
+        ).on(self.transaction)
+
+        if rows and len(rows):
+            # Regenerate this job
+            notBefore = (
+                datetime.datetime.utcnow() +
+                datetime.timedelta(seconds=config.AutomaticPurging.HomePurgeDelaySeconds)
+            )
+            yield self.transaction.enqueue(
+                PrincipalPurgeHomeWork,
+                homeResourceID=self.homeResourceID,
+                notBefore=notBefore
+            )
+        else:
+            # Get the home and remove it - only if properly marked as being purged
+            home = yield self.transaction.calendarHomeWithResourceID(self.homeResourceID)
+            if home.purging():
+                yield home.remove()
+
+
+
 @inlineCallbacks
 def scheduleNextPrincipalPurgeUpdate(store, seconds):
 
@@ -743,8 +835,6 @@
     uids = None
     dryrun = False
     verbose = False
-    completely = False
-    doimplicit = True
     proxies = True
     when = None
 
@@ -755,9 +845,9 @@
         print("usage: %s [options]" % (name,))
         print("")
         print("  Remove a principal's events and contacts from the calendar server")
+        print("  Future events are declined or cancelled")
         print("")
         print("options:")
-        print("  -c --completely: By default, only future events are canceled; this option cancels all events")
         print("  -h --help: print this help and exit")
         print("  -f --config <path>: Specify caldavd.plist configuration path")
         print("  -n --dry-run: calculate how many events and contacts to purge, but do not purge data")
@@ -777,14 +867,12 @@
 
         try:
             (optargs, args) = getopt(
-                sys.argv[1:], "cDf:hnv", [
-                    "completely",
+                sys.argv[1:], "Df:hnv", [
                     "dry-run",
                     "config=",
                     "help",
                     "verbose",
                     "debug",
-                    "noimplicit",
                 ],
             )
         except GetoptError, e:
@@ -797,16 +885,11 @@
         dryrun = False
         verbose = False
         debug = False
-        completely = False
-        doimplicit = True
 
         for opt, arg in optargs:
             if opt in ("-h", "--help"):
                 cls.usage()
 
-            elif opt in ("-c", "--completely"):
-                completely = True
-
             elif opt in ("-v", "--verbose"):
                 verbose = True
 
@@ -819,18 +902,13 @@
             elif opt in ("-f", "--config"):
                 configFileName = arg
 
-            elif opt in ("--noimplicit"):
-                doimplicit = False
-
             else:
                 raise NotImplementedError(opt)
 
         # args is a list of uids
         cls.uids = args
-        cls.completely = completely
         cls.dryrun = dryrun
         cls.verbose = verbose
-        cls.doimplicit = doimplicit
 
         utilityMain(
             configFileName,
@@ -842,15 +920,13 @@
     @classmethod
     @inlineCallbacks
     def purgeUIDs(cls, store, directory, uids, verbose=False, dryrun=False,
-                  completely=False, doimplicit=True, proxies=True, when=None):
+                  proxies=True, when=None):
 
         service = cls(store)
         service.directory = directory
         service.uids = uids
         service.verbose = verbose
         service.dryrun = dryrun
-        service.completely = completely
-        service.doimplicit = doimplicit
         service.proxies = proxies
         service.when = when
         result = yield service.doWork()
@@ -885,32 +961,18 @@
         if self.when is None:
             self.when = DateTime.getNowUTC()
 
-        # Does the record exist?
-        record = yield self.directory.recordWithUID(uid)
-        # MOVE2WHO
-        # if record is None:
-            # The user has already been removed from the directory service.  We
-            # need to fashion a temporary, fake record
+        cuas = set((
+            "urn:uuid:{}".format(uid),
+            "urn:x-uid:{}".format(uid)
+        ))
 
-            # FIXME: probably want a more elegant way to accomplish this,
-            # since it requires the aggregate directory to examine these first:
-            # record = DirectoryRecord(self.directory, "users", uid, shortNames=(uid,), enabledForCalendaring=True)
-            # self.directory._tmpRecords["shortNames"][uid] = record
-            # self.directory._tmpRecords["uids"][uid] = record
-
-        # Override augments settings for this record
-        record.hasCalendars = True
-        record.hasContacts = True
-
-        cua = record.canonicalCalendarUserAddress()
-
         # See if calendar home is provisioned
         txn = self.store.newTransaction()
         storeCalHome = yield txn.calendarHomeWithUID(uid)
         calHomeProvisioned = storeCalHome is not None
 
-        # If in "completely" mode, unshare collections, remove notifications
-        if calHomeProvisioned and self.completely:
+        # Always unshare collections and remove notifications
+        if calHomeProvisioned:
             yield self._cleanHome(txn, storeCalHome)
 
         yield txn.commit()
@@ -918,7 +980,7 @@
         count = 0
 
         if calHomeProvisioned:
-            count = yield self._cancelEvents(txn, uid, cua)
+            count = yield self._cancelEvents(txn, uid, cuas)
 
         # Remove empty calendar collections (and calendar home if no more
         # calendars)
@@ -930,7 +992,7 @@
         if self.proxies and not self.dryrun:
             if self.verbose:
                 print("Deleting any proxy assignments")
-            yield self._purgeProxyAssignments(self.store, record)
+            yield self._purgeProxyAssignments(self.store, uid)
 
         returnValue(count)
 
@@ -951,13 +1013,13 @@
 
         if not self.dryrun:
             yield storeCalHome.removeUnacceptedShares()
-            notificationHome = yield txn.notificationsWithUID(storeCalHome.uid())
+            notificationHome = yield txn.notificationsWithUID(storeCalHome.uid(), create=False)
             if notificationHome is not None:
                 yield notificationHome.remove()
 
 
     @inlineCallbacks
-    def _cancelEvents(self, txn, uid, cua):
+    def _cancelEvents(self, txn, uid, cuas):
 
         # Anything in the past is left alone
         whenString = self.when.getText()
@@ -983,73 +1045,59 @@
             txn = self.store.newTransaction(authz_uid=uid)
             storeCalHome = yield txn.calendarHomeWithUID(uid)
             calendar = yield storeCalHome.calendarWithName(calendarName)
-            childNames = []
+            allChildNames = []
+            futureChildNames = set()
 
-            if self.completely:
+            # Only purge owned calendars
+            if calendar.owned():
                 # all events
                 for childName in (yield calendar.listCalendarObjects()):
-                    childNames.append(childName)
-            else:
+                    allChildNames.append(childName)
+
                 # events matching filter
                 for childName, _ignore_childUid, _ignore_childType in (yield calendar.search(query_filter)):
-                    childNames.append(childName)
+                    futureChildNames.add(childName)
+
             yield txn.commit()
 
-            for childName in childNames:
+            for childName in allChildNames:
 
                 txn = self.store.newTransaction(authz_uid=uid)
                 storeCalHome = yield txn.calendarHomeWithUID(uid)
                 calendar = yield storeCalHome.calendarWithName(calendarName)
+                doScheduling = childName in futureChildNames
 
                 try:
                     childResource = yield calendar.calendarObjectWithName(childName)
 
-                    # Always delete inbox items
-                    if self.completely or calendar.isInbox():
-                        action = self.CANCELEVENT_SHOULD_DELETE
-                    else:
-                        event = yield childResource.componentForUser()
-                        action = self._cancelEvent(event, self.when, cua)
-
                     uri = "/calendars/__uids__/%s/%s/%s" % (storeCalHome.uid(), calendar.name(), childName)
-                    if action == self.CANCELEVENT_MODIFIED:
-                        if self.verbose:
-                            if self.dryrun:
-                                print("Would modify: %s" % (uri,))
-                            else:
-                                print("Modifying: %s" % (uri,))
-                        if not self.dryrun:
-                            yield childResource.setComponent(event)
-                        count += 1
+                    incrementCount = self.dryrun
+                    if self.verbose:
+                        if self.dryrun:
+                            print("Would delete%s: %s" % (" with scheduling" if doScheduling else "", uri,))
+                        else:
+                            print("Deleting%s: %s" % (" with scheduling" if doScheduling else "", uri,))
+                    if not self.dryrun:
+                        retry = False
+                        try:
+                            yield childResource.remove(implicitly=doScheduling)
+                            incrementCount = True
+                        except Exception, e:
+                            print("Exception deleting %s: %s" % (uri, str(e)))
+                            retry = True
 
-                    elif action == self.CANCELEVENT_SHOULD_DELETE:
-                        incrementCount = self.dryrun
-                        if self.verbose:
-                            if self.dryrun:
-                                print("Would delete: %s" % (uri,))
-                            else:
-                                print("Deleting: %s" % (uri,))
-                        if not self.dryrun:
-                            retry = False
+                        if retry and doScheduling:
+                            # Try again with implicit scheduling off
+                            print("Retrying deletion of %s with scheduling turned off" % (uri,))
                             try:
-                                yield childResource.remove(implicitly=self.doimplicit)
+                                yield childResource.remove(implicitly=False)
                                 incrementCount = True
                             except Exception, e:
-                                print("Exception deleting %s: %s" % (uri, str(e)))
-                                retry = True
+                                print("Still couldn't delete %s even with scheduling turned off: %s" % (uri, str(e)))
 
-                            if retry and self.doimplicit:
-                                # Try again with implicit scheduling off
-                                print("Retrying deletion of %s with implicit scheduling turned off" % (uri, childName))
-                                try:
-                                    yield childResource.remove(implicitly=False)
-                                    incrementCount = True
-                                except Exception, e:
-                                    print("Still couldn't delete %s even with implicit scheduling turned off: %s" % (uri, str(e)))
+                    if incrementCount:
+                        count += 1
 
-                        if incrementCount:
-                            count += 1
-
                     # Commit
                     yield txn.commit()
 
@@ -1095,8 +1143,22 @@
                         else:
                             print("Deleting calendar home")
                     if not self.dryrun:
-                        yield storeCalHome.remove()
+                        # Queue a job to delete the calendar home after any scheduling operations
+                        # are complete
+                        notBefore = (
+                            datetime.datetime.utcnow() +
+                            datetime.timedelta(seconds=config.AutomaticPurging.HomePurgeDelaySeconds)
+                        )
+                        yield txn.enqueue(
+                            PrincipalPurgeHomeWork,
+                            homeResourceID=storeCalHome.id(),
+                            notBefore=notBefore
+                        )
 
+                        # Also mark the home as purging so it won't be looked at again during
+                        # purge polling
+                        yield storeCalHome.purge()
+
             # Commit
             yield txn.commit()
 
@@ -1159,112 +1221,12 @@
 
         returnValue(count)
 
-    CANCELEVENT_SKIPPED = 1
-    CANCELEVENT_MODIFIED = 2
-    CANCELEVENT_NOT_MODIFIED = 3
-    CANCELEVENT_SHOULD_DELETE = 4
 
-    @classmethod
-    def _cancelEvent(cls, event, when, cua):
-        """
-        Modify a VEVENT such that all future occurrences are removed
-
-        @param event: the event to modify
-        @type event: L{twistedcaldav.ical.Component}
-
-        @param when: the cutoff date (anything after which is removed)
-        @type when: DateTime
-
-        @param cua: Calendar User Address of principal being purged, to compare
-            to see if it's the organizer of the event or just an attendee
-        @type cua: string
-
-        Assumes that event does not occur entirely in the past.
-
-        @return: one of the 4 constants above to indicate what action to take
-        """
-
-        whenDate = when.duplicate()
-        whenDate.setDateOnly(True)
-
-        # Only process VEVENT
-        if event.mainType() != "VEVENT":
-            return cls.CANCELEVENT_SKIPPED
-
-        main = event.mainComponent()
-
-        # Anything completely in the future is deleted
-        dtstart = main.getStartDateUTC()
-        isDateTime = not dtstart.isDateOnly()
-        if dtstart > when:
-            return cls.CANCELEVENT_SHOULD_DELETE
-
-        organizer = main.getOrganizer()
-
-        # Non-meetings are deleted
-        if organizer is None:
-            return cls.CANCELEVENT_SHOULD_DELETE
-
-        # Meetings which cua is merely an attendee are deleted (thus implicitly
-        # declined)
-        # FIXME: I think we want to decline anything after the cut-off, not delete
-        # the whole event.
-        if organizer != cua:
-            return cls.CANCELEVENT_SHOULD_DELETE
-
-        dirty = False
-
-        # Set the UNTIL on RRULE to cease at the cutoff
-        if main.hasProperty("RRULE"):
-            for rrule in main.properties("RRULE"):
-                rrule = rrule.value()
-                if rrule.getUseCount():
-                    rrule.setUseCount(False)
-
-                rrule.setUseUntil(True)
-                if isDateTime:
-                    rrule.setUntil(when)
-                else:
-                    rrule.setUntil(whenDate)
-                dirty = True
-
-        # Remove any EXDATEs and RDATEs beyond the cutoff
-        for dateType in ("EXDATE", "RDATE"):
-            if main.hasProperty(dateType):
-                for exdate_rdate in main.properties(dateType):
-                    newValues = []
-                    for value in exdate_rdate.value():
-                        if value.getValue() < when:
-                            newValues.append(value)
-                        else:
-                            exdate_rdate.value().remove(value)
-                            dirty = True
-                    if not newValues:
-                        main.removeProperty(exdate_rdate)
-                        dirty = True
-
-        # Remove any overridden components beyond the cutoff
-        for component in tuple(event.subcomponents()):
-            if component.name() == "VEVENT":
-                dtstart = component.getStartDateUTC()
-                remove = False
-                if dtstart > when:
-                    remove = True
-                if remove:
-                    event.removeComponent(component)
-                    dirty = True
-
-        if dirty:
-            return cls.CANCELEVENT_MODIFIED
-        else:
-            return cls.CANCELEVENT_NOT_MODIFIED
-
-
     @inlineCallbacks
-    def _purgeProxyAssignments(self, store, record):
+    def _purgeProxyAssignments(self, store, uid):
 
         txn = store.newTransaction()
         for readWrite in (True, False):
-            yield txn.removeDelegates(record.uid, readWrite)
-            yield txn.removeDelegateGroupss(record.uid, readWrite)
+            yield txn.removeDelegates(uid, readWrite)
+            yield txn.removeDelegateGroups(uid, readWrite)
         yield txn.commit()
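
With the purge.py API changes above, direct callers of PurgePrincipalService.purgeUIDs() no longer pass
the removed completely/doimplicit flags; future events are always cancelled or declined via implicit
scheduling. A minimal usage sketch of the new calling convention (store, directory and uid are
placeholders supplied by the caller):

    from twisted.internet.defer import inlineCallbacks, returnValue

    from calendarserver.tools.purge import PurgePrincipalService

    @inlineCallbacks
    def purgeMissingUser(store, directory, uid):
        # New signature: only verbose/dryrun/proxies/when remain as options.
        count = yield PurgePrincipalService.purgeUIDs(
            store, directory, [uid],
            verbose=False, dryrun=False, proxies=True, when=None,
        )
        returnValue(count)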

Modified: CalendarServer/trunk/calendarserver/tools/test/test_purge.py
===================================================================
--- CalendarServer/trunk/calendarserver/tools/test/test_purge.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/calendarserver/tools/test/test_purge.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -15,22 +15,27 @@
 ##
 
 
-from calendarserver.tools.purge import PurgePrincipalService
-from twistedcaldav.config import config
-from twistedcaldav.ical import Component
-from twistedcaldav.test.util import StoreTestCase
+from calendarserver.tools.purge import PurgePrincipalService, \
+    PrincipalPurgeHomeWork, PrincipalPurgePollingWork, PrincipalPurgeCheckWork, \
+    PrincipalPurgeWork
 
 from pycalendar.datetime import DateTime
-from pycalendar.timezone import Timezone
 
-from twisted.internet.defer import inlineCallbacks
-from txdav.common.datastore.test.util import populateCalendarsFrom
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, Deferred
+
+from twistedcaldav.config import config
+from twistedcaldav.test.util import StoreTestCase
+
 from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE
+from txdav.common.datastore.test.util import populateCalendarsFrom
 
 from txweb2.http_headers import MimeType
 
+import datetime
 
 
+
 future = DateTime.getNowUTC()
 future.offsetDay(1)
 future = future.getText()
@@ -61,9 +66,9 @@
 SUMMARY:Organizer
 DTSTART:%s
 DURATION:PT1H
-ORGANIZER:urn:uuid:E9E78C86-4829-4520-A35D-70DDADAB2092
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:E9E78C86-4829-4520-A35D-70DDADAB2092
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ORGANIZER:urn:uuid:10000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:10000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:10000000-0000-0000-0000-000000000002
 END:VEVENT
 END:VCALENDAR
 """.replace("\n", "\r\n") % (future,)
@@ -76,9 +81,9 @@
 SUMMARY:Attendee
 DTSTART:%s
 DURATION:PT1H
-ORGANIZER:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:E9E78C86-4829-4520-A35D-70DDADAB2092
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ORGANIZER:urn:uuid:10000000-0000-0000-0000-000000000002
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:10000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:10000000-0000-0000-0000-000000000002
 END:VEVENT
 END:VCALENDAR
 """.replace("\n", "\r\n") % (future,)
@@ -119,9 +124,9 @@
 SUMMARY:Organizer
 DTSTART:%s
 DURATION:PT1H
-ORGANIZER:urn:uuid:1CB4378B-DD76-462D-B4D4-BD131FE89243
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:1CB4378B-DD76-462D-B4D4-BD131FE89243
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ORGANIZER:urn:uuid:F0000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:F0000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:10000000-0000-0000-0000-000000000002
 END:VEVENT
 END:VCALENDAR
 """.replace("\n", "\r\n") % (future,)
@@ -134,9 +139,9 @@
 SUMMARY:Attendee
 DTSTART:%s
 DURATION:PT1H
-ORGANIZER:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:1CB4378B-DD76-462D-B4D4-BD131FE89243
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ORGANIZER:urn:uuid:10000000-0000-0000-0000-000000000002
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:F0000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:10000000-0000-0000-0000-000000000002
 END:VEVENT
 END:VCALENDAR
 """.replace("\n", "\r\n") % (future,)
@@ -150,9 +155,9 @@
 DTSTART:%s
 DURATION:PT1H
 RRULE:FREQ=DAILY;COUNT=400
-ORGANIZER:urn:uuid:1CB4378B-DD76-462D-B4D4-BD131FE89243
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:1CB4378B-DD76-462D-B4D4-BD131FE89243
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ORGANIZER:urn:uuid:F0000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:F0000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:10000000-0000-0000-0000-000000000002
 END:VEVENT
 END:VCALENDAR
 """.replace("\n", "\r\n") % (past,)
@@ -180,10 +185,10 @@
 SUMMARY:Organizer
 DTSTART:%s
 DURATION:PT1H
-ORGANIZER:urn:uuid:767F9EB0-8A58-4F61-8163-4BE0BB72B873
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:767F9EB0-8A58-4F61-8163-4BE0BB72B873
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:42EB074A-F859-4E8F-A4D0-7F0ADCB73D87
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ORGANIZER:urn:uuid:F0000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:F0000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:F0000000-0000-0000-0000-000000000002
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:10000000-0000-0000-0000-000000000002
 END:VEVENT
 END:VCALENDAR
 """.replace("\n", "\r\n") % (future,)
@@ -197,10 +202,10 @@
 SUMMARY:Attendee
 DTSTART:%s
 DURATION:PT1H
-ORGANIZER:urn:uuid:42EB074A-F859-4E8F-A4D0-7F0ADCB73D87
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:767F9EB0-8A58-4F61-8163-4BE0BB72B873
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:42EB074A-F859-4E8F-A4D0-7F0ADCB73D87
+ORGANIZER:urn:uuid:F0000000-0000-0000-0000-000000000002
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:F0000000-0000-0000-0000-000000000002
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:10000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:F0000000-0000-0000-0000-000000000001
 END:VEVENT
 END:VCALENDAR
 """.replace("\n", "\r\n") % (future,)
@@ -213,531 +218,16 @@
 SUMMARY:2 non-existent attendees
 DTSTART:%s
 DURATION:PT1H
-ORGANIZER:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:767F9EB0-8A58-4F61-8163-4BE0BB72B873
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:42EB074A-F859-4E8F-A4D0-7F0ADCB73D87
+ORGANIZER:urn:uuid:10000000-0000-0000-0000-000000000002
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:10000000-0000-0000-0000-000000000002
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:F0000000-0000-0000-0000-000000000001
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:F0000000-0000-0000-0000-000000000002
 END:VEVENT
 END:VCALENDAR
 """.replace("\n", "\r\n") % (future,)
 
 
 
-class CancelEventTestCase(StoreTestCase):
-
-    def test_cancelRepeating(self):
-        # A repeating event where purged CUA is organizer
-        event = Component.fromString(REPEATING_1_ICS_BEFORE)
-        action = PurgePrincipalService._cancelEvent(event, DateTime(2010, 12, 6, 12, 0, 0, Timezone(utc=True)),
-            "urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1")
-        self.assertEquals(action, PurgePrincipalService.CANCELEVENT_MODIFIED)
-        self.assertEquals(str(event), REPEATING_1_ICS_AFTER)
-
-
-    def test_cancelAllDayRepeating(self):
-        # A repeating All Day event where purged CUA is organizer
-        event = Component.fromString(REPEATING_2_ICS_BEFORE)
-        action = PurgePrincipalService._cancelEvent(event, DateTime(2010, 12, 6, 12, 0, 0, Timezone(utc=True)),
-            "urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1")
-        self.assertEquals(action, PurgePrincipalService.CANCELEVENT_MODIFIED)
-        self.assertEquals(str(event), REPEATING_2_ICS_AFTER)
-
-
-    def test_cancelFutureEvent(self):
-        # A future event
-        event = Component.fromString(FUTURE_EVENT_ICS)
-        action = PurgePrincipalService._cancelEvent(event, DateTime(2010, 12, 6, 12, 0, 0, Timezone(utc=True)),
-            "urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1")
-        self.assertEquals(action, PurgePrincipalService.CANCELEVENT_SHOULD_DELETE)
-
-
-    def test_cancelNonMeeting(self):
-        # A repeating non-meeting event
-        event = Component.fromString(REPEATING_NON_MEETING_ICS)
-        action = PurgePrincipalService._cancelEvent(event, DateTime(2010, 12, 6, 12, 0, 0, Timezone(utc=True)),
-            "urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1")
-        self.assertEquals(action, PurgePrincipalService.CANCELEVENT_SHOULD_DELETE)
-
-
-    def test_cancelAsAttendee(self):
-        # A repeating meeting event where purged CUA is an attendee
-        event = Component.fromString(REPEATING_ATTENDEE_MEETING_ICS)
-        action = PurgePrincipalService._cancelEvent(event, DateTime(2010, 12, 6, 12, 0, 0, Timezone(utc=True)),
-            "urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1")
-        self.assertEquals(action, PurgePrincipalService.CANCELEVENT_SHOULD_DELETE)
-
-
-    def test_cancelAsAttendeeOccurrence(self):
-        # A repeating meeting occurrence with no master, where purged CUA is
-        # an attendee
-        event = Component.fromString(INVITED_TO_OCCURRENCE_ICS)
-        action = PurgePrincipalService._cancelEvent(event, DateTime(2010, 12, 6, 12, 0, 0, Timezone(utc=True)),
-            "urn:uuid:9DC04A71-E6DD-11DF-9492-0800200C9A66")
-        self.assertEquals(action, PurgePrincipalService.CANCELEVENT_SHOULD_DELETE)
-
-
-    def test_cancelAsAttendeeMultipleOccurrences(self):
-        # Multiple meeting occurrences with no master, where purged CUA is
-        # an attendee
-        event = Component.fromString(INVITED_TO_MULTIPLE_OCCURRENCES_ICS)
-        action = PurgePrincipalService._cancelEvent(event, DateTime(2010, 12, 6, 12, 0, 0, Timezone(utc=True)),
-            "urn:uuid:9DC04A71-E6DD-11DF-9492-0800200C9A66")
-        self.assertEquals(action, PurgePrincipalService.CANCELEVENT_SHOULD_DELETE)
-
-# This event begins on Nov 30, 2010, has two EXDATES (Dec 3 and 9), and has two
-# overridden instances (Dec 4 and 11).  The Dec 11 one will be removed since
-# the cutoff date for this test is Dec 6.
-
-REPEATING_1_ICS_BEFORE = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Apple Inc.//iCal 4.0.4//EN
-BEGIN:VTIMEZONE
-TZID:US/Pacific
-BEGIN:DAYLIGHT
-DTSTART:20070311T020000
-RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3
-TZNAME:PDT
-TZOFFSETFROM:-0800
-TZOFFSETTO:-0700
-END:DAYLIGHT
-BEGIN:STANDARD
-DTSTART:20071104T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11
-TZNAME:PST
-TZOFFSETFROM:-0700
-TZOFFSETTO:-0800
-END:STANDARD
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:59E260E3-1644-4BDF-BBC6-6130B0C3A520
-DTSTART;TZID=US/Pacific:20101130T100000
-DTEND;TZID=US/Pacific:20101130T110000
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
- PANT;SCHEDULE-STATUS=1.2:urn:uuid:3FF02D2B-07A3-4420-8570-7B7C7D07F08A
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:0F1684
- 77-CF3D-45D3-AE60-9875EA02C4D1
-CREATED:20101203T184815Z
-DTSTAMP:20101203T185019Z
-EXDATE;TZID=US/Pacific:20101203T100000
-EXDATE;TZID=US/Pacific:20101209T100000
-ORGANIZER;CN=Purge Test:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-RRULE:FREQ=DAILY;COUNT=400
-SEQUENCE:4
-SUMMARY:Repeating 1
-TRANSP:OPAQUE
-END:VEVENT
-BEGIN:VEVENT
-UID:59E260E3-1644-4BDF-BBC6-6130B0C3A520
-RECURRENCE-ID;TZID=US/Pacific:20101204T100000
-DTSTART;TZID=US/Pacific:20101204T120000
-DTEND;TZID=US/Pacific:20101204T130000
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
- PANT;SCHEDULE-STATUS=2.0:urn:uuid:3FF02D2B-07A3-4420-8570-7B7C7D07F08A
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:0F1684
- 77-CF3D-45D3-AE60-9875EA02C4D1
-CREATED:20101203T184815Z
-DTSTAMP:20101203T185027Z
-ORGANIZER;CN=Purge Test:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-SEQUENCE:6
-SUMMARY:Repeating 1
-TRANSP:OPAQUE
-END:VEVENT
-BEGIN:VEVENT
-UID:59E260E3-1644-4BDF-BBC6-6130B0C3A520
-RECURRENCE-ID;TZID=US/Pacific:20101211T100000
-DTSTART;TZID=US/Pacific:20101211T120000
-DTEND;TZID=US/Pacific:20101211T130000
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
- PANT;SCHEDULE-STATUS=2.0:urn:uuid:3FF02D2B-07A3-4420-8570-7B7C7D07F08A
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:0F1684
- 77-CF3D-45D3-AE60-9875EA02C4D1
-CREATED:20101203T184815Z
-DTSTAMP:20101203T185038Z
-ORGANIZER;CN=Purge Test:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-SEQUENCE:6
-SUMMARY:Repeating 1
-TRANSP:OPAQUE
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n")
-
-REPEATING_1_ICS_AFTER = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Apple Inc.//iCal 4.0.4//EN
-BEGIN:VTIMEZONE
-TZID:US/Pacific
-BEGIN:DAYLIGHT
-DTSTART:20070311T020000
-RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3
-TZNAME:PDT
-TZOFFSETFROM:-0800
-TZOFFSETTO:-0700
-END:DAYLIGHT
-BEGIN:STANDARD
-DTSTART:20071104T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11
-TZNAME:PST
-TZOFFSETFROM:-0700
-TZOFFSETTO:-0800
-END:STANDARD
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:59E260E3-1644-4BDF-BBC6-6130B0C3A520
-DTSTART;TZID=US/Pacific:20101130T100000
-DTEND;TZID=US/Pacific:20101130T110000
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
- PANT;SCHEDULE-STATUS=1.2:urn:uuid:3FF02D2B-07A3-4420-8570-7B7C7D07F08A
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:0F1684
- 77-CF3D-45D3-AE60-9875EA02C4D1
-CREATED:20101203T184815Z
-DTSTAMP:20101203T185019Z
-EXDATE;TZID=US/Pacific:20101203T100000
-ORGANIZER;CN=Purge Test:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-RRULE:FREQ=DAILY;UNTIL=20101206T120000Z
-SEQUENCE:4
-SUMMARY:Repeating 1
-TRANSP:OPAQUE
-END:VEVENT
-BEGIN:VEVENT
-UID:59E260E3-1644-4BDF-BBC6-6130B0C3A520
-RECURRENCE-ID;TZID=US/Pacific:20101204T100000
-DTSTART;TZID=US/Pacific:20101204T120000
-DTEND;TZID=US/Pacific:20101204T130000
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
- PANT;SCHEDULE-STATUS=2.0:urn:uuid:3FF02D2B-07A3-4420-8570-7B7C7D07F08A
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:0F1684
- 77-CF3D-45D3-AE60-9875EA02C4D1
-CREATED:20101203T184815Z
-DTSTAMP:20101203T185027Z
-ORGANIZER;CN=Purge Test:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-SEQUENCE:6
-SUMMARY:Repeating 1
-TRANSP:OPAQUE
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n")
-
-# This event is similar to the "Repeating 1" event above except this one is an
-# all-day event.
-
-REPEATING_2_ICS_BEFORE = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Apple Inc.//iCal 4.0.4//EN
-BEGIN:VEVENT
-UID:53BA0EA4-05B1-4E89-BD1E-8397F071FD6A
-DTSTART;VALUE=DATE:20101130
-DTEND;VALUE=DATE:20101201
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
- PANT;SCHEDULE-STATUS=1.2:urn:uuid:3FF02D2B-07A3-4420-8570-7B7C7D07F08A
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:0F1684
- 77-CF3D-45D3-AE60-9875EA02C4D1
-CREATED:20101203T203510Z
-DTSTAMP:20101203T203603Z
-EXDATE;VALUE=DATE:20101203
-EXDATE;VALUE=DATE:20101209
-ORGANIZER;CN=Purge Test:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-RRULE:FREQ=DAILY;COUNT=400
-SEQUENCE:5
-SUMMARY:All Day
-TRANSP:TRANSPARENT
-END:VEVENT
-BEGIN:VEVENT
-UID:53BA0EA4-05B1-4E89-BD1E-8397F071FD6A
-RECURRENCE-ID;VALUE=DATE:20101211
-DTSTART;VALUE=DATE:20101211
-DTEND;VALUE=DATE:20101212
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:0F1684
- 77-CF3D-45D3-AE60-9875EA02C4D1
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
- PANT;SCHEDULE-STATUS=1.2:urn:uuid:3FF02D2B-07A3-4420-8570-7B7C7D07F08A
-CREATED:20101203T203510Z
-DTSTAMP:20101203T203631Z
-ORGANIZER;CN=Purge Test:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-SEQUENCE:6
-SUMMARY:Modified Title
-TRANSP:TRANSPARENT
-END:VEVENT
-BEGIN:VEVENT
-UID:53BA0EA4-05B1-4E89-BD1E-8397F071FD6A
-RECURRENCE-ID;VALUE=DATE:20101204
-DTSTART;VALUE=DATE:20101204
-DTEND;VALUE=DATE:20101205
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
- PANT;SCHEDULE-STATUS=1.2:urn:uuid:3FF02D2B-07A3-4420-8570-7B7C7D07F08A
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:0F1684
- 77-CF3D-45D3-AE60-9875EA02C4D1
-CREATED:20101203T203510Z
-DTSTAMP:20101203T203618Z
-ORGANIZER;CN=Purge Test:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-SEQUENCE:7
-SUMMARY:Modified Title
-TRANSP:TRANSPARENT
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n")
-
-REPEATING_2_ICS_AFTER = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Apple Inc.//iCal 4.0.4//EN
-BEGIN:VEVENT
-UID:53BA0EA4-05B1-4E89-BD1E-8397F071FD6A
-DTSTART;VALUE=DATE:20101130
-DTEND;VALUE=DATE:20101201
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
- PANT;SCHEDULE-STATUS=1.2:urn:uuid:3FF02D2B-07A3-4420-8570-7B7C7D07F08A
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:0F1684
- 77-CF3D-45D3-AE60-9875EA02C4D1
-CREATED:20101203T203510Z
-DTSTAMP:20101203T203603Z
-EXDATE;VALUE=DATE:20101203
-ORGANIZER;CN=Purge Test:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-RRULE:FREQ=DAILY;UNTIL=20101206
-SEQUENCE:5
-SUMMARY:All Day
-TRANSP:TRANSPARENT
-END:VEVENT
-BEGIN:VEVENT
-UID:53BA0EA4-05B1-4E89-BD1E-8397F071FD6A
-RECURRENCE-ID;VALUE=DATE:20101204
-DTSTART;VALUE=DATE:20101204
-DTEND;VALUE=DATE:20101205
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
- PANT;SCHEDULE-STATUS=1.2:urn:uuid:3FF02D2B-07A3-4420-8570-7B7C7D07F08A
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:0F1684
- 77-CF3D-45D3-AE60-9875EA02C4D1
-CREATED:20101203T203510Z
-DTSTAMP:20101203T203618Z
-ORGANIZER;CN=Purge Test:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-SEQUENCE:7
-SUMMARY:Modified Title
-TRANSP:TRANSPARENT
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n")
-
-
-# This event is on Dec 8 (in the future compared to Dec 6) and should be flagged
-# as needing to be deleted
-
-FUTURE_EVENT_ICS = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Apple Inc.//iCal 4.0.4//EN
-BEGIN:VTIMEZONE
-TZID:US/Pacific
-BEGIN:DAYLIGHT
-DTSTART:20070311T020000
-RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3
-TZNAME:PDT
-TZOFFSETFROM:-0800
-TZOFFSETTO:-0700
-END:DAYLIGHT
-BEGIN:STANDARD
-DTSTART:20071104T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11
-TZNAME:PST
-TZOFFSETFROM:-0700
-TZOFFSETTO:-0800
-END:STANDARD
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:97B243D3-D252-4034-AA6D-9AE34E063991
-DTSTART;TZID=US/Pacific:20101208T091500
-DTEND;TZID=US/Pacific:20101208T101500
-CREATED:20101203T172929Z
-DTSTAMP:20101203T172932Z
-SEQUENCE:2
-SUMMARY:Future event single
-TRANSP:OPAQUE
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n")
-
-REPEATING_NON_MEETING_ICS = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//Apple Inc.//iCal 4.0.4//EN
-BEGIN:VTIMEZONE
-TZID:US/Pacific
-BEGIN:DAYLIGHT
-DTSTART:20070311T020000
-RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3
-TZNAME:PDT
-TZOFFSETFROM:-0800
-TZOFFSETTO:-0700
-END:DAYLIGHT
-BEGIN:STANDARD
-DTSTART:20071104T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11
-TZNAME:PST
-TZOFFSETFROM:-0700
-TZOFFSETTO:-0800
-END:STANDARD
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:4E4D0C8C-6546-4777-9BF5-AD629C05E7D5
-DTSTART;TZID=US/Pacific:20101130T110000
-DTEND;TZID=US/Pacific:20101130T120000
-CREATED:20101203T204353Z
-DTSTAMP:20101203T204409Z
-RRULE:FREQ=DAILY;COUNT=400
-SEQUENCE:3
-SUMMARY:Repeating non meeting
-TRANSP:OPAQUE
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n")
-
-REPEATING_ATTENDEE_MEETING_ICS = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VTIMEZONE
-TZID:US/Pacific
-BEGIN:DAYLIGHT
-DTSTART:20070311T020000
-RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3
-TZNAME:PDT
-TZOFFSETFROM:-0800
-TZOFFSETTO:-0700
-END:DAYLIGHT
-BEGIN:STANDARD
-DTSTART:20071104T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11
-TZNAME:PST
-TZOFFSETFROM:-0700
-TZOFFSETTO:-0800
-END:STANDARD
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:111A679F-EF8E-4CA5-9262-7C805E2C184D
-DTSTART;TZID=US/Pacific:20101130T120000
-DTEND;TZID=US/Pacific:20101130T130000
-ATTENDEE;CN=Test User;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:3FF02D2
- B-07A3-4420-8570-7B7C7D07F08A
-ATTENDEE;CN=Purge Test;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTIC
- IPANT:urn:uuid:0F168477-CF3D-45D3-AE60-9875EA02C4D1
-CREATED:20101203T204908Z
-DTSTAMP:20101203T204927Z
-ORGANIZER;CN=Test User;SCHEDULE-STATUS=1.2:urn:uuid:3FF02D2B-07A3-4420-857
- 0-7B7C7D07F08A
-RRULE:FREQ=DAILY;COUNT=400
-SEQUENCE:4
-SUMMARY:As an attendee
-TRANSP:OPAQUE
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n")
-
-INVITED_TO_OCCURRENCE_ICS = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-METHOD:REQUEST
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VTIMEZONE
-TZID:America/Los_Angeles
-BEGIN:DAYLIGHT
-DTSTART:20070311T020000
-RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3
-TZNAME:PDT
-TZOFFSETFROM:-0800
-TZOFFSETTO:-0700
-END:DAYLIGHT
-BEGIN:STANDARD
-DTSTART:20071104T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11
-TZNAME:PST
-TZOFFSETFROM:-0700
-TZOFFSETTO:-0800
-END:STANDARD
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:44A391CF-52F5-46B4-B35A-E000E3002084
-RECURRENCE-ID;TZID=America/Los_Angeles:20111103T150000
-DTSTART;TZID=America/Los_Angeles:20111103T150000
-DTEND;TZID=America/Los_Angeles:20111103T170000
-ATTENDEE;CN=Betty Test;CUTYPE=INDIVIDUAL;EMAIL=betty at example.com;PARTSTAT=
- NEEDS-ACTION;ROLE=REQ-PARTICIPANT;RSVP=TRUE:urn:uuid:9DC04A71-E6DD-11DF-94
- 92-0800200C9A66
-ATTENDEE;CN=Amanda Test;CUTYPE=INDIVIDUAL;EMAIL=amanda at example.com;PARTSTA
- T=ACCEPTED:urn:uuid:9DC04A70-E6DD-11DF-9492-0800200C9A66
-CREATED:20111101T205355Z
-DTSTAMP:20111101T205506Z
-ORGANIZER;CN=Amanda Test;EMAIL=amanda at example.com:urn:uuid:9DC04A70-E6DD-1
- 1DF-9492-0800200C9A66
-SEQUENCE:5
-SUMMARY:Repeating
-TRANSP:OPAQUE
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n")
-
-
-INVITED_TO_MULTIPLE_OCCURRENCES_ICS = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-METHOD:REQUEST
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VTIMEZONE
-TZID:America/Los_Angeles
-BEGIN:DAYLIGHT
-DTSTART:20070311T020000
-RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3
-TZNAME:PDT
-TZOFFSETFROM:-0800
-TZOFFSETTO:-0700
-END:DAYLIGHT
-BEGIN:STANDARD
-DTSTART:20071104T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11
-TZNAME:PST
-TZOFFSETFROM:-0700
-TZOFFSETTO:-0800
-END:STANDARD
-END:VTIMEZONE
-BEGIN:VEVENT
-UID:44A391CF-52F5-46B4-B35A-E000E3002084
-RECURRENCE-ID;TZID=America/Los_Angeles:20111103T150000
-DTSTART;TZID=America/Los_Angeles:20111103T150000
-DTEND;TZID=America/Los_Angeles:20111103T170000
-ATTENDEE;CN=Betty Test;CUTYPE=INDIVIDUAL;EMAIL=betty at example.com;PARTSTAT=
- NEEDS-ACTION;ROLE=REQ-PARTICIPANT;RSVP=TRUE:urn:uuid:9DC04A71-E6DD-11DF-94
- 92-0800200C9A66
-ATTENDEE;CN=Amanda Test;CUTYPE=INDIVIDUAL;EMAIL=amanda at example.com;PARTSTA
- T=ACCEPTED:urn:uuid:9DC04A70-E6DD-11DF-9492-0800200C9A66
-CREATED:20111101T205355Z
-DTSTAMP:20111101T205506Z
-ORGANIZER;CN=Amanda Test;EMAIL=amanda at example.com:urn:uuid:9DC04A70-E6DD-1
- 1DF-9492-0800200C9A66
-SEQUENCE:5
-SUMMARY:Repeating
-TRANSP:OPAQUE
-END:VEVENT
-BEGIN:VEVENT
-ATTENDEE;CN="Amanda Test";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:9
- DC04A70-E6DD-11DF-9492-0800200C9A66
-ATTENDEE;CN="Betty Test";CUTYPE=INDIVIDUAL;EMAIL="betty at example.com";PAR
- TSTAT=NEEDS-ACTION;ROLE=REQ-PARTICIPANT;RSVP=TRUE:mailto:betty at example.c
- om
-DTEND;TZID=America/Los_Angeles:20111105T170000
-TRANSP:OPAQUE
-ORGANIZER;CN="Amanda Test":urn:uuid:9DC04A70-E6DD-11DF-9492-0800200C9A66
-UID:44A391CF-52F5-46B4-B35A-E000E3002084
-DTSTAMP:20111102T162426Z
-SEQUENCE:5
-RECURRENCE-ID;TZID=America/Los_Angeles:20111105T150000
-SUMMARY:Repeating
-DTSTART;TZID=America/Los_Angeles:20111105T150000
-CREATED:20111101T205355Z
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n")
-
-
 ATTACHMENT_ICS = """BEGIN:VCALENDAR
 VERSION:2.0
 PRODID:-//Apple Inc.//iCal 4.0.1//EN
@@ -785,21 +275,22 @@
 DTSTART:%s
 DURATION:PT1H
 RRULE:FREQ=DAILY;COUNT=400
-ORGANIZER:urn:uuid:6423F94A-6B76-4A3A-815B-D52CFD77935D
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:6423F94A-6B76-4A3A-815B-D52CFD77935D
-ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:uuid:291C2C29-B663-4342-8EA1-A055E6A04D65
+ORGANIZER:urn:x-uid:user01
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:x-uid:user01
+ATTENDEE;CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:urn:x-uid:user02
 DTSTAMP:20100303T195203Z
 END:VEVENT
 END:VCALENDAR
 """.replace("\n", "\r\n") % (past,)
 
 
+
 class PurgePrincipalTests(StoreTestCase):
     """
     Tests for purging the data belonging to a given principal
     """
-    uid = "6423F94A-6B76-4A3A-815B-D52CFD77935D"
-    uid2 = "37DB0C90-4DB1-4932-BC69-3DAB66F374F5"
+    uid = "user01"
+    uid2 = "user02"
 
     metadata = {
         "accessMode": "PUBLIC",
@@ -814,11 +305,14 @@
             "calendar1" : {
                 "attachment.ics" : (ATTACHMENT_ICS, metadata,),
                 "organizer.ics" : (REPEATING_PUBLIC_EVENT_ORGANIZER_ICS, metadata,),
-            }
+            },
+            "inbox": {},
         },
         uid2 : {
             "calendar2" : {
-            }
+                "attendee.ics" : (REPEATING_PUBLIC_EVENT_ORGANIZER_ICS, metadata,),
+            },
+            "inbox": {},
         },
     }
 
@@ -857,7 +351,15 @@
         self.assertNotEquals(calendar1, None)
         yield txn.commit()
 
+        # Now remove user01
+        yield self.directory.removeRecords((self.uid,))
+        self.patch(config.Scheduling.Options.WorkQueues, "Enabled", False)
+        self.patch(config.AutomaticPurging, "PollingIntervalSeconds", -1)
+        self.patch(config.AutomaticPurging, "CheckStaggerSeconds", 1)
+        self.patch(config.AutomaticPurging, "PurgeIntervalSeconds", 3)
+        self.patch(config.AutomaticPurging, "HomePurgeDelaySeconds", 1)
 
+
     @inlineCallbacks
     def populate(self):
         yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
@@ -871,23 +373,37 @@
         """
 
         # Now you see it
-        txn = self._sqlCalendarStore.newTransaction()
-        home = yield txn.calendarHomeWithUID(self.uid)
+        home = yield self.homeUnderTest(name=self.uid)
         self.assertNotEquals(home, None)
-        yield txn.commit()
 
+        calobj2 = yield self.calendarObjectUnderTest(name="attendee.ics", calendar_name="calendar2", home=self.uid2)
+        comp = yield calobj2.componentForUser()
+        self.assertTrue("STATUS:CANCELLED" not in str(comp))
+        self.assertTrue(";UNTIL=" not in str(comp))
+        yield self.commit()
+
         count = (yield PurgePrincipalService.purgeUIDs(self.storeUnderTest(), self.directory,
-            (self.uid,), verbose=False, proxies=False, completely=True))
+            (self.uid,), verbose=False, proxies=False))
         self.assertEquals(count, 2) # 2 events
 
+        # Wait for queue to process
+        while(True):
+            txn = self.transactionUnderTest()
+            work = yield PrincipalPurgeHomeWork.all(txn)
+            yield self.commit()
+            if len(work) == 0:
+                break
+            d = Deferred()
+            reactor.callLater(1, lambda : d.callback(None))
+            yield d
+
         # Now you don't
-        txn = self._sqlCalendarStore.newTransaction()
-        home = yield txn.calendarHomeWithUID(self.uid)
+        home = yield self.homeUnderTest(name=self.uid)
         self.assertEquals(home, None)
         # Verify calendar1 was unshared to uid2
-        home2 = yield txn.calendarHomeWithUID(self.uid2)
+        home2 = yield self.homeUnderTest(name=self.uid2)
         self.assertEquals((yield home2.childWithName(self.sharedName)), None)
-        yield txn.commit()
+        yield self.commit()
 
         count = yield PurgePrincipalService.purgeUIDs(
             self.storeUnderTest(),
@@ -895,50 +411,89 @@
             (self.uid,),
             verbose=False,
             proxies=False,
-            completely=True
         )
         self.assertEquals(count, 0)
 
         # And you still don't (making sure it's not provisioned)
-        txn = self._sqlCalendarStore.newTransaction()
-        home = yield txn.calendarHomeWithUID(self.uid)
+        home = yield self.homeUnderTest(name=self.uid)
         self.assertEquals(home, None)
-        yield txn.commit()
+        yield self.commit()
 
+        calobj2 = yield self.calendarObjectUnderTest(name="attendee.ics", calendar_name="calendar2", home=self.uid2)
+        comp = yield calobj2.componentForUser()
+        self.assertTrue("STATUS:CANCELLED" in str(comp))
+        self.assertTrue(";UNTIL=" not in str(comp))
+        yield self.commit()
 
+
+
+class PurgePrincipalTestsWithWorkQueue(PurgePrincipalTests):
+    """
+    Same as L{PurgePrincipalTests} but with the work queue enabled.
+    """
+
     @inlineCallbacks
-    def test_purgeUIDsNotCompletely(self):
+    def setUp(self):
+        yield super(PurgePrincipalTestsWithWorkQueue, self).setUp()
+        self.patch(config.Scheduling.Options.WorkQueues, "Enabled", True)
+        self.patch(config.AutomaticPurging, "PollingIntervalSeconds", -1)
+        self.patch(config.AutomaticPurging, "CheckStaggerSeconds", 1)
+        self.patch(config.AutomaticPurging, "PurgeIntervalSeconds", 3)
+        self.patch(config.AutomaticPurging, "HomePurgeDelaySeconds", 1)
+
+
+    @inlineCallbacks
+    def test_purgeUIDService(self):
         """
-        Verify purgeUIDs removes some events, but leaves others and the home behind
+        Test that the full sequence of work items is processed via automatic polling.
         """
 
-        self.patch(config, "EnablePrivateEvents", True)
-
         # Now you see it
-        txn = self._sqlCalendarStore.newTransaction()
-        home = (yield txn.calendarHomeWithUID(self.uid))
+        home = yield self.homeUnderTest(name=self.uid)
         self.assertNotEquals(home, None)
-        yield txn.commit()
 
-        count = (yield PurgePrincipalService.purgeUIDs(self.storeUnderTest(), self.directory,
-            (self.uid,), verbose=False, proxies=False, completely=False))
-        self.assertEquals(count, 1) # 2 events
+        calobj2 = yield self.calendarObjectUnderTest(name="attendee.ics", calendar_name="calendar2", home=self.uid2)
+        comp = yield calobj2.componentForUser()
+        self.assertTrue("STATUS:CANCELLED" not in str(comp))
+        self.assertTrue(";UNTIL=" not in str(comp))
+        yield self.commit()
 
-        # Now you still see it
-        txn = self._sqlCalendarStore.newTransaction()
-        home = (yield txn.calendarHomeWithUID(self.uid))
-        self.assertNotEquals(home, None)
+        txn = self.transactionUnderTest()
+        notBefore = (
+            datetime.datetime.utcnow() +
+            datetime.timedelta(seconds=3)
+        )
+        yield txn.enqueue(PrincipalPurgePollingWork, notBefore=notBefore)
+        yield self.commit()
+
+        while True:
+            txn = self.transactionUnderTest()
+            work1 = yield PrincipalPurgePollingWork.all(txn)
+            work2 = yield PrincipalPurgeCheckWork.all(txn)
+            work3 = yield PrincipalPurgeWork.all(txn)
+            work4 = yield PrincipalPurgeHomeWork.all(txn)
+
+            if len(work4) != 0:
+                home = yield txn.calendarHomeWithUID(self.uid)
+                self.assertTrue(home.purging())
+
+            yield self.commit()
+            #print len(work1), len(work2), len(work3), len(work4)
+            if len(work1) + len(work2) + len(work3) + len(work4) == 0:
+                break
+            d = Deferred()
+            reactor.callLater(1, lambda : d.callback(None))
+            yield d
+
+        # Now you don't
+        home = yield self.homeUnderTest(name=self.uid)
+        self.assertEquals(home, None)
         # Verify calendar1 was unshared to uid2
-        home2 = (yield txn.calendarHomeWithUID(self.uid2))
+        home2 = yield self.homeUnderTest(name=self.uid2)
         self.assertEquals((yield home2.childWithName(self.sharedName)), None)
-        yield txn.commit()
 
-        count = yield PurgePrincipalService.purgeUIDs(self.storeUnderTest(), self.directory,
-            (self.uid,), verbose=False, proxies=False, completely=False)
-        self.assertEquals(count, 1)
-
-        # And you still do
-        txn = self._sqlCalendarStore.newTransaction()
-        home = (yield txn.calendarHomeWithUID(self.uid))
-        self.assertNotEquals(home, None)
-        yield txn.commit()
+        calobj2 = yield self.calendarObjectUnderTest(name="attendee.ics", calendar_name="calendar2", home=self.uid2)
+        comp = yield calobj2.componentForUser()
+        self.assertTrue("STATUS:CANCELLED" in str(comp))
+        self.assertTrue(";UNTIL=" not in str(comp))
+        yield self.commit()

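Both purge test modules wait for the background work queue to drain by polling PrincipalPurgeHomeWork until no items remain. As a minimal sketch only (not part of this changeset), the repeated loop could be factored into a helper; the waitForWork name and the one-second poll interval are illustrative:

    from twisted.internet import reactor
    from twisted.internet.defer import Deferred, inlineCallbacks

    @inlineCallbacks
    def waitForWork(testCase, workType, poll=1):
        # Poll the store until no outstanding work items of workType remain.
        # Mirrors the inline loops used in the purge tests above.
        while True:
            txn = testCase.transactionUnderTest()
            work = yield workType.all(txn)
            yield testCase.commit()
            if len(work) == 0:
                break
            d = Deferred()
            reactor.callLater(poll, lambda: d.callback(None))
            yield d

A test would then call, for example, yield waitForWork(self, PrincipalPurgeHomeWork) in place of the inline loop.
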
Modified: CalendarServer/trunk/calendarserver/tools/test/test_purge_old_events.py
===================================================================
--- CalendarServer/trunk/calendarserver/tools/test/test_purge_old_events.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/calendarserver/tools/test/test_purge_old_events.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -21,12 +21,12 @@
 import os
 
 from calendarserver.tools.purge import (
-    PurgeOldEventsService, PurgeAttachmentsService, PurgePrincipalService
+    PurgeOldEventsService, PurgeAttachmentsService, PurgePrincipalService, PrincipalPurgeHomeWork
 )
 from pycalendar.datetime import DateTime
-from pycalendar.timezone import Timezone
 from twext.enterprise.dal.syntax import Update, Delete
-from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
 from twistedcaldav.config import config
 from twistedcaldav.test.util import StoreTestCase
 from twistedcaldav.vcard import Component as VCardComponent
@@ -385,7 +385,8 @@
                 "oldmattachment1.ics" : (MATTACHMENT_ICS % {"year": now - 5, "uid": "1.1m"}, metadata,),
                 "oldmattachment2.ics" : (MATTACHMENT_ICS % {"year": now - 5, "uid": "1.2m"}, metadata,),
                 "currentmattachment3.ics" : (MATTACHMENT_ICS % {"year": now + 1, "uid": "1.3m"}, metadata,),
-            }
+            },
+            "inbox": {},
         },
         "home2" : {
             "calendar2" : {
@@ -402,16 +403,25 @@
             },
             "calendar3" : {
                 "repeating_awhile.ics" : (REPEATING_AWHILE_ICS, metadata,),
-            }
+            },
+            "inbox": {},
         }
     }
 
+
     def configure(self):
         super(PurgeOldEventsTests, self).configure()
 
         # Turn off delayed indexing option so we can have some useful tests
         self.patch(config, "FreeBusyIndexDelayedExpand", False)
 
+        # Tweak queue timing to speed things up
+        self.patch(config.Scheduling.Options.WorkQueues, "Enabled", False)
+        self.patch(config.AutomaticPurging, "PollingIntervalSeconds", -1)
+        self.patch(config.AutomaticPurging, "CheckStaggerSeconds", 1)
+        self.patch(config.AutomaticPurging, "PurgeIntervalSeconds", 3)
+        self.patch(config.AutomaticPurging, "HomePurgeDelaySeconds", 1)
+
         # self.patch(config.DirectoryService.params, "xmlFile",
         #     os.path.join(
         #         os.path.dirname(__file__), "purge", "accounts.xml"
@@ -675,47 +685,21 @@
         # Make the newly created objects available to the purgeUID transaction
         (yield txn.commit())
 
-        # Purge home1
-        total = yield PurgePrincipalService.purgeUIDs(self._sqlCalendarStore, self.directory,
-            ("home1",), verbose=False, proxies=False,
-            when=DateTime(now, 4, 1, 12, 0, 0, 0, Timezone(utc=True)))
-
-        # 4 items deleted: 3 events and 1 vcard
-        self.assertEquals(total, 4)
-
-        txn = self._sqlCalendarStore.newTransaction()
-        # adressbook home is deleted since it's now empty
-        abHome = (yield txn.addressbookHomeWithUID("home1"))
-        self.assertEquals(abHome, None)
-
-        calHome = (yield txn.calendarHomeWithUID("home1"))
-        calColl = (yield calHome.calendarWithName("calendar1"))
-        self.assertEquals(len((yield calColl.calendarObjects())), 5)
-
-
-    @inlineCallbacks
-    def test_purgeUIDCompletely(self):
-        txn = self._sqlCalendarStore.newTransaction()
-
-        # Create an addressbook and one CardDAV resource
-        abHome = (yield txn.addressbookHomeWithUID("home1", create=True))
-        abColl = (yield abHome.addressbookWithName("addressbook"))
-        (yield abColl.createAddressBookObjectWithName("card1",
-            VCardComponent.fromString(VCARD_1)))
-        self.assertEquals(len((yield abColl.addressbookObjects())), 1)
-
-        # Verify there are 8 events in calendar1
-        calHome = (yield txn.calendarHomeWithUID("home1"))
-        calColl = (yield calHome.calendarWithName("calendar1"))
-        self.assertEquals(len((yield calColl.calendarObjects())), 8)
-
-        # Make the newly created objects available to the purgeUID transaction
-        (yield txn.commit())
-
         # Purge home1 completely
         total = yield PurgePrincipalService.purgeUIDs(self._sqlCalendarStore, self.directory,
-            ("home1",), verbose=False, proxies=False, completely=True)
+            ("home1",), verbose=False, proxies=False)
 
+        # Wait for queue to process
+        while(True):
+            txn = self.transactionUnderTest()
+            work = yield PrincipalPurgeHomeWork.all(txn)
+            yield self.commit()
+            if len(work) == 0:
+                break
+            d = Deferred()
+            reactor.callLater(1, lambda : d.callback(None))
+            yield d
+
         # 9 items deleted: 8 events and 1 vcard
         self.assertEquals(total, 9)
 

Modified: CalendarServer/trunk/twistedcaldav/directory/principal.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/principal.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/twistedcaldav/directory/principal.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -54,7 +54,7 @@
 )
 from twistedcaldav.extensions import DirectoryElement
 from twistedcaldav.resource import CalendarPrincipalCollectionResource, CalendarPrincipalResource
-from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
+from txdav.caldav.datastore.scheduling.utils import normalizeCUAddr
 from txdav.who.delegates import RecordType as DelegateRecordType
 from txdav.who.directory import CalendarDirectoryRecordMixin
 from txdav.xml import element as davxml

Modified: CalendarServer/trunk/twistedcaldav/ical.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/ical.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/twistedcaldav/ical.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -45,9 +45,10 @@
 from twistedcaldav.dateops import timeRangesOverlap, normalizeForIndex, differenceDateTime, \
     normalizeForExpand
 from twistedcaldav.instance import InstanceList, InvalidOverriddenInstanceError
-from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
 from twistedcaldav.timezones import hasTZ, TimezoneException
 
+from txdav.caldav.datastore.scheduling.utils import normalizeCUAddr
+
 from pycalendar.icalendar import definitions
 from pycalendar.parameter import Parameter
 from pycalendar.icalendar.calendar import Calendar

Modified: CalendarServer/trunk/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/stdconfig.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/twistedcaldav/stdconfig.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -1029,6 +1029,14 @@
         "ReconciliationDelaySeconds" : 5,
     },
 
+    "AutomaticPurging": {
+        "Enabled": False,
+        "PollingIntervalSeconds": 7 * 24 * 60 * 60,   # 7 days
+        "CheckStaggerSeconds": 0, # No staggering
+        "PurgeIntervalSeconds": 7 * 24 * 60 * 60,   # 7 days
+        "HomePurgeDelaySeconds": 60,
+    },
+
     "Manhole": {
         "Enabled": False,
         "StartingPortNumber": 5000,

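The AutomaticPurging defaults above control how often the background purge looks for users missing from the directory. As a rough sketch only, the first polling pass could be enqueued from those settings; this assumes PrincipalPurgePollingWork is importable from calendarserver.tools.purge (as the other purge work items are), that txn is an open store transaction, and that the schedulePurgePolling function itself is illustrative:

    import datetime

    from twisted.internet.defer import inlineCallbacks
    from twistedcaldav.config import config
    from calendarserver.tools.purge import PrincipalPurgePollingWork

    @inlineCallbacks
    def schedulePurgePolling(txn):
        # Illustrative: enqueue the next polling pass after the configured interval.
        if config.AutomaticPurging.Enabled:
            notBefore = (
                datetime.datetime.utcnow() +
                datetime.timedelta(seconds=config.AutomaticPurging.PollingIntervalSeconds)
            )
            yield txn.enqueue(PrincipalPurgePollingWork, notBefore=notBefore)
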
Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/addressmapping.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/addressmapping.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/addressmapping.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -22,11 +22,10 @@
 from twistedcaldav.memcacher import Memcacher
 
 from txdav.caldav.datastore.scheduling.caldav.delivery import ScheduleViaCalDAV
+from txdav.caldav.datastore.scheduling.cuaddress import RemoteCalendarUser, EmailCalendarUser, InvalidCalendarUser
 from txdav.caldav.datastore.scheduling.delivery import DeliveryService
 from txdav.caldav.datastore.scheduling.imip.delivery import ScheduleViaIMip
 from txdav.caldav.datastore.scheduling.ischedule.delivery import ScheduleViaISchedule
-from txdav.caldav.datastore.scheduling.cuaddress import RemoteCalendarUser, EmailCalendarUser, InvalidCalendarUser, \
-    calendarUserFromPrincipal
 
 __all__ = [
     "ScheduleAddressMapper",
@@ -51,12 +50,8 @@
 
 
     @inlineCallbacks
-    def getCalendarUser(self, cuaddr, principal):
+    def getCalendarUser(self, cuaddr):
 
-        # If we have a principal always treat the user as local
-        if principal:
-            returnValue(calendarUserFromPrincipal(cuaddr, principal))
-
         # Get the type
         cuaddr_type = (yield self.getCalendarUserServiceType(cuaddr))
         if cuaddr_type == DeliveryService.serviceType_caldav:

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/caldav/delivery.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/caldav/delivery.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/caldav/delivery.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -99,7 +99,7 @@
 
         organizerPrincipal = None
         if type(self.scheduler.organizer) in (LocalCalendarUser, OtherServerCalendarUser,):
-            organizerPrincipal = self.scheduler.organizer.principal.uid
+            organizerPrincipal = self.scheduler.organizer.record.uid
 
         for recipient in self.recipients:
 
@@ -187,7 +187,7 @@
     def generateFreeBusyResponse(self, recipient, responses, organizerProp, organizerPrincipal, uid, event_details):
 
         # Extract the ATTENDEE property matching current recipient from the calendar data
-        cuas = recipient.principal.calendarUserAddresses
+        cuas = recipient.record.calendarUserAddresses
         attendeeProp = self.scheduler.calendar.getAttendeeProperty(cuas)
 
         remote = isinstance(self.scheduler.organizer, RemoteCalendarUser)
@@ -250,7 +250,7 @@
         # Check to see if the recipient is the same calendar user as the organizer.
         # Needed for masked UID stuff.
         if isinstance(self.scheduler.organizer, LocalCalendarUser):
-            same_calendar_user = self.scheduler.organizer.principal.uid == recipient.principal.uid
+            same_calendar_user = self.scheduler.organizer.record.uid == recipient.record.uid
         else:
             same_calendar_user = False
 

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/caldav/scheduler.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/caldav/scheduler.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/caldav/scheduler.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -26,7 +26,8 @@
 
 from txdav.caldav.datastore.scheduling import addressmapping
 from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser, \
-    InvalidCalendarUser, calendarUserFromPrincipal, RemoteCalendarUser
+    OtherServerCalendarUser, InvalidCalendarUser, \
+    calendarUserFromCalendarUserAddress
 from txdav.caldav.datastore.scheduling.scheduler import Scheduler, ScheduleResponseQueue
 
 
@@ -92,8 +93,8 @@
         """
 
         # Verify that Originator is a valid calendar user
-        originatorPrincipal = yield self.txn.directoryService().recordWithCalendarUserAddress(self.originator)
-        if originatorPrincipal is None:
+        originatorAddress = yield calendarUserFromCalendarUserAddress(self.originator, self.txn)
+        if not originatorAddress.hosted():
             # Local requests MUST have a principal.
             log.error("Could not find principal for originator: %s" % (self.originator,))
             raise HTTPError(self.errorResponse(
@@ -102,7 +103,7 @@
                 "No principal for originator",
             ))
         else:
-            if not (originatorPrincipal.calendarsEnabled() and originatorPrincipal.thisServer()):
+            if not originatorAddress.validOriginator() or isinstance(originatorAddress, OtherServerCalendarUser):
                 log.error("Originator not enabled or hosted on this server: %s" % (self.originator,))
                 raise HTTPError(self.errorResponse(
                     responsecode.FORBIDDEN,
@@ -110,7 +111,7 @@
                     "Originator cannot be scheduled",
                 ))
 
-            self.originator = LocalCalendarUser(self.originator, originatorPrincipal)
+            self.originator = originatorAddress
 
 
     @inlineCallbacks
@@ -122,32 +123,32 @@
 
         results = []
         for recipient in self.recipients:
-            # Get the principal resource for this recipient
-            principal = yield self.txn.directoryService().recordWithCalendarUserAddress(recipient)
+            # Get the calendar user object for this recipient
+            recipientAddress = yield calendarUserFromCalendarUserAddress(recipient, self.txn)
 
             # If no principal we may have a remote recipient but we should check whether
             # the address is one that ought to be on our server and treat that as a missing
             # user. Also if server-to-server is not enabled then remote addresses are not allowed.
-            if principal is None:
-                address = (yield addressmapping.mapper.getCalendarUser(recipient, principal))
-                if isinstance(address, InvalidCalendarUser):
+            if not recipientAddress.hosted():
+                if isinstance(recipientAddress, InvalidCalendarUser):
                     log.error("Unknown calendar user address: %s" % (recipient,))
-                results.append(address)
+                results.append(recipientAddress)
             else:
-                # Map recipient to their inbox
+                # Map recipient to their inbox and cache on calendar user object
                 inbox = None
-                if principal.calendarsEnabled():
-                    if principal.thisServer():
-                        recipient_home = yield self.txn.calendarHomeWithUID(principal.uid, create=True)
+                if recipientAddress.validRecipient():
+                    if isinstance(recipientAddress, LocalCalendarUser):
+                        recipient_home = yield self.txn.calendarHomeWithUID(recipientAddress.record.uid, create=True)
                         if recipient_home:
                             inbox = (yield recipient_home.calendarWithName("inbox"))
                     else:
                         inbox = "dummy"
+                    recipientAddress.inbox = inbox
 
                 if inbox:
-                    results.append(calendarUserFromPrincipal(recipient, principal, inbox))
+                    results.append(recipientAddress)
                 else:
-                    log.error("Recipient not enabled for calendaring: %s" % (principal,))
+                    log.error("No scheduling for calendar user: %s" % (recipient,))
                     results.append(InvalidCalendarUser(recipient))
 
         self.recipients = results
@@ -162,14 +163,14 @@
         # Verify that the ORGANIZER's cu address maps to a valid user
         organizer = self.calendar.getOrganizer()
         if organizer:
-            organizerPrincipal = yield self.txn.directoryService().recordWithCalendarUserAddress(organizer)
-            if organizerPrincipal:
-                if organizerPrincipal.calendarsEnabled():
+            organizerAddress = yield calendarUserFromCalendarUserAddress(organizer, self.txn)
+            if organizerAddress.hosted():
+                if organizerAddress.record.calendarsEnabled():
 
                     # Only do this check for a freebusy request. A check for an invite needs
                     # to be handled later when we know whether a new invite is being added
                     # (which we reject) vs an update to an existing one (which we allow).
-                    if self.checkForFreeBusy() and not organizerPrincipal.enabledAsOrganizer():
+                    if self.checkForFreeBusy() and not organizerAddress.record.enabledAsOrganizer():
                         log.error("ORGANIZER not allowed to be an Organizer: %s" % (self.calendar,))
                         raise HTTPError(self.errorResponse(
                             responsecode.FORBIDDEN,
@@ -177,9 +178,9 @@
                             "Organizer cannot schedule",
                         ))
 
-                    self.organizer = LocalCalendarUser(organizer, organizerPrincipal)
+                    self.organizer = organizerAddress
                 else:
-                    log.error("No outbox for ORGANIZER in calendar data: %s" % (self.calendar,))
+                    log.error("No scheduling for ORGANIZER: %s" % (organizer,))
                     raise HTTPError(self.errorResponse(
                         responsecode.FORBIDDEN,
                         self.errorElements["organizer-denied"],
@@ -195,7 +196,7 @@
                         "No principal for organizer",
                     ))
                 else:
-                    self.organizer = RemoteCalendarUser(organizer)
+                    self.organizer = organizerAddress
         else:
             log.error("ORGANIZER missing in calendar data: %s" % (self.calendar,))
             raise HTTPError(self.errorResponse(
@@ -216,8 +217,8 @@
                 "Organizer is not local to server",
             ))
 
-        # Make sure that the ORGANIZER's Outbox is the request URI
-        if self.doingPOST is not None and self.organizer.principal.uid != self.originator_uid:
+        # Make sure that the ORGANIZER is the owner of the request URI
+        if self.doingPOST is not None and self.organizer.record.uid != self.originator_uid:
             log.error("Wrong outbox for ORGANIZER in calendar data: %s" % (self.calendar,))
             raise HTTPError(self.errorResponse(
                 responsecode.FORBIDDEN,
@@ -233,18 +234,18 @@
         Only local attendees are allowed for message originating from this server.
         """
 
-        # Attendee's Outbox MUST be the request URI
-        attendeePrincipal = yield self.txn.directoryService().recordWithCalendarUserAddress(self.attendee)
-        if attendeePrincipal:
-            if self.doingPOST is not None and attendeePrincipal.uid != self.originator_uid:
-                log.error("ATTENDEE in calendar data does not match owner of Outbox: %s" % (self.calendar,))
+        # The attendee MUST be the owner of the request URI
+        attendeeAddress = yield calendarUserFromCalendarUserAddress(self.attendee, self.txn)
+        if attendeeAddress.hosted():
+            if self.doingPOST is not None and attendeeAddress.record.uid != self.originator_uid:
+                log.error("ATTENDEE in calendar data does not match owner of Outbox: %s" % (self.attendee,))
                 raise HTTPError(self.errorResponse(
                     responsecode.FORBIDDEN,
                     self.errorElements["attendee-denied"],
                     "Outbox does not belong to attendee",
                 ))
         else:
-            log.error("Unknown ATTENDEE in calendar data: %s" % (self.calendar,))
+            log.error("Unknown ATTENDEE in calendar data: %s" % (self.attendee,))
             raise HTTPError(self.errorResponse(
                 responsecode.FORBIDDEN,
                 self.errorElements["attendee-denied"],

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/cuaddress.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/cuaddress.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/cuaddress.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -15,17 +15,27 @@
 ##
 
 from twext.python.log import Logger
+from twext.who.directory import DirectoryRecord as BaseDirectoryRecord
+from twext.who.idirectory import FieldName as BaseFieldName
+from twext.who.idirectory import RecordType as BaseRecordType
 
-from txdav.caldav.datastore.scheduling.delivery import DeliveryService
-from txdav.caldav.datastore.scheduling.utils import extractEmailDomain
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.constants import Names, NamedConstant
 
+from txdav.caldav.datastore.scheduling.utils import extractEmailDomain, \
+    uidFromCalendarUserAddress
+from txdav.caldav.icalendardirectoryservice import ICalendarStoreDirectoryRecord
+from txdav.who.directory import CalendarDirectoryRecordMixin
+from txdav.who.idirectory import FieldName
+
+from zope.interface.declarations import implementer
+
 __all__ = [
     "LocalCalendarUser",
     "OtherServerCalendarUser",
     "RemoteCalendarUser",
     "EmailCalendarUser",
     "InvalidCalendarUser",
-    "normalizeCUAddr",
 ]
 
 log = Logger()
@@ -34,30 +44,91 @@
 
     def __init__(self, cuaddr):
         self.cuaddr = cuaddr
-        self.serviceType = None
 
 
+    def hosted(self):
+        """
+        Is this user hosted on this service (this pod or any other).
+        """
+        return False
 
-class LocalCalendarUser(CalendarUser):
 
-    def __init__(self, cuaddr, principal, inbox=None):
+    def validOriginator(self):
+        """
+        Is this user able to originate scheduling messages.
+        """
+        return True
+
+
+    def validRecipient(self):
+        """
+        Is this user able to receive scheduling messages.
+        """
+        return True
+
+
+
+class HostedCalendarUser(CalendarUser):
+    """
+    User hosted on any pod of this service. This is derived from an L{DirectoryRecord}
+    in most cases. However, we need to cope with the situation where a user has been
+    removed from the directory but still has calendar data that needs to be managed
+    (typically purged). In that case there is no directory record, but we can confirm
+    from the cu-address that corresponding data for their UID exists, and thus can
+    determine the valid UID to use.
+    """
+
+    def __init__(self, cuaddr, record):
         self.cuaddr = cuaddr
-        self.principal = principal
-        self.inbox = inbox
-        self.serviceType = DeliveryService.serviceType_caldav
+        self.record = record
 
 
+    def hosted(self):
+        """
+        Is this user hosted on this service (this pod or any other).
+        """
+        return True
+
+
+    def validOriginator(self):
+        """
+        Is this user able to originate scheduling messages.
+        A user with a temporary directory record can still act as an originator,
+        but only for the purposes of automatic purge.
+        """
+        return self.record.calendarsEnabled()
+
+
+    def validRecipient(self):
+        """
+        Is this user able to receive scheduling messages.
+        A user with a temporary directory record cannot be scheduled with.
+        """
+        return self.record.calendarsEnabled() and not isinstance(self.record, TemporaryDirectoryRecord)
+
+
+
+class LocalCalendarUser(HostedCalendarUser):
+    """
+    User hosted on the current pod.
+    """
+
+    def __init__(self, cuaddr, record):
+        super(LocalCalendarUser, self).__init__(cuaddr, record)
+
+
     def __str__(self):
         return "Local calendar user: %s" % (self.cuaddr,)
 
 
 
-class OtherServerCalendarUser(CalendarUser):
+class OtherServerCalendarUser(HostedCalendarUser):
+    """
+    User hosted on another pod.
+    """
 
-    def __init__(self, cuaddr, principal):
-        self.cuaddr = cuaddr
-        self.principal = principal
-        self.serviceType = DeliveryService.serviceType_ischedule
+    def __init__(self, cuaddr, record):
+        super(OtherServerCalendarUser, self).__init__(cuaddr, record)
 
 
     def __str__(self):
@@ -66,11 +137,13 @@
 
 
 class RemoteCalendarUser(CalendarUser):
+    """
+    User external to the entire system (set of pods). Used for iSchedule.
+    """
 
     def __init__(self, cuaddr):
-        self.cuaddr = cuaddr
+        super(RemoteCalendarUser, self).__init__(cuaddr)
         self.extractDomain()
-        self.serviceType = DeliveryService.serviceType_ischedule
 
 
     def __str__(self):
@@ -89,10 +162,12 @@
 
 
 class EmailCalendarUser(CalendarUser):
+    """
+    User external to the entire system (set of pods). Used for iMIP.
+    """
 
     def __init__(self, cuaddr):
-        self.cuaddr = cuaddr
-        self.serviceType = DeliveryService.serviceType_imip
+        super(EmailCalendarUser, self).__init__(cuaddr)
 
 
     def __str__(self):
@@ -101,37 +176,127 @@
 
 
 class InvalidCalendarUser(CalendarUser):
+    """
+    A calendar user that ought to be hosted on the system, but does not have a valid
+    directory entry.
+    """
 
     def __str__(self):
         return "Invalid calendar user: %s" % (self.cuaddr,)
 
 
+    def validOriginator(self):
+        """
+        Is this user able to originate scheduling messages.
+        """
+        return False
 
-def normalizeCUAddr(addr):
+
+    def validRecipient(self):
+        """
+        Is this user able to receive scheduling messages.
+        """
+        return False
+
+
+
+@inlineCallbacks
+def calendarUserFromCalendarUserAddress(cuaddr, txn):
     """
-    Normalize a cuaddr string by lower()ing it if it's a mailto:, or
-    removing trailing slash if it's a URL.
-    @param addr: a cuaddr string to normalize
-    @return: normalized string
+    Map a calendar user address into an L{CalendarUser} taking into account whether
+    they are hosted in the directory or known to be locally hosted - or match
+    address patterns for other services.
+
+    @param cuaddr: the calendar user address to map
+    @type cuaddr: L{str}
+    @param txn: a transaction to use for store operations
+    @type txn: L{ICommonStoreTransaction}
     """
-    lower = addr.lower()
-    if lower.startswith("mailto:"):
-        addr = lower
-    if (addr.startswith("/") or
-        addr.startswith("http:") or
-        addr.startswith("https:")):
-        return addr.rstrip("/")
-    else:
-        return addr
 
+    record = yield txn.directoryService().recordWithCalendarUserAddress(cuaddr)
+    returnValue((yield _fromRecord(cuaddr, record, txn)))
 
 
-def calendarUserFromPrincipal(recipient, principal, inbox=None):
+
+@inlineCallbacks
+def calendarUserFromCalendarUserUID(uid, txn):
     """
-    Get the appropriate calendar user address class for the provided principal.
+    Map a calendar user UID into an L{CalendarUser} taking into account whether
+    they are hosted in the directory or known to be locally hosted - or match
+    address patterns for other services.
+
+    @param uid: the calendar user UID to map
+    @type uid: L{str}
+    @param txn: a transaction to use for store operations
+    @type txn: L{ICommonStoreTransaction}
     """
 
-    if principal.thisServer():
-        return LocalCalendarUser(recipient, principal, inbox)
+    record = yield txn.directoryService().recordWithUID(uid)
+    cua = record.canonicalCalendarUserAddress() if record is not None else "urn:x-uid:{}".format(uid)
+    returnValue((yield _fromRecord(cua, record, txn)))
+
+
+
+class RecordType(Names):
+    """
+    Constants for temporary directory record type.
+
+    @cvar unknown: Unknown record.
+        Represents a calendar user of unknown type.
+    """
+
+    unknown = NamedConstant()
+    unknown.description = u"unknown"
+
+
+
+@implementer(ICalendarStoreDirectoryRecord)
+class TemporaryDirectoryRecord(BaseDirectoryRecord, CalendarDirectoryRecordMixin):
+
+    def __init__(self, service, uid, nodeUID):
+
+        fields = {
+            BaseFieldName.uid: uid.decode("utf-8"),
+            BaseFieldName.recordType: BaseRecordType.user,
+            FieldName.hasCalendars: True,
+            FieldName.serviceNodeUID: nodeUID,
+        }
+
+        super(TemporaryDirectoryRecord, self).__init__(service, fields)
+        self.fields[BaseFieldName.recordType] = RecordType.unknown
+        self.fields[BaseFieldName.guid] = uid.decode("utf-8")
+
+
+
+@inlineCallbacks
+def _fromRecord(cuaddr, record, txn):
+    """
+    Map a calendar user record into an L{CalendarUser} taking into account whether
+    they are hosted in the directory or known to be locally hosted - or match
+    address patterns for other services.
+
+    @param cuaddr: the calendar user address to map
+    @type cuaddr: L{str}
+    @param record: the calendar user record to map or L{None}
+    @type record: L{IDirectoryRecord}
+    @param txn: a transaction to use for store operations
+    @type txn: L{ICommonStoreTransaction}
+    """
+    if record is not None:
+        if not record.calendarsEnabled():
+            returnValue(InvalidCalendarUser(cuaddr))
+        elif record.thisServer():
+            returnValue(LocalCalendarUser(cuaddr, record))
+        else:
+            returnValue(OtherServerCalendarUser(cuaddr, record))
     else:
-        return OtherServerCalendarUser(recipient, principal)
+        uid = uidFromCalendarUserAddress(cuaddr)
+        if uid is not None:
+            hosted, serviceNodeUID = yield txn.store().uidInStore(txn, uid)
+            if hosted:
+                record = TemporaryDirectoryRecord(txn.directoryService(), uid, serviceNodeUID)
+                returnValue(LocalCalendarUser(cuaddr, record))
+
+    from txdav.caldav.datastore.scheduling import addressmapping
+    result = (yield addressmapping.mapper.getCalendarUser(cuaddr))
+    returnValue(result)

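As a short sketch of the new address-mapping entry point (assuming txn is an open store transaction; the describeCalendarUser helper is illustrative and not part of this changeset):

    from twisted.internet.defer import inlineCallbacks, returnValue

    from txdav.caldav.datastore.scheduling.cuaddress import (
        LocalCalendarUser, calendarUserFromCalendarUserAddress
    )

    @inlineCallbacks
    def describeCalendarUser(txn, cuaddr):
        # Illustrative: classify a calendar user address with the new API.
        cu = yield calendarUserFromCalendarUserAddress(cuaddr, txn)
        if not cu.hosted():
            # Remote, email or invalid users come back from the address mapper.
            returnValue("not hosted: %s" % (cu,))
        if isinstance(cu, LocalCalendarUser):
            # Hosted on this pod. cu.record may be a TemporaryDirectoryRecord when
            # the directory entry was removed but data remains, in which case
            # validOriginator() is True but validRecipient() is False.
            returnValue("local uid=%s recipient=%s" % (cu.record.uid, cu.validRecipient()))
        returnValue("hosted on another pod: %s" % (cu,))

The TemporaryDirectoryRecord fallback is what lets the automatic purge continue to schedule on behalf of a user whose directory record no longer exists.
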
Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/icaldiff.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/icaldiff.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/icaldiff.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -24,7 +24,7 @@
 from twistedcaldav import accounting
 from twistedcaldav.config import config
 from twistedcaldav.ical import Component, Property
-from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
+from txdav.caldav.datastore.scheduling.utils import normalizeCUAddr
 from txdav.caldav.datastore.scheduling.itip import iTipGenerator
 
 """

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -39,7 +39,7 @@
 from twistedcaldav.config import config
 from twistedcaldav.ical import Component
 from twistedcaldav.localization import translationTo, _, getLanguage
-from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
+from txdav.caldav.datastore.scheduling.utils import normalizeCUAddr
 from txdav.caldav.datastore.scheduling.imip.smtpsender import SMTPSender
 from txdav.common.datastore.sql_tables import schema
 

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/implicit.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/implicit.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/implicit.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -25,11 +25,12 @@
 from twistedcaldav.config import config
 from twistedcaldav.ical import Property
 
-from txdav.caldav.datastore.scheduling import addressmapping
 from txdav.caldav.datastore.scheduling.caldav.scheduler import CalDAVScheduler
 from txdav.caldav.datastore.scheduling.cuaddress import InvalidCalendarUser, \
     LocalCalendarUser, OtherServerCalendarUser, \
-    normalizeCUAddr
+    calendarUserFromCalendarUserAddress, \
+    calendarUserFromCalendarUserUID
+from txdav.caldav.datastore.scheduling.utils import normalizeCUAddr
 from txdav.caldav.datastore.scheduling.icaldiff import iCalDiff
 from txdav.caldav.datastore.scheduling.itip import iTipGenerator, iTIPRequestStatus
 from txdav.caldav.datastore.scheduling.utils import getCalendarObjectForRecord
@@ -232,6 +233,7 @@
         @type calendar: L{twistedcaldav.ical.Component}
         """
 
+        self.txn = parent._txn
         self.parent = parent
         self.resource = resource
         self.calendar = calendar
@@ -257,7 +259,7 @@
         # Check to see whether the organizer principal is enabled for scheduling. If not, do not allow them
         # to create new scheduling resources.
         if self.action == "create":
-            if self.organizerPrincipal and not self.organizerPrincipal.enabledAsOrganizer():
+            if self.organizerAddress.hosted() and not self.organizerAddress.record.enabledAsOrganizer():
                 log.error("ORGANIZER not allowed to be an Organizer: {organizer}", organizer=self.organizer)
                 raise HTTPError(ErrorResponse(
                     responsecode.FORBIDDEN,
@@ -365,11 +367,9 @@
 
         # Get some useful information from the calendar
         yield self.extractCalendarData()
-        self.organizerPrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(self.organizer)
-        self.organizerAddress = (yield addressmapping.mapper.getCalendarUser(self.organizer, self.organizerPrincipal))
+        self.organizerAddress = (yield calendarUserFromCalendarUserAddress(self.organizer, self.txn))
 
         # Originator is the organizer in this case
-        self.originatorPrincipal = self.organizerPrincipal
         self.originator = self.organizer
 
         # We want to suppress chatty iMIP messages when other attendees reply
@@ -447,11 +447,9 @@
             self.calendar = calendar_old
 
         yield self.extractCalendarData()
-        self.organizerPrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(self.organizer)
-        self.organizerAddress = (yield addressmapping.mapper.getCalendarUser(self.organizer, self.organizerPrincipal))
+        self.organizerAddress = (yield calendarUserFromCalendarUserAddress(self.organizer, self.txn))
 
         # Originator is the organizer in this case
-        self.originatorPrincipal = self.organizerPrincipal
         self.originator = self.organizer
 
         self.except_attendees = ()
@@ -479,8 +477,8 @@
         # Get some useful information from the calendar
         yield self.extractCalendarData()
 
-        self.attendeePrincipal = yield self.calendar_home.directoryService().recordWithUID(self.calendar_home.uid().decode("utf-8"))
-        self.originator = self.attendee = self.attendeePrincipal.canonicalCalendarUserAddress()
+        self.attendeeAddress = (yield calendarUserFromCalendarUserUID(self.calendar_home.uid(), self.txn))
+        self.originator = self.attendee = self.attendeeAddress.record.canonicalCalendarUserAddress()
 
         result = (yield self.scheduleWithOrganizer())
 
@@ -491,10 +489,10 @@
     def extractCalendarData(self):
 
         # Get the originator who is the owner of the calendar resource being modified
-        self.originatorPrincipal = yield self.calendar_home.directoryService().recordWithUID(self.calendar_home.uid().decode("utf-8"))
+        originatorAddress = yield calendarUserFromCalendarUserUID(self.calendar_home.uid(), self.txn)
 
         # Pick the canonical CUA:
-        self.originator = self.originatorPrincipal.canonicalCalendarUserAddress()
+        self.originator = originatorAddress.record.canonicalCalendarUserAddress()
 
         # Get the ORGANIZER and verify it is the same for all components
         try:
@@ -567,13 +565,12 @@
             returnValue(False)
 
         # Organizer must map to a valid principal
-        self.organizerPrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(self.organizer)
-        self.organizerAddress = (yield addressmapping.mapper.getCalendarUser(self.organizer, self.organizerPrincipal))
-        if not self.organizerPrincipal:
+        self.organizerAddress = (yield calendarUserFromCalendarUserAddress(self.organizer, self.txn))
+        if not self.organizerAddress.hosted():
             returnValue(False)
 
         # Organizer must be the owner of the calendar resource
-        if self.calendar_home.uid() != self.organizerPrincipal.uid:
+        if self.calendar_home.uid() != self.organizerAddress.record.uid:
             returnValue(False)
 
         returnValue(True)
@@ -588,10 +585,10 @@
 
         # Check to see whether any attendee is the owner
         for attendee in self.attendees:
-            attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
-            if attendeePrincipal and attendeePrincipal.uid == self.calendar_home.uid():
+            attendeeAddress = yield calendarUserFromCalendarUserAddress(attendee, self.txn)
+            if attendeeAddress.hosted() and attendeeAddress.record.uid == self.calendar_home.uid():
                 self.attendee = attendee
-                self.attendeePrincipal = attendeePrincipal
+                self.attendeeAddress = attendeeAddress
                 returnValue(True)
 
         returnValue(False)
@@ -670,7 +667,7 @@
                             if attendee.hasParameter("PARTSTAT"):
                                 cuaddr = attendee.value()
 
-                                if cuaddr in self.organizerPrincipal.calendarUserAddresses:
+                                if cuaddr in self.organizerAddress.record.calendarUserAddresses:
                                     # If the attendee is the organizer then do not update
                                     # the PARTSTAT to NEEDS-ACTION.
                                     # The organizer is automatically ACCEPTED to the event.
@@ -971,7 +968,7 @@
         """
         for attendee in self.calendar.getAllAttendeeProperties():
             # Don't adjust ORGANIZER's ATTENDEE
-            if attendee.value() in self.organizerPrincipal.calendarUserAddresses:
+            if attendee.value() in self.organizerAddress.record.calendarUserAddresses:
                 continue
             if attendee.parameterValue("SCHEDULE-AGENT", "SERVER").upper() == "SERVER" and attendee.hasParameter("PARTSTAT"):
                 attendee.setParameter("PARTSTAT", "NEEDS-ACTION")
@@ -1026,7 +1023,7 @@
         changed = False
         for cuaddr, newattendee in new_attendees.items():
             # Don't adjust ORGANIZER's ATTENDEE
-            if newattendee.value() in self.organizerPrincipal.calendarUserAddresses:
+            if newattendee.value() in self.organizerAddress.record.calendarUserAddresses:
                 continue
             new_partstat = newattendee.parameterValue("PARTSTAT", "NEEDS-ACTION").upper()
             if newattendee.parameterValue("SCHEDULE-AGENT", "SERVER").upper() == "SERVER" and new_partstat != "NEEDS-ACTION":
@@ -1060,8 +1057,7 @@
             if attendee.parameterValue("SCHEDULE-AGENT", "SERVER").upper() == "CLIENT":
                 cuaddr = attendee.value()
                 if cuaddr not in coerced:
-                    attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(cuaddr)
-                    attendeeAddress = (yield addressmapping.mapper.getCalendarUser(cuaddr, attendeePrincipal))
+                    attendeeAddress = (yield calendarUserFromCalendarUserAddress(cuaddr, self.txn))
                     local_attendee = type(attendeeAddress) in (LocalCalendarUser, OtherServerCalendarUser,)
                     coerced[cuaddr] = local_attendee
                 if coerced[cuaddr]:
@@ -1082,7 +1078,7 @@
             self.resource,
             self.oldcalendar,
             self.calendar,
-            self.organizerPrincipal.canonicalCalendarUserAddress(),
+            self.organizerAddress.record.canonicalCalendarUserAddress(),
             len(self.calendar.getAllUniqueAttendees()) - 1,
             self.do_smart_merge,
         )
@@ -1128,13 +1124,12 @@
         for attendee, rids in aggregated.iteritems():
 
             # Don't send message back to the ORGANIZER
-            if attendee in self.organizerPrincipal.calendarUserAddresses:
+            if attendee in self.organizerAddress.record.calendarUserAddresses:
                 continue
 
             # Handle split by not scheduling local attendees
             if self.split_details is not None:
-                attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
-                attendeeAddress = (yield addressmapping.mapper.getCalendarUser(attendee, attendeePrincipal))
+                attendeeAddress = (yield calendarUserFromCalendarUserAddress(attendee, self.txn))
                 if type(attendeeAddress) is LocalCalendarUser:
                     continue
 
@@ -1173,7 +1168,7 @@
         for attendee in self.attendees:
 
             # Don't send message back to the ORGANIZER
-            if attendee in self.organizerPrincipal.calendarUserAddresses:
+            if attendee in self.organizerAddress.record.calendarUserAddresses:
                 continue
 
             # Don't send message to specified attendees
@@ -1190,8 +1185,7 @@
 
             # Handle split by not scheduling local attendees
             if self.split_details is not None:
-                attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
-                attendeeAddress = (yield addressmapping.mapper.getCalendarUser(attendee, attendeePrincipal))
+                attendeeAddress = (yield calendarUserFromCalendarUserAddress(attendee, self.txn))
                 if type(attendeeAddress) is LocalCalendarUser:
                     continue
 
@@ -1245,13 +1239,12 @@
         for attendee, rids in aggregated.iteritems():
 
             # Don't send message back to the ORGANIZER
-            if attendee in self.organizerPrincipal.calendarUserAddresses:
+            if attendee in self.organizerAddress.record.calendarUserAddresses:
                 continue
 
             # Handle split by not scheduling local attendees
             if self.split_details is not None:
-                attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
-                attendeeAddress = (yield addressmapping.mapper.getCalendarUser(attendee, attendeePrincipal))
+                attendeeAddress = (yield calendarUserFromCalendarUserAddress(attendee, self.txn))
                 if type(attendeeAddress) is LocalCalendarUser:
                     continue
 
@@ -1298,7 +1291,7 @@
         for attendee in self.attendees:
 
             # Don't send message back to the ORGANIZER
-            if attendee in self.organizerPrincipal.calendarUserAddresses:
+            if attendee in self.organizerAddress.record.calendarUserAddresses:
                 continue
 
             # Don't send message to specified attendees
@@ -1315,8 +1308,7 @@
 
             # Handle split by not scheduling local attendees
             if self.split_details is not None:
-                attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
-                attendeeAddress = (yield addressmapping.mapper.getCalendarUser(attendee, attendeePrincipal))
+                attendeeAddress = (yield calendarUserFromCalendarUserAddress(attendee, self.txn))
                 if type(attendeeAddress) is LocalCalendarUser:
                     continue
 
@@ -1412,7 +1404,7 @@
 
                 # If Organizer copy exists we cannot allow SCHEDULE-AGENT=CLIENT or NONE
                 if not doScheduling:
-                    log.error("Attendee '{attendee}' is not allowed to change SCHEDULE-AGENT on organizer: UID:{uid}", attendee=self.attendeePrincipal, uid=self.uid)
+                    log.error("Attendee '{attendee}' is not allowed to change SCHEDULE-AGENT on organizer: UID:{uid}", attendee=self.attendeeAddress.record, uid=self.uid)
                     raise HTTPError(ErrorResponse(
                         responsecode.FORBIDDEN,
                         (caldav_namespace, "valid-attendee-change"),
@@ -1430,7 +1422,7 @@
                         self.return_status = ImplicitScheduler.STATUS_ORPHANED_EVENT
                         returnValue(None)
                     else:
-                        log.error("Attendee '{attendee}' is not allowed to make an unauthorized change to an organized event: UID:{uid}", attendee=self.attendeePrincipal, uid=self.uid)
+                        log.error("Attendee '{attendee}' is not allowed to make an unauthorized change to an organized event: UID:{uid}", attendee=self.attendeeAddress.record, uid=self.uid)
                         raise HTTPError(ErrorResponse(
                             responsecode.FORBIDDEN,
                             (caldav_namespace, "valid-attendee-change"),
@@ -1462,7 +1454,7 @@
                         if self.oldcalendar:
                             oldScheduling = self.oldcalendar.getOrganizerScheduleAgent()
                             if not oldScheduling:
-                                log.error("Attendee '{attendee}' is not allowed to set SCHEDULE-AGENT=SERVER on organizer: UID:{uid}", attendee=self.attendeePrincipal, uid=self.uid)
+                                log.error("Attendee '{attendee}' is not allowed to set SCHEDULE-AGENT=SERVER on organizer: UID:{uid}", attendee=self.attendeeAddress.record, uid=self.uid)
                                 raise HTTPError(ErrorResponse(
                                     responsecode.FORBIDDEN,
                                     (caldav_namespace, "valid-attendee-change"),
@@ -1533,8 +1525,8 @@
                     oldattendess = self.oldcalendar.getAllUniqueAttendees()
                     found_old = False
                     for attendee in oldattendess:
-                        attendeePrincipal = yield self.calendar_home.directoryService().recordWithCalendarUserAddress(attendee)
-                        if attendeePrincipal and attendeePrincipal.uid == self.calendar_home.uid():
+                        attendeeAddress = (yield calendarUserFromCalendarUserAddress(attendee, self.txn))
+                        if attendeeAddress and attendeeAddress.record.uid == self.calendar_home.uid():
                             found_old = True
                             break
 
@@ -1566,7 +1558,7 @@
 
         if not config.Scheduling.iSchedule.Enabled and not local_organizer and is_server:
             # Coerce ORGANIZER to SCHEDULE-AGENT=NONE
-            log.debug("Attendee '{attendee}' is not allowed to use SCHEDULE-AGENT=SERVER on organizer: UID:{uid}", attendee=self.attendeePrincipal, uid=self.uid)
+            log.debug("Attendee '{attendee}' is not allowed to use SCHEDULE-AGENT=SERVER on organizer: UID:{uid}", attendee=self.attendeeAddress.record, uid=self.uid)
             self.calendar.setParameterToValueForPropertyWithValue("SCHEDULE-AGENT", "NONE", "ORGANIZER", None)
             self.calendar.setParameterToValueForPropertyWithValue("SCHEDULE-STATUS", iTIPRequestStatus.NO_USER_SUPPORT_CODE, "ORGANIZER", None)
             is_server = False
@@ -1585,7 +1577,10 @@
         """
 
         self.organizer_calendar = None
-        calendar_resource = (yield getCalendarObjectForRecord(self.calendar_home.transaction(), self.organizerPrincipal, self.uid))
+        if self.organizerAddress.hosted():
+            calendar_resource = (yield getCalendarObjectForRecord(self.calendar_home.transaction(), self.organizerAddress.record, self.uid))
+        else:
+            calendar_resource = None
         if calendar_resource is not None:
             self.organizer_calendar = (yield calendar_resource.componentForUser())
         elif type(self.organizerAddress) in (OtherServerCalendarUser,):

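ImplicitScheduler now resolves the calendar home owner through calendarUserFromCalendarUserUID instead of a directory principal lookup. A small sketch of that lookup (assuming home is a calendar home whose owner is hosted on this service; ownerCalendarUserAddress is illustrative only):

    from twisted.internet.defer import inlineCallbacks, returnValue

    from txdav.caldav.datastore.scheduling.cuaddress import calendarUserFromCalendarUserUID

    @inlineCallbacks
    def ownerCalendarUserAddress(home):
        # Illustrative: map the home owner's UID to a canonical calendar user address.
        # If the directory record was removed, a TemporaryDirectoryRecord is
        # synthesized, so .record is still available for a hosted owner.
        address = yield calendarUserFromCalendarUserUID(home.uid(), home.transaction())
        returnValue(address.record.canonicalCalendarUserAddress())
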
Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -188,12 +188,12 @@
         if not hasattr(self, "otherServers"):
             self.otherServers = {}
 
-        serverURI = recipient.principal.serverURI()
+        serverURI = recipient.record.serverURI()
         if serverURI not in self.otherServers:
             self.otherServers[serverURI] = IScheduleServerRecord(
                 uri=joinURL(serverURI, config.Servers.InboxName),
-                unNormalizeAddresses=not recipient.principal.server().isImplicit,
-                moreHeaders=[recipient.principal.server().secretHeader(), ],
+                unNormalizeAddresses=not recipient.record.server().isImplicit,
+                moreHeaders=[recipient.record.server().secretHeader(), ],
                 podding=True,
             )
 

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/scheduler.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/scheduler.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/scheduler.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -26,8 +26,8 @@
 from twistedcaldav.ical import normalizeCUAddress
 
 from txdav.caldav.datastore.scheduling import addressmapping
-from txdav.caldav.datastore.scheduling.cuaddress import RemoteCalendarUser
-from txdav.caldav.datastore.scheduling.cuaddress import calendarUserFromPrincipal
+from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser, \
+    calendarUserFromCalendarUserAddress, RemoteCalendarUser
 from txdav.caldav.datastore.scheduling.ischedule import xml
 from txdav.caldav.datastore.scheduling.ischedule.dkim import DKIMVerifier, \
     DKIMVerificationError, DKIMMissingError
@@ -226,19 +226,11 @@
         """
 
         # For remote requests we do not allow the originator to be a local user or one within our domain.
-        originatorPrincipal = (yield self.txn.directoryService().recordWithCalendarUserAddress(self.originator))
+        originatorAddress = yield calendarUserFromCalendarUserAddress(self.originator, self.txn)
         localUser = (yield addressmapping.mapper.isCalendarUserInMyDomain(self.originator))
 
-        if (originatorPrincipal or localUser) and not self._podding:
-            log.error("Cannot use originator that is external to this server: %s" % (self.originator,))
-            raise HTTPError(self.errorResponse(
-                responsecode.FORBIDDEN,
-                self.errorElements["originator-denied"],
-                "Originator cannot be external to server",
-            ))
+        if originatorAddress.hosted() or localUser:
 
-        if originatorPrincipal or localUser:
-
             # iSchedule must never deliver for users hosted on the server or any pod
             if not self._podding:
                 log.error("Cannot use originator that is local to this server: %s" % (self.originator,))
@@ -249,7 +241,7 @@
                 ))
 
             # Cannot deliver message for someone hosted on the same pod
-            elif originatorPrincipal.thisServer():
+            elif isinstance(originatorAddress, LocalCalendarUser):
                 log.error("Cannot use originator that is on this server: %s" % (self.originator,))
                 raise HTTPError(self.errorResponse(
                     responsecode.FORBIDDEN,
@@ -257,8 +249,8 @@
                     "Originator cannot be local to server",
                 ))
             else:
-                self.originator = calendarUserFromPrincipal(self.originator, originatorPrincipal)
-                self._validAlternateServer(originatorPrincipal)
+                self.originator = originatorAddress
+                self._validAlternateServer(originatorAddress)
         else:
             if self._podding:
                 log.error("Cannot use originator that is external to this server: %s" % (self.originator,))
@@ -333,13 +325,13 @@
                 ))
 
 
-    def _validAlternateServer(self, principal):
+    def _validAlternateServer(self, cuuser):
         """
         Check the validity of the podded host.
         """
 
         # Extract expected host/port. This will be the serverURI.
-        expected_uri = principal.serverURI()
+        expected_uri = cuuser.record.serverURI()
         expected_uri = urlparse.urlparse(expected_uri)
 
         # Get the request IP and map to hostname.
@@ -395,9 +387,9 @@
         # Verify that the ORGANIZER's cu address does not map to a valid user
         organizer = self.calendar.getOrganizer()
         if organizer:
-            organizerPrincipal = yield self.txn.directoryService().recordWithCalendarUserAddress(organizer)
-            if organizerPrincipal:
-                if organizerPrincipal.thisServer():
+            organizerAddress = yield calendarUserFromCalendarUserAddress(organizer, self.txn)
+            if organizerAddress.hosted():
+                if isinstance(organizerAddress, LocalCalendarUser):
                     log.error("Invalid ORGANIZER in calendar data: %s" % (self.calendar,))
                     raise HTTPError(self.errorResponse(
                         responsecode.FORBIDDEN,
@@ -406,8 +398,8 @@
                     ))
                 else:
                     # Check that the origin server is the correct pod
-                    self.organizer = calendarUserFromPrincipal(organizer, organizerPrincipal)
-                    self._validAlternateServer(self.organizer.principal)
+                    self.organizer = organizerAddress
+                    self._validAlternateServer(self.organizer)
             else:
                 localUser = (yield addressmapping.mapper.isCalendarUserInMyDomain(organizer))
                 if localUser:
@@ -436,9 +428,9 @@
         """
 
         # Attendee cannot be local.
-        attendeePrincipal = yield self.txn.directoryService().recordWithCalendarUserAddress(self.attendee)
-        if attendeePrincipal:
-            if attendeePrincipal.thisServer():
+        attendeeAddress = yield calendarUserFromCalendarUserAddress(self.attendee, self.txn)
+        if attendeeAddress.hosted():
+            if isinstance(attendeeAddress, LocalCalendarUser):
                 log.error("Invalid ATTENDEE in calendar data: %s" % (self.calendar,))
                 raise HTTPError(self.errorResponse(
                     responsecode.FORBIDDEN,
@@ -446,7 +438,7 @@
                     "Local attendee cannot send to this server",
                 ))
             else:
-                self._validAlternateServer(attendeePrincipal)
+                self._validAlternateServer(attendeeAddress)
         else:
             localUser = (yield addressmapping.mapper.isCalendarUserInMyDomain(self.attendee))
             if localUser:

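For orientation, here is a minimal sketch of the lookup pattern the revised iSchedule checks above are built on: resolve a calendar user address to a calendar user object and branch on hosted() and LocalCalendarUser rather than on a raw directory record. The classifyOriginator helper and its cuaddr/txn arguments are illustrative and not part of the change.

    from twisted.internet.defer import inlineCallbacks, returnValue

    from txdav.caldav.datastore.scheduling.cuaddress import (
        LocalCalendarUser, calendarUserFromCalendarUserAddress,
    )


    @inlineCallbacks
    def classifyOriginator(cuaddr, txn):
        """
        Report whether a calendar user address is unknown, hosted on this
        pod, or hosted on another pod of this server.
        """
        cuuser = yield calendarUserFromCalendarUserAddress(cuaddr, txn)
        if not cuuser.hosted():
            returnValue("external")      # not ours at all
        elif isinstance(cuuser, LocalCalendarUser):
            returnValue("this-pod")      # record lives on this pod
        else:
            returnValue("other-pod")     # hosted, but served by an alternate pod
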
Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/processing.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/processing.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/processing.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -29,10 +29,10 @@
 from twistedcaldav.ical import Property
 from twistedcaldav.instance import InvalidOverriddenInstanceError
 
-from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
 from txdav.caldav.datastore.scheduling.freebusy import generateFreeBusyInfo
 from txdav.caldav.datastore.scheduling.itip import iTipProcessing, iTIPRequestStatus
 from txdav.caldav.datastore.scheduling.utils import getCalendarObjectForRecord
+from txdav.caldav.datastore.scheduling.utils import normalizeCUAddr
 from txdav.caldav.datastore.scheduling.work import ScheduleRefreshWork, \
     ScheduleAutoReplyWork
 from txdav.caldav.icalendarstore import ComponentUpdateState, \
@@ -161,9 +161,9 @@
 
         self.recipient_calendar = None
         self.recipient_calendar_resource = None
-        calendar_resource = (yield getCalendarObjectForRecord(self.txn, self.recipient.principal, self.uid))
+        calendar_resource = (yield getCalendarObjectForRecord(self.txn, self.recipient.record, self.uid))
         if calendar_resource:
-            self.recipient_calendar = (yield calendar_resource.componentForUser(self.recipient.principal.uid))
+            self.recipient_calendar = (yield calendar_resource.componentForUser(self.recipient.record.uid))
             self.recipient_calendar_resource = calendar_resource
 
 
@@ -436,21 +436,21 @@
 
             # Handle auto-reply behavior
             organizer = normalizeCUAddr(self.message.getOrganizer())
-            if (yield self.recipient.principal.canAutoSchedule(organizer=organizer)):
+            if (yield self.recipient.record.canAutoSchedule(organizer=organizer)):
                 # auto schedule mode can depend on who the organizer is
-                mode = yield self.recipient.principal.getAutoScheduleMode(organizer=organizer)
+                mode = yield self.recipient.record.getAutoScheduleMode(organizer=organizer)
                 send_reply, store_inbox, partstat, accounting = (yield self.checkAttendeeAutoReply(new_calendar, mode))
                 if accounting is not None:
                     accounting["action"] = "create"
                     emitAccounting(
                         "AutoScheduling",
-                        self.recipient.principal,
+                        self.recipient.record,
                         json.dumps(accounting) + "\r\n",
                         filename=self.uid.encode("base64")[:-1] + ".txt"
                     )
 
                 # Only store inbox item when reply is not sent or always for users
-                store_inbox = store_inbox or self.recipient.principal.getCUType() == "INDIVIDUAL"
+                store_inbox = store_inbox or self.recipient.record.getCUType() == "INDIVIDUAL"
             else:
                 send_reply = False
                 store_inbox = True
@@ -477,21 +477,21 @@
 
                 # Handle auto-reply behavior
                 organizer = normalizeCUAddr(self.message.getOrganizer())
-                if (yield self.recipient.principal.canAutoSchedule(organizer=organizer)) and not hasattr(self.txn, "doing_attendee_refresh"):
+                if (yield self.recipient.record.canAutoSchedule(organizer=organizer)) and not hasattr(self.txn, "doing_attendee_refresh"):
                     # auto schedule mode can depend on who the organizer is
-                    mode = yield self.recipient.principal.getAutoScheduleMode(organizer=organizer)
+                    mode = yield self.recipient.record.getAutoScheduleMode(organizer=organizer)
                     send_reply, store_inbox, partstat, accounting = (yield self.checkAttendeeAutoReply(new_calendar, mode))
                     if accounting is not None:
                         accounting["action"] = "modify"
                         emitAccounting(
                             "AutoScheduling",
-                            self.recipient.principal,
+                            self.recipient.record,
                             json.dumps(accounting) + "\r\n",
                             filename=self.uid.encode("base64")[:-1] + ".txt"
                         )
 
                     # Only store inbox item when reply is not sent or always for users
-                    store_inbox = store_inbox or self.recipient.principal.getCUType() == "INDIVIDUAL"
+                    store_inbox = store_inbox or self.recipient.record.getCUType() == "INDIVIDUAL"
                 else:
                     send_reply = False
                     store_inbox = True
@@ -566,13 +566,13 @@
             # inbox item on them even if auto-schedule is true so that they get a notification
             # of the cancel.
             organizer = normalizeCUAddr(self.message.getOrganizer())
-            autoprocessed = yield self.recipient.principal.canAutoSchedule(organizer=organizer)
-            store_inbox = not autoprocessed or self.recipient.principal.getCUType() == "INDIVIDUAL"
+            autoprocessed = yield self.recipient.record.canAutoSchedule(organizer=organizer)
+            store_inbox = not autoprocessed or self.recipient.record.getCUType() == "INDIVIDUAL"
 
             # Check to see if this is a cancel of the entire event
             processed_message, delete_original, rids = iTipProcessing.processCancel(self.message, self.recipient_calendar, autoprocessing=autoprocessed)
             if processed_message:
-                if autoprocessed and accountingEnabled("AutoScheduling", self.recipient.principal):
+                if autoprocessed and accountingEnabled("AutoScheduling", self.recipient.record):
                     accounting = {
                         "action": "cancel",
                         "when": DateTime.getNowUTC().getText(),
@@ -580,7 +580,7 @@
                     }
                     emitAccounting(
                         "AutoScheduling",
-                        self.recipient.principal,
+                        self.recipient.record,
                         json.dumps(accounting) + "\r\n",
                         filename=self.uid.encode("base64")[:-1] + ".txt"
                     )
@@ -643,7 +643,7 @@
         @return: C{tuple} of C{bool}, C{bool}, C{str} indicating whether changes were made, whether the inbox item
             should be added, and the new PARTSTAT.
         """
-        if accountingEnabled("AutoScheduling", self.recipient.principal):
+        if accountingEnabled("AutoScheduling", self.recipient.record):
             accounting = {
                 "when": DateTime.getNowUTC().getText(),
                 "automode": automode,
@@ -670,7 +670,7 @@
 
         log.debug("ImplicitProcessing - recipient '%s' processing UID: '%s' - checking for auto-reply with mode: %s" % (self.recipient.cuaddr, self.uid, automode.name,))
 
-        cuas = self.recipient.principal.calendarUserAddresses
+        cuas = self.recipient.record.calendarUserAddresses
 
         # First expand current one to get instances (only go 1 year into the future)
         default_future_expansion_duration = Duration(days=config.Scheduling.Options.AutoSchedule.FutureFreeBusyDays)
@@ -979,20 +979,20 @@
             raise ImplicitProcessorException("5.1;Service unavailable")
 
         # Check to see whether the originator is hosted on this server
-        if not self.originator.principal:
+        if not self.originator.record:
             raise ImplicitProcessorException("5.1;Service unavailable")
 
         # Locate the originator's copy of the event
-        calendar_resource = (yield getCalendarObjectForRecord(self.txn, self.originator.principal, self.uid))
+        calendar_resource = (yield getCalendarObjectForRecord(self.txn, self.originator.record, self.uid))
         if calendar_resource is None:
             raise ImplicitProcessorException("5.1;Service unavailable")
-        originator_calendar = (yield calendar_resource.componentForUser(self.originator.principal.uid))
+        originator_calendar = (yield calendar_resource.componentForUser(self.originator.record.uid))
 
         # Get attendee's view of that
         originator_calendar.attendeesView((self.recipient.cuaddr,))
 
         # Locate the attendee's copy of the event if it exists.
-        recipient_resource = (yield getCalendarObjectForRecord(self.txn, self.recipient.principal, self.uid))
+        recipient_resource = (yield getCalendarObjectForRecord(self.txn, self.recipient.record, self.uid))
 
         # We only need to fix data that already exists
         if recipient_resource is not None:

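The processing.py edits above consistently read auto-schedule state from the recipient's directory record. A condensed, hedged sketch of that decision flow follows; the checkAutoReply callable stands in for the processor's checkAttendeeAutoReply and is an assumption of this sketch.

    from twisted.internet.defer import inlineCallbacks, returnValue


    @inlineCallbacks
    def autoReplyDecision(recipient, organizer, new_calendar, checkAutoReply):
        """
        Decide whether to send an auto-reply and whether to keep an inbox
        item, based purely on the recipient's directory record.
        """
        if (yield recipient.record.canAutoSchedule(organizer=organizer)):
            mode = yield recipient.record.getAutoScheduleMode(organizer=organizer)
            send_reply, store_inbox, partstat, _accounting = yield checkAutoReply(new_calendar, mode)
            # Individuals always keep an inbox item, even when a reply was sent
            store_inbox = store_inbox or recipient.record.getCUType() == "INDIVIDUAL"
        else:
            send_reply, store_inbox, partstat = False, True, None
        returnValue((send_reply, store_inbox, partstat))
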
Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/scheduler.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/scheduler.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/scheduler.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -33,11 +33,11 @@
 from twistedcaldav.ical import Component
 from txdav.caldav.datastore.scheduling import addressmapping
 from txdav.caldav.datastore.scheduling.caldav.delivery import ScheduleViaCalDAV
+from txdav.caldav.datastore.scheduling.cuaddress import EmailCalendarUser
 from txdav.caldav.datastore.scheduling.cuaddress import InvalidCalendarUser, \
-    calendarUserFromPrincipal, OtherServerCalendarUser
+    OtherServerCalendarUser, calendarUserFromCalendarUserAddress
 from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser
 from txdav.caldav.datastore.scheduling.cuaddress import RemoteCalendarUser
-from txdav.caldav.datastore.scheduling.cuaddress import EmailCalendarUser
 from txdav.caldav.datastore.scheduling.imip.delivery import ScheduleViaIMip
 from txdav.caldav.datastore.scheduling.ischedule.delivery import ScheduleViaISchedule
 from txdav.caldav.datastore.scheduling.itip import iTIPRequestStatus
@@ -402,10 +402,10 @@
         #
         if isinstance(self.organizer, LocalCalendarUser):
             accountingType = "iTIP-VFREEBUSY" if self.calendar.mainType() == "VFREEBUSY" else "iTIP"
-            if accountingEnabled(accountingType, self.organizer.principal):
+            if accountingEnabled(accountingType, self.organizer.record):
                 emitAccounting(
                     accountingType,
-                    self.organizer.principal,
+                    self.organizer.record,
                     "Originator: %s\nRecipients:\n%sMethod:%s\n\n%s"
                     % (
                         str(self.originator),
@@ -550,34 +550,35 @@
 
         results = []
         for recipient in self.recipients:
-            # Get the principal resource for this recipient
-            principal = yield self.txn.directoryService().recordWithCalendarUserAddress(recipient)
+            # Get the calendar user object for this recipient
+            recipientAddress = yield calendarUserFromCalendarUserAddress(recipient, self.txn)
 
-            # If no principal we may have a remote recipient but we should check whether
+            # If there is no hosted calendar user, we may have a remote recipient, but we should check whether
             # the address is one that ought to be on our server and treat that as a missing
             # user. Also if server-to-server is not enabled then remote addresses are not allowed.
-            if principal is None:
+            if not recipientAddress.hosted():
                 localUser = (yield addressmapping.mapper.isCalendarUserInMyDomain(recipient))
                 if localUser:
-                    log.error("No principal for calendar user address: %s" % (recipient,))
+                    log.error("No record for calendar user address: %s" % (recipient,))
                 else:
                     log.error("Unknown calendar user address: %s" % (recipient,))
                 results.append(InvalidCalendarUser(recipient))
             else:
-                # Map recipient to their inbox
+                # Map recipient to their inbox and cache on calendar user object
                 inbox = None
-                if principal.calendarsEnabled():
-                    if principal.thisServer():
-                        recipient_home = yield self.txn.calendarHomeWithUID(principal.uid, create=True)
+                if recipientAddress.validRecipient():
+                    if isinstance(recipientAddress, LocalCalendarUser):
+                        recipient_home = yield self.txn.calendarHomeWithUID(recipientAddress.record.uid, create=True)
                         if recipient_home:
                             inbox = (yield recipient_home.calendarWithName("inbox"))
                     else:
                         inbox = "dummy"
+                    recipientAddress.inbox = inbox
 
                 if inbox:
-                    results.append(calendarUserFromPrincipal(recipient, principal, inbox))
+                    results.append(recipientAddress)
                 else:
-                    log.error("No schedule inbox for principal: %s" % (principal,))
+                    log.error("No scheduling for calendar user: %s" % (recipient,))
                     results.append(InvalidCalendarUser(recipient))
 
         self.recipients = results

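A minimal sketch of the recipient-resolution shape used above, assuming the same cuaddress helpers; the "dummy" inbox sentinel for hosted-but-not-local users mirrors the diff, while the resolveRecipient name is illustrative.

    from twisted.internet.defer import inlineCallbacks, returnValue

    from txdav.caldav.datastore.scheduling.cuaddress import (
        InvalidCalendarUser, LocalCalendarUser, calendarUserFromCalendarUserAddress,
    )


    @inlineCallbacks
    def resolveRecipient(txn, recipient):
        """
        Map one recipient calendar user address to a calendar user object
        with its schedule inbox cached, or to an InvalidCalendarUser.
        """
        cuuser = yield calendarUserFromCalendarUserAddress(recipient, txn)
        if not cuuser.hosted() or not cuuser.validRecipient():
            returnValue(InvalidCalendarUser(recipient))

        if isinstance(cuuser, LocalCalendarUser):
            home = yield txn.calendarHomeWithUID(cuuser.record.uid, create=True)
            cuuser.inbox = (yield home.calendarWithName("inbox")) if home else None
        else:
            cuuser.inbox = "dummy"  # hosted elsewhere; delivery happens on that pod

        returnValue(cuuser if cuuser.inbox else InvalidCalendarUser(recipient))
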
Added: CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_cuaddress.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_cuaddress.py	                        (rev 0)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_cuaddress.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -0,0 +1,103 @@
+##
+# Copyright (c) 2013-2014 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+"""
+Tests for txdav.caldav.datastore.scheduling.cuaddress
+"""
+
+from twisted.internet.defer import inlineCallbacks
+from twisted.trial import unittest
+
+from txdav.caldav.datastore.scheduling.cuaddress import calendarUserFromCalendarUserAddress, \
+    LocalCalendarUser, InvalidCalendarUser
+from txdav.caldav.datastore.test.util import buildCalendarStore
+from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
+
+
+class CalendarUser(CommonCommonTests, unittest.TestCase):
+    """
+    Tests for calendar user address lookup and classification
+    """
+
+    requirements = {
+        "user01" : {
+            "calendar1" : {},
+            "inbox" : {},
+        },
+        "user02" : {
+            "calendar2" : {},
+            "inbox" : {},
+        },
+        "user03" : {
+            "calendar3" : {},
+            "inbox" : {},
+        }
+    }
+
+    @inlineCallbacks
+    def setUp(self):
+
+        yield super(CalendarUser, self).setUp()
+        self._sqlCalendarStore = yield buildCalendarStore(self, self.notifierFactory)
+        self.directory = self._sqlCalendarStore.directoryService()
+        yield self.populate()
+        self.directory.removeRecord("user03")
+
+
+    @inlineCallbacks
+    def populate(self):
+        yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
+        self.notifierFactory.reset()
+
+
+    def storeUnderTest(self):
+        """
+        Create and return a L{CalendarStore} for testing.
+        """
+        return self._sqlCalendarStore
+
+
+    @inlineCallbacks
+    def test_lookup(self):
+        """
+        Test that L{calendarUserFromCalendarUserAddress} returns calendar users with the expected hosted, originator, and recipient validity.
+        """
+
+        txn = self.transactionUnderTest()
+        cu = yield calendarUserFromCalendarUserAddress("urn:x-uid:user01", txn)
+        yield self.commit()
+
+        self.assertTrue(isinstance(cu, LocalCalendarUser))
+        self.assertTrue(cu.hosted())
+        self.assertTrue(cu.validOriginator())
+        self.assertTrue(cu.validRecipient())
+
+        txn = self.transactionUnderTest()
+        cu = yield calendarUserFromCalendarUserAddress("mailto:foobar at example.org", txn)
+        yield self.commit()
+
+        self.assertTrue(isinstance(cu, InvalidCalendarUser))
+        self.assertFalse(cu.hosted())
+        self.assertFalse(cu.validOriginator())
+        self.assertFalse(cu.validRecipient())
+
+        txn = self.transactionUnderTest()
+        cu = yield calendarUserFromCalendarUserAddress("urn:x-uid:user03", txn)
+        yield self.commit()
+
+        self.assertTrue(isinstance(cu, LocalCalendarUser))
+        self.assertTrue(cu.hosted())
+        self.assertTrue(cu.validOriginator())
+        self.assertFalse(cu.validRecipient())

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_implicit.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_implicit.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_implicit.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -30,6 +30,7 @@
 from twistedcaldav.ical import Component
 from twistedcaldav.timezones import TimezoneCache
 
+from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser
 from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler, \
     ScheduleReplyWork
 from txdav.caldav.datastore.scheduling.scheduler import ScheduleResponseQueue
@@ -81,6 +82,15 @@
 
 
 
+class FakeTxn(object):
+
+    def directoryService(self):
+        if not hasattr(self, "_directoryService"):
+            self._directoryService = FakeDirectoryService()
+        return self._directoryService
+
+
+
 class Implicit(TestCase):
     """
     iCalendar support tests
@@ -817,6 +827,7 @@
             scheduler.calendar = Component.fromString(calendar2)
 
             scheduler.calendar_home = FakeCalendarHome("user01")
+            scheduler.txn = FakeTxn()
 
             yield scheduler.extractCalendarData()
             scheduler.findRemovedAttendees()
@@ -864,10 +875,14 @@
             scheduler.reinvites = None
 
             scheduler.calendar_home = FakeCalendarHome("user1")
+            scheduler.txn = FakeTxn()
 
             # Get some useful information from the calendar
             yield scheduler.extractCalendarData()
-            scheduler.organizerPrincipal = buildDirectoryRecord(scheduler.calendar_home.uid())
+            scheduler.organizerAddress = LocalCalendarUser(
+                "mailto:user1 at example.com",
+                buildDirectoryRecord(scheduler.calendar_home.uid()),
+            )
 
             recipients = []
 

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_utils.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_utils.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_utils.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -15,7 +15,7 @@
 ##
 
 """
-Tests for calendarserver.tools.purge
+Tests for txdav.caldav.datastore.scheduling.utils
 """
 
 from pycalendar.datetime import DateTime
@@ -24,7 +24,7 @@
 from twisted.trial import unittest
 
 from txdav.caldav.datastore.scheduling.utils import getCalendarObjectForRecord, \
-    extractEmailDomain
+    extractEmailDomain, uidFromCalendarUserAddress
 from txdav.caldav.datastore.test.util import buildCalendarStore, \
     buildDirectoryRecord
 from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
@@ -190,6 +190,25 @@
         yield self.commit()
 
 
+    def test_uidFromCalendarUserAddress(self):
+        """
+        Test that L{uidFromCalendarUserAddress} returns the expected results.
+        """
+
+        data = (
+            ("urn:x-uid:foobar", "foobar"),
+            ("urn:uuid:foobar", "foobar"),
+            ("urn:uuid:49DE7436-F01C-4AD8-B685-A94303F40301", "49DE7436-F01C-4AD8-B685-A94303F40301"),
+            ("/principals/__uids__/foobar", "foobar"),
+            ("/principals/users/foobar", None),
+            ("/principals/groups/foobar", None),
+            ("mailto:foo at example.com", None),
+        )
+
+        for cuaddr, uid in data:
+            self.assertEqual(uidFromCalendarUserAddress(cuaddr), uid)
+
+
     def test_extractEmailDomain(self):
         """
         Test that L{extractEmailDomain} returns the expected results.

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/utils.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/utils.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/utils.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -17,6 +17,7 @@
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twext.python.log import Logger
 from txdav.caldav.icalendarstore import ComponentRemoveState
+from uuid import UUID
 
 log = Logger()
 
@@ -51,6 +52,60 @@
 
 
 
+def normalizeCUAddr(addr):
+    """
+    Normalize a cuaddr string by lower()ing it if it's a mailto:, or
+    removing trailing slash if it's a URL.
+    @param addr: a cuaddr string to normalize
+    @return: normalized string
+    """
+    lower = addr.lower()
+    if lower.startswith("mailto:"):
+        addr = lower
+    if (addr.startswith("/") or
+        addr.startswith("http:") or
+        addr.startswith("https:")):
+        return addr.rstrip("/")
+    else:
+        return addr
+
+
+
+def uidFromCalendarUserAddress(address):
+    """
+    Try to extract a record UID from a calendar user address of the appropriate format.
+    Allowed formats are urn:x-uid, urn:uuid, or /principals/(__uids__).
+
+    @param address: calendar user address to operate on
+    @type address: L{str}
+
+    @return: the extracted uid or L{None}
+    @rtype: L{str} or L{None}
+    """
+
+    address = normalizeCUAddr(address)
+
+    if address.startswith("urn:x-uid:"):
+        return address[10:]
+
+    elif address.startswith("urn:uuid:"):
+        try:
+            UUID(address[9:])
+        except ValueError:
+            log.info("Invalid GUID: {guid}", guid=address[9:])
+            return address[9:]
+        else:
+            return address[9:]
+
+    elif address.startswith("/principals/__uids__"):
+        parts = address.split("/")
+        if len(parts) == 4:
+            return parts[3]
+
+    return None
+
+
+
 def extractEmailDomain(mailtoURI):
     try:
         addr = mailtoURI[7:].split("?")[0]

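Both helpers added above are pure functions, so a short usage sketch captures the intended mappings (the expected values follow the docstrings and the new test data):

    from txdav.caldav.datastore.scheduling.utils import (
        normalizeCUAddr, uidFromCalendarUserAddress,
    )

    # mailto: addresses are lower-cased as a whole
    assert normalizeCUAddr("mailto:Foo@Example.COM") == "mailto:foo@example.com"
    # path and http(s) addresses only lose a trailing slash
    assert normalizeCUAddr("/principals/__uids__/foobar/") == "/principals/__uids__/foobar"

    # UID extraction only succeeds for uid-style calendar user addresses
    assert uidFromCalendarUserAddress("urn:x-uid:foobar") == "foobar"
    assert uidFromCalendarUserAddress("/principals/__uids__/foobar") == "foobar"
    assert uidFromCalendarUserAddress("mailto:foo@example.com") is None
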
Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -25,14 +25,16 @@
 from twistedcaldav.config import config
 from twistedcaldav.ical import Component
 
+from txdav.caldav.datastore.scheduling.cuaddress import calendarUserFromCalendarUserUID
 from txdav.caldav.datastore.scheduling.itip import iTipGenerator, iTIPRequestStatus
 from txdav.caldav.icalendarstore import ComponentUpdateState
 from txdav.common.datastore.sql_tables import schema, \
     scheduleActionToSQL, scheduleActionFromSQL
 
+from pycalendar.datetime import DateTime
+
 import datetime
 import hashlib
-from pycalendar.datetime import DateTime
 import traceback
 
 __all__ = [
@@ -191,8 +193,8 @@
         try:
             home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
             resource = (yield home.objectResourceWithID(self.resourceID))
-            organizerPrincipal = yield home.directoryService().recordWithUID(home.uid().decode("utf-8"))
-            organizer = organizerPrincipal.canonicalCalendarUserAddress()
+            organizerAddress = yield calendarUserFromCalendarUserUID(home.uid(), self.transaction)
+            organizer = organizerAddress.record.canonicalCalendarUserAddress()
             calendar_old = Component.fromString(self.icalendarTextOld) if self.icalendarTextOld else None
             calendar_new = Component.fromString(self.icalendarTextNew) if self.icalendarTextNew else None
 
@@ -313,8 +315,8 @@
         try:
             home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
             resource = (yield home.objectResourceWithID(self.resourceID))
-            attendeePrincipal = yield home.directoryService().recordWithUID(home.uid().decode("utf-8"))
-            attendee = attendeePrincipal.canonicalCalendarUserAddress()
+            attendeeAddress = yield calendarUserFromCalendarUserUID(home.uid(), self.transaction)
+            attendee = attendeeAddress.record.canonicalCalendarUserAddress()
             calendar = (yield resource.componentForUser())
             organizer = calendar.validOrganizerForScheduling()
 
@@ -384,8 +386,8 @@
 
         try:
             home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
-            attendeePrincipal = yield home.directoryService().recordWithUID(home.uid().decode("utf-8"))
-            attendee = attendeePrincipal.canonicalCalendarUserAddress()
+            attendeeAddress = yield calendarUserFromCalendarUserUID(home.uid(), self.transaction)
+            attendee = attendeeAddress.record.canonicalCalendarUserAddress()
             calendar = Component.fromString(self.icalendarText)
             organizer = calendar.validOrganizerForScheduling()
 

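The work-item changes above share one pattern: resolve the home owner's UID to a calendar user object and take the canonical address from its record. A minimal sketch, with the home and txn arguments standing in for the work item's own attributes:

    from twisted.internet.defer import inlineCallbacks, returnValue

    from txdav.caldav.datastore.scheduling.cuaddress import calendarUserFromCalendarUserUID


    @inlineCallbacks
    def canonicalAddressForHome(home, txn):
        """
        Return the canonical calendar user address of a calendar home's owner.
        """
        cuuser = yield calendarUserFromCalendarUserUID(home.uid(), txn)
        returnValue(cuuser.record.canonicalCalendarUserAddress())
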
Modified: CalendarServer/trunk/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/sql.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -61,8 +61,10 @@
 from txdav.caldav.datastore.query.builder import buildExpression
 from txdav.caldav.datastore.query.filter import Filter
 from txdav.caldav.datastore.query.generator import CalDAVSQLQueryGenerator
+from txdav.caldav.datastore.scheduling.cuaddress import calendarUserFromCalendarUserAddress
 from txdav.caldav.datastore.scheduling.icalsplitter import iCalSplitter
 from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
+from txdav.caldav.datastore.scheduling.utils import uidFromCalendarUserAddress
 from txdav.caldav.datastore.util import AttachmentRetrievalTransport, \
     normalizationLookup
 from txdav.caldav.datastore.util import CalendarObjectBase
@@ -1993,8 +1995,6 @@
             if groupRecord is not None:
                 groupUID = groupRecord.uid
             else:
-                #FIXME:  here to avoid circular import
-                from txdav.who.util import uidFromCalendarUserAddress
                 groupUID = uidFromCalendarUserAddress(groupCUA)
             groupID, _ignore_name, membershipHash, _ignore_modDate = yield self._txn.groupByUID(groupUID)
 
@@ -3931,10 +3931,9 @@
             raise InvalidSplit()
 
         # Cannot be attendee
-        ownerPrincipal = yield self.calendar().ownerHome().directoryRecord()
         organizer = component.getOrganizer()
-        organizerPrincipal = (yield self.directoryService().recordWithCalendarUserAddress(organizer)) if organizer else None
-        if organizer is not None and organizerPrincipal.uid != ownerPrincipal.uid:
+        organizerAddress = (yield calendarUserFromCalendarUserAddress(organizer, self._txn)) if organizer else None
+        if organizer is not None and organizerAddress.record.uid != self.calendar().ownerHome().uid():
             raise InvalidSplit()
 
         # Determine valid split point

Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -146,6 +146,27 @@
 
 
     @inlineCallbacks
+    def test_purgingHome(self):
+        """
+        Purging a calendar home changes its status.
+        """
+
+        home = yield self.homeUnderTest(name="home1")
+        self.assertFalse(home.purging())
+        self.assertFalse(home.external())
+
+        yield home.purge()
+        self.assertTrue(home.purging())
+        self.assertFalse(home.external())
+
+        yield self.commit()
+
+        home = yield self.homeUnderTest(name="home1")
+        self.assertTrue(home is not None)
+        yield self.commit()
+
+
+    @inlineCallbacks
     def test_migrateCalendarFromFile(self):
         """
         C{_migrateCalendar()} can migrate a file-backed calendar to a database-

Modified: CalendarServer/trunk/txdav/caldav/datastore/test/util.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/test/util.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/util.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -49,7 +49,14 @@
             self.recordsByCUA[cuaddr] = record
 
 
+    def removeRecord(self, uid):
+        record = self.records[uid]
+        del self.records[uid]
+        for cuaddr in record.calendarUserAddresses:
+            del self.recordsByCUA[cuaddr]
 
+
+
 class TestCalendarStoreDirectoryRecord(TestStoreDirectoryRecord):
 
     implements(ICalendarStoreDirectoryRecord)

Modified: CalendarServer/trunk/txdav/carddav/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/sql.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/carddav/datastore/sql.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -246,7 +246,7 @@
         Get the owner home for a shared child ID
         """
         # addressbook and home have same resourceID
-        ownerHome = yield self._txn.homeWithResourceID(self._homeType, resourceID, create=True)
+        ownerHome = yield self._txn.homeWithResourceID(self._homeType, resourceID)
         returnValue(ownerHome)
 
 
@@ -1120,7 +1120,7 @@
         for groupRow in groupRows:
             bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
             ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
-            ownerHome = yield home._txn.homeWithResourceID(home._homeType, ownerAddressBookID, create=True)
+            ownerHome = yield home._txn.homeWithResourceID(home._homeType, ownerAddressBookID)
             names |= set([ownerHome.uid()])
 
         returnValue(tuple(names))

Modified: CalendarServer/trunk/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/common/datastore/sql.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -64,7 +64,8 @@
 from txdav.common.datastore.sql_tables import _BIND_MODE_OWN, \
     _BIND_STATUS_ACCEPTED, _BIND_STATUS_DECLINED, _BIND_STATUS_INVALID, \
     _BIND_STATUS_INVITED, _BIND_MODE_DIRECT, _BIND_STATUS_DELETED, \
-    _BIND_MODE_INDIRECT, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL
+    _BIND_MODE_INDIRECT, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL, \
+    _HOME_STATUS_PURGING
 from txdav.common.datastore.sql_tables import schema, splitSQLString
 from txdav.common.icommondatastore import ConcurrentModification, \
     RecordNotAllowedError, ExternalShareFailed, ShareNotAllowed, \
@@ -208,6 +209,13 @@
         __import__("txdav.carddav.datastore.sql")
 
 
+    def availablePrimaryStoreTypes(self):
+        """
+        The list of store home types supported.
+        """
+        return (ECALENDARTYPE, EADDRESSBOOKTYPE,)
+
+
     def directoryService(self):
         return self._directoryService
 
@@ -365,7 +373,37 @@
         return self.queryCacher is not None
 
 
+    @inlineCallbacks
+    def uidInStore(self, txn, uid):
+        """
+        Indicate whether the specified user UID is hosted in the current store, or
+        possibly in another pod.
 
+        @param txn: transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param uid: the user UID to test
+        @type uid: L{str}
+
+        @return: a tuple of L{bool}, L{str} - the first indicates whether the user is
+            hosted, the second the serviceNodeUID of the pod hosting the user or
+            C{None} if on this pod.
+        @rtype: L{tuple}
+        """
+
+        # Check if locally stored first
+        for storeType in self.availablePrimaryStoreTypes():
+            home = yield txn.homeWithUID(storeType, uid)
+            if home is not None:
+                if home.external():
+                    # TODO: locate the pod where the user is hosted
+                    returnValue((True, "unknown",))
+                else:
+                    returnValue((True, None,))
+        else:
+            returnValue((False, None,))
+
+
+
 class TransactionStatsCollector(object):
     """
     Used to log each SQL query and statistics about that query during the course of a single transaction.
@@ -698,14 +736,14 @@
 
 
     @inlineCallbacks
-    def homeWithResourceID(self, storeType, rid, create=False):
+    def homeWithResourceID(self, storeType, rid):
         """
         Load a calendar or addressbook home by its integer resource ID.
         """
         uid = (yield self._homeClass[storeType]
                .homeUIDWithResourceID(self, rid))
         if uid:
-            result = (yield self.homeWithUID(storeType, uid, create))
+            result = (yield self.homeWithUID(storeType, uid))
         else:
             result = None
         returnValue(result)
@@ -3108,9 +3146,33 @@
 
         @return: a string.
         """
-        return False
+        return self._status == _HOME_STATUS_EXTERNAL
 
 
+    def purging(self):
+        """
+        Is this home in the process of being purged.
+
+        @return: a L{bool}.
+        """
+        return self._status == _HOME_STATUS_PURGING
+
+
+    @inlineCallbacks
+    def purge(self):
+        """
+        Mark this home as being purged.
+        """
+        # Only if normal
+        if self._status == _HOME_STATUS_NORMAL:
+            yield Update(
+                {self._homeSchema.STATUS: _HOME_STATUS_PURGING},
+                Where=(self._homeSchema.RESOURCE_ID == self._resourceID),
+            ).on(self._txn)
+            self._status = _HOME_STATUS_PURGING
+            yield self._cacher.delete(self._ownerUID)
+
+
     def transaction(self):
         return self._txn
 

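A hedged sketch of how a caller might drive the new store API above; only uidInStore(), purging() and purge() come from this change, while the purgeHomeIfMissing helper, its stillInDirectory flag, and the explicit create=False lookup are assumptions of the sketch.

    from twisted.internet.defer import inlineCallbacks


    @inlineCallbacks
    def purgeHomeIfMissing(store, txn, uid, stillInDirectory):
        """
        If a UID no longer resolves in the directory but still owns data on
        this pod, flip its calendar home into the 'purging' state.
        """
        hosted, podUID = yield store.uidInStore(txn, uid)
        if hosted and podUID is None and not stillInDirectory:
            home = yield txn.calendarHomeWithUID(uid, create=False)
            if home is not None and not home.purging():
                yield home.purge()  # sets STATUS to the purging value
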
Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql	2014-05-02 20:07:26 UTC (rev 13408)
@@ -39,6 +39,7 @@
 
 insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
 insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('purging', 2);
 create table CALENDAR (
     "RESOURCE_ID" integer primary key
 );
@@ -555,12 +556,18 @@
     "UID" nvarchar2(255)
 );
 
+create table PRINCIPAL_PURGE_HOME_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade
+);
+
 create table CALENDARSERVER (
     "NAME" nvarchar2(255) primary key,
     "VALUE" nvarchar2(255)
 );
 
-insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '40');
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '41');
 insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '6');
 insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
 insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
@@ -841,6 +848,14 @@
     JOB_ID
 );
 
+create index PRINCIPAL_PURGE_HOME__f35eea7a on PRINCIPAL_PURGE_HOME_WORK (
+    JOB_ID
+);
+
+create index PRINCIPAL_PURGE_HOME__967e4480 on PRINCIPAL_PURGE_HOME_WORK (
+    HOME_RESOURCE_ID
+);
+
 -- Skipped Function next_job
 
 -- Extras

Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql	2014-05-02 20:07:26 UTC (rev 13408)
@@ -91,6 +91,7 @@
 
 insert into HOME_STATUS values (0, 'normal' );
 insert into HOME_STATUS values (1, 'external');
+insert into HOME_STATUS values (2, 'purging');
 
 
 --------------
@@ -1055,6 +1056,22 @@
   PRINCIPAL_PURGE_WORK(JOB_ID);
 
 
+--------------------------------
+-- Principal Home Remove Work --
+--------------------------------
+
+create table PRINCIPAL_PURGE_HOME_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade
+);
+
+create index PRINCIPAL_PURGE_HOME_WORK_JOB_ID on
+  PRINCIPAL_PURGE_HOME_WORK(JOB_ID);
+create index PRINCIPAL_PURGE_HOME_HOME_RESOURCE_ID on
+  PRINCIPAL_PURGE_HOME_WORK(HOME_RESOURCE_ID);
+
+
 --------------------
 -- Schema Version --
 --------------------
@@ -1064,7 +1081,7 @@
   VALUE                         varchar(255)
 );
 
-insert into CALENDARSERVER values ('VERSION', '40');
+insert into CALENDARSERVER values ('VERSION', '41');
 insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '6');
 insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
 insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');

Added: CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v40.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v40.sql	                        (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v40.sql	2014-05-02 20:07:26 UTC (rev 13408)
@@ -0,0 +1,858 @@
+create sequence RESOURCE_ID_SEQ;
+create sequence JOB_SEQ;
+create sequence INSTANCE_ID_SEQ;
+create sequence ATTACHMENT_ID_SEQ;
+create sequence REVISION_SEQ;
+create sequence WORKITEM_SEQ;
+create table NODE_INFO (
+    "HOSTNAME" nvarchar2(255),
+    "PID" integer not null,
+    "PORT" integer not null,
+    "TIME" timestamp default CURRENT_TIMESTAMP at time zone 'UTC' not null, 
+    primary key ("HOSTNAME", "PORT")
+);
+
+create table NAMED_LOCK (
+    "LOCK_NAME" nvarchar2(255) primary key
+);
+
+create table JOB (
+    "JOB_ID" integer primary key not null,
+    "WORK_TYPE" nvarchar2(255),
+    "PRIORITY" integer default 0,
+    "WEIGHT" integer default 0,
+    "NOT_BEFORE" timestamp default null,
+    "NOT_AFTER" timestamp default null
+);
+
+create table CALENDAR_HOME (
+    "RESOURCE_ID" integer primary key,
+    "OWNER_UID" nvarchar2(255) unique,
+    "STATUS" integer default 0 not null,
+    "DATAVERSION" integer default 0 not null
+);
+
+create table HOME_STATUS (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
+create table CALENDAR (
+    "RESOURCE_ID" integer primary key
+);
+
+create table CALENDAR_HOME_METADATA (
+    "RESOURCE_ID" integer primary key references CALENDAR_HOME on delete cascade,
+    "QUOTA_USED_BYTES" integer default 0 not null,
+    "DEFAULT_EVENTS" integer default null references CALENDAR on delete set null,
+    "DEFAULT_TASKS" integer default null references CALENDAR on delete set null,
+    "DEFAULT_POLLS" integer default null references CALENDAR on delete set null,
+    "ALARM_VEVENT_TIMED" nclob default null,
+    "ALARM_VEVENT_ALLDAY" nclob default null,
+    "ALARM_VTODO_TIMED" nclob default null,
+    "ALARM_VTODO_ALLDAY" nclob default null,
+    "AVAILABILITY" nclob default null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_METADATA (
+    "RESOURCE_ID" integer primary key references CALENDAR on delete cascade,
+    "SUPPORTED_COMPONENTS" nvarchar2(255) default null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table NOTIFICATION_HOME (
+    "RESOURCE_ID" integer primary key,
+    "OWNER_UID" nvarchar2(255) unique,
+    "STATUS" integer default 0 not null,
+    "DATAVERSION" integer default 0 not null
+);
+
+create table NOTIFICATION (
+    "RESOURCE_ID" integer primary key,
+    "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME,
+    "NOTIFICATION_UID" nvarchar2(255),
+    "NOTIFICATION_TYPE" nvarchar2(255),
+    "NOTIFICATION_DATA" nclob,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique ("NOTIFICATION_UID", "NOTIFICATION_HOME_RESOURCE_ID")
+);
+
+create table CALENDAR_BIND (
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "EXTERNAL_ID" integer default null,
+    "CALENDAR_RESOURCE_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "BIND_REVISION" integer default 0 not null,
+    "MESSAGE" nclob,
+    "TRANSP" integer default 0 not null,
+    "ALARM_VEVENT_TIMED" nclob default null,
+    "ALARM_VEVENT_ALLDAY" nclob default null,
+    "ALARM_VTODO_TIMED" nclob default null,
+    "ALARM_VTODO_ALLDAY" nclob default null,
+    "TIMEZONE" nclob default null, 
+    primary key ("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_ID"), 
+    unique ("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_NAME")
+);
+
+create table CALENDAR_BIND_MODE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('own', 0);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('write', 2);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('direct', 3);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('indirect', 4);
+create table CALENDAR_BIND_STATUS (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invited', 0);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('accepted', 1);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('declined', 2);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invalid', 3);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('deleted', 4);
+create table CALENDAR_TRANSP (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('opaque', 0);
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('transparent', 1);
+create table CALENDAR_OBJECT (
+    "RESOURCE_ID" integer primary key,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "ICALENDAR_TEXT" nclob,
+    "ICALENDAR_UID" nvarchar2(255),
+    "ICALENDAR_TYPE" nvarchar2(255),
+    "ATTACHMENTS_MODE" integer default 0 not null,
+    "DROPBOX_ID" nvarchar2(255),
+    "ORGANIZER" nvarchar2(255),
+    "RECURRANCE_MIN" date,
+    "RECURRANCE_MAX" date,
+    "ACCESS" integer default 0 not null,
+    "SCHEDULE_OBJECT" integer default 0,
+    "SCHEDULE_TAG" nvarchar2(36) default null,
+    "SCHEDULE_ETAGS" nclob default null,
+    "PRIVATE_COMMENTS" integer default 0 not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique ("CALENDAR_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table CALENDAR_OBJ_ATTACHMENTS_MODE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE (DESCRIPTION, ID) values ('none', 0);
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE (DESCRIPTION, ID) values ('write', 2);
+create table CALENDAR_ACCESS_TYPE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(32) unique
+);
+
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('', 0);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('public', 1);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('private', 2);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('confidential', 3);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('restricted', 4);
+create table TIME_RANGE (
+    "INSTANCE_ID" integer primary key,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+    "FLOATING" integer not null,
+    "START_DATE" timestamp not null,
+    "END_DATE" timestamp not null,
+    "FBTYPE" integer not null,
+    "TRANSPARENT" integer not null
+);
+
+create table FREE_BUSY_TYPE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('unknown', 0);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('free', 1);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy', 2);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-unavailable', 3);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-tentative', 4);
+create table PERUSER (
+    "TIME_RANGE_INSTANCE_ID" integer not null references TIME_RANGE on delete cascade,
+    "USER_ID" nvarchar2(255),
+    "TRANSPARENT" integer not null,
+    "ADJUSTED_START_DATE" timestamp default null,
+    "ADJUSTED_END_DATE" timestamp default null
+);
+
+create table ATTACHMENT (
+    "ATTACHMENT_ID" integer primary key,
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "DROPBOX_ID" nvarchar2(255),
+    "CONTENT_TYPE" nvarchar2(255),
+    "SIZE" integer not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "PATH" nvarchar2(1024)
+);
+
+create table ATTACHMENT_CALENDAR_OBJECT (
+    "ATTACHMENT_ID" integer not null references ATTACHMENT on delete cascade,
+    "MANAGED_ID" nvarchar2(255),
+    "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade, 
+    primary key ("ATTACHMENT_ID", "CALENDAR_OBJECT_RESOURCE_ID"), 
+    unique ("MANAGED_ID", "CALENDAR_OBJECT_RESOURCE_ID")
+);
+
+create table RESOURCE_PROPERTY (
+    "RESOURCE_ID" integer not null,
+    "NAME" nvarchar2(255),
+    "VALUE" nclob,
+    "VIEWER_UID" nvarchar2(255), 
+    primary key ("RESOURCE_ID", "NAME", "VIEWER_UID")
+);
+
+create table ADDRESSBOOK_HOME (
+    "RESOURCE_ID" integer primary key,
+    "ADDRESSBOOK_PROPERTY_STORE_ID" integer not null,
+    "OWNER_UID" nvarchar2(255) unique,
+    "STATUS" integer default 0 not null,
+    "DATAVERSION" integer default 0 not null
+);
+
+create table ADDRESSBOOK_HOME_METADATA (
+    "RESOURCE_ID" integer primary key references ADDRESSBOOK_HOME on delete cascade,
+    "QUOTA_USED_BYTES" integer default 0 not null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table SHARED_ADDRESSBOOK_BIND (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "OWNER_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "EXTERNAL_ID" integer default null,
+    "ADDRESSBOOK_RESOURCE_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "BIND_REVISION" integer default 0 not null,
+    "MESSAGE" nclob, 
+    primary key ("ADDRESSBOOK_HOME_RESOURCE_ID", "OWNER_HOME_RESOURCE_ID"), 
+    unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "ADDRESSBOOK_RESOURCE_NAME")
+);
+
+create table ADDRESSBOOK_OBJECT (
+    "RESOURCE_ID" integer primary key,
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "VCARD_TEXT" nclob,
+    "VCARD_UID" nvarchar2(255),
+    "KIND" integer not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "RESOURCE_NAME"), 
+    unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "VCARD_UID")
+);
+
+create table ADDRESSBOOK_OBJECT_KIND (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('person', 0);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('group', 1);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('resource', 2);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('location', 3);
+create table ABO_MEMBERS (
+    "GROUP_ID" integer not null,
+    "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "MEMBER_ID" integer not null,
+    "REVISION" integer not null,
+    "REMOVED" integer default 0 not null,
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    primary key ("GROUP_ID", "MEMBER_ID", "REVISION")
+);
+
+create table ABO_FOREIGN_MEMBERS (
+    "GROUP_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+    "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "MEMBER_ADDRESS" nvarchar2(255), 
+    primary key ("GROUP_ID", "MEMBER_ADDRESS")
+);
+
+create table SHARED_GROUP_BIND (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "GROUP_RESOURCE_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+    "EXTERNAL_ID" integer default null,
+    "GROUP_ADDRESSBOOK_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "BIND_REVISION" integer default 0 not null,
+    "MESSAGE" nclob, 
+    primary key ("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_RESOURCE_ID"), 
+    unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_ADDRESSBOOK_NAME")
+);
+
+create table CALENDAR_OBJECT_REVISIONS (
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "CALENDAR_RESOURCE_ID" integer references CALENDAR,
+    "CALENDAR_NAME" nvarchar2(255) default null,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null,
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "OWNER_HOME_RESOURCE_ID" integer references ADDRESSBOOK_HOME,
+    "ADDRESSBOOK_NAME" nvarchar2(255) default null,
+    "OBJECT_RESOURCE_ID" integer default 0,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null,
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+    "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null,
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique ("NOTIFICATION_HOME_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table APN_SUBSCRIPTIONS (
+    "TOKEN" nvarchar2(255),
+    "RESOURCE_KEY" nvarchar2(255),
+    "MODIFIED" integer not null,
+    "SUBSCRIBER_GUID" nvarchar2(255),
+    "USER_AGENT" nvarchar2(255) default null,
+    "IP_ADDR" nvarchar2(255) default null, 
+    primary key ("TOKEN", "RESOURCE_KEY")
+);
+
+create table IMIP_TOKENS (
+    "TOKEN" nvarchar2(255),
+    "ORGANIZER" nvarchar2(255),
+    "ATTENDEE" nvarchar2(255),
+    "ICALUID" nvarchar2(255),
+    "ACCESSED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    primary key ("ORGANIZER", "ATTENDEE", "ICALUID")
+);
+
+create table IMIP_INVITATION_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "FROM_ADDR" nvarchar2(255),
+    "TO_ADDR" nvarchar2(255),
+    "ICALENDAR_TEXT" nclob
+);
+
+create table IMIP_POLLING_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB
+);
+
+create table IMIP_REPLY_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "ORGANIZER" nvarchar2(255),
+    "ATTENDEE" nvarchar2(255),
+    "ICALENDAR_TEXT" nclob
+);
+
+create table PUSH_NOTIFICATION_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "PUSH_ID" nvarchar2(255),
+    "PUSH_PRIORITY" integer not null
+);
+
+create table GROUP_CACHER_POLLING_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB
+);
+
+create table GROUP_REFRESH_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "GROUP_UID" nvarchar2(255)
+);
+
+create table GROUP_ATTENDEE_RECONCILE_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "RESOURCE_ID" integer,
+    "GROUP_ID" integer
+);
+
+create table GROUPS (
+    "GROUP_ID" integer primary key,
+    "NAME" nvarchar2(255),
+    "GROUP_UID" nvarchar2(255),
+    "MEMBERSHIP_HASH" nvarchar2(255),
+    "EXTANT" integer default 1,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table GROUP_MEMBERSHIP (
+    "GROUP_ID" integer not null references GROUPS on delete cascade,
+    "MEMBER_UID" nvarchar2(255), 
+    primary key ("GROUP_ID", "MEMBER_UID")
+);
+
+create table GROUP_ATTENDEE (
+    "GROUP_ID" integer not null references GROUPS on delete cascade,
+    "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+    "MEMBERSHIP_HASH" nvarchar2(255), 
+    primary key ("GROUP_ID", "RESOURCE_ID")
+);
+
+create table DELEGATES (
+    "DELEGATOR" nvarchar2(255),
+    "DELEGATE" nvarchar2(255),
+    "READ_WRITE" integer not null, 
+    primary key ("DELEGATOR", "READ_WRITE", "DELEGATE")
+);
+
+create table DELEGATE_GROUPS (
+    "DELEGATOR" nvarchar2(255),
+    "GROUP_ID" integer not null references GROUPS on delete cascade,
+    "READ_WRITE" integer not null,
+    "IS_EXTERNAL" integer not null, 
+    primary key ("DELEGATOR", "READ_WRITE", "GROUP_ID")
+);
+
+create table EXTERNAL_DELEGATE_GROUPS (
+    "DELEGATOR" nvarchar2(255) primary key,
+    "GROUP_UID_READ" nvarchar2(255),
+    "GROUP_UID_WRITE" nvarchar2(255)
+);
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create table FIND_MIN_VALID_REVISION_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB
+);
+
+create table REVISION_CLEANUP_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB
+);
+
+create table INBOX_CLEANUP_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB
+);
+
+create table CLEANUP_ONE_INBOX_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "HOME_ID" integer not null unique references CALENDAR_HOME on delete cascade
+);
+
+create table SCHEDULE_REFRESH_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "ICALENDAR_UID" nvarchar2(255),
+    "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade,
+    "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+    "ATTENDEE_COUNT" integer
+);
+
+create table SCHEDULE_REFRESH_ATTENDEES (
+    "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+    "ATTENDEE" nvarchar2(255), 
+    primary key ("RESOURCE_ID", "ATTENDEE")
+);
+
+create table SCHEDULE_AUTO_REPLY_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "ICALENDAR_UID" nvarchar2(255),
+    "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade,
+    "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+    "PARTSTAT" nvarchar2(255)
+);
+
+create table SCHEDULE_ORGANIZER_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "ICALENDAR_UID" nvarchar2(255),
+    "SCHEDULE_ACTION" integer not null,
+    "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade,
+    "RESOURCE_ID" integer,
+    "ICALENDAR_TEXT_OLD" nclob,
+    "ICALENDAR_TEXT_NEW" nclob,
+    "ATTENDEE_COUNT" integer,
+    "SMART_MERGE" integer
+);
+
+create table SCHEDULE_ACTION (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('create', 0);
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('modify', 1);
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('modify-cancelled', 2);
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('remove', 3);
+create table SCHEDULE_REPLY_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "ICALENDAR_UID" nvarchar2(255),
+    "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade,
+    "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+    "CHANGED_RIDS" nclob
+);
+
+create table SCHEDULE_REPLY_CANCEL_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "ICALENDAR_UID" nvarchar2(255),
+    "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade,
+    "ICALENDAR_TEXT" nclob
+);
+
+create table PRINCIPAL_PURGE_POLLING_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB
+);
+
+create table PRINCIPAL_PURGE_CHECK_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "UID" nvarchar2(255)
+);
+
+create table PRINCIPAL_PURGE_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "UID" nvarchar2(255)
+);
+
+create table CALENDARSERVER (
+    "NAME" nvarchar2(255) primary key,
+    "VALUE" nvarchar2(255)
+);
+
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '40');
+insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '6');
+insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
+insert into CALENDARSERVER (NAME, VALUE) values ('MIN-VALID-REVISION', '1');
+create index CALENDAR_HOME_METADAT_3cb9049e on CALENDAR_HOME_METADATA (
+    DEFAULT_EVENTS
+);
+
+create index CALENDAR_HOME_METADAT_d55e5548 on CALENDAR_HOME_METADATA (
+    DEFAULT_TASKS
+);
+
+create index CALENDAR_HOME_METADAT_910264ce on CALENDAR_HOME_METADATA (
+    DEFAULT_POLLS
+);
+
+create index NOTIFICATION_NOTIFICA_f891f5f9 on NOTIFICATION (
+    NOTIFICATION_HOME_RESOURCE_ID
+);
+
+create index CALENDAR_BIND_RESOURC_e57964d4 on CALENDAR_BIND (
+    CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_CALEN_a9a453a9 on CALENDAR_OBJECT (
+    CALENDAR_RESOURCE_ID,
+    ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_CALEN_96e83b73 on CALENDAR_OBJECT (
+    CALENDAR_RESOURCE_ID,
+    RECURRANCE_MAX
+);
+
+create index CALENDAR_OBJECT_ICALE_82e731d5 on CALENDAR_OBJECT (
+    ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_DROPB_de041d80 on CALENDAR_OBJECT (
+    DROPBOX_ID
+);
+
+create index TIME_RANGE_CALENDAR_R_beb6e7eb on TIME_RANGE (
+    CALENDAR_RESOURCE_ID
+);
+
+create index TIME_RANGE_CALENDAR_O_acf37bd1 on TIME_RANGE (
+    CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index PERUSER_TIME_RANGE_IN_5468a226 on PERUSER (
+    TIME_RANGE_INSTANCE_ID
+);
+
+create index ATTACHMENT_CALENDAR_H_0078845c on ATTACHMENT (
+    CALENDAR_HOME_RESOURCE_ID
+);
+
+create index ATTACHMENT_DROPBOX_ID_5073cf23 on ATTACHMENT (
+    DROPBOX_ID
+);
+
+create index ATTACHMENT_CALENDAR_O_81508484 on ATTACHMENT_CALENDAR_OBJECT (
+    CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index SHARED_ADDRESSBOOK_BI_e9a2e6d4 on SHARED_ADDRESSBOOK_BIND (
+    OWNER_HOME_RESOURCE_ID
+);
+
+create index ABO_MEMBERS_ADDRESSBO_4effa879 on ABO_MEMBERS (
+    ADDRESSBOOK_ID
+);
+
+create index ABO_MEMBERS_MEMBER_ID_8d66adcf on ABO_MEMBERS (
+    MEMBER_ID
+);
+
+create index ABO_FOREIGN_MEMBERS_A_1fd2c5e9 on ABO_FOREIGN_MEMBERS (
+    ADDRESSBOOK_ID
+);
+
+create index SHARED_GROUP_BIND_RES_cf52f95d on SHARED_GROUP_BIND (
+    GROUP_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_3a3956c4 on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_HOME_RESOURCE_ID,
+    CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_6d9d929c on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
+);
+
+create index CALENDAR_OBJECT_REVIS_265c8acf on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_2bfcf757 on ADDRESSBOOK_OBJECT_REVISIONS (
+    ADDRESSBOOK_HOME_RESOURCE_ID,
+    OWNER_HOME_RESOURCE_ID
+);
+
+create index ADDRESSBOOK_OBJECT_RE_00fe8288 on ADDRESSBOOK_OBJECT_REVISIONS (
+    OWNER_HOME_RESOURCE_ID,
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_45004780 on ADDRESSBOOK_OBJECT_REVISIONS (
+    OWNER_HOME_RESOURCE_ID,
+    REVISION
+);
+
+create index NOTIFICATION_OBJECT_R_036a9cee on NOTIFICATION_OBJECT_REVISIONS (
+    NOTIFICATION_HOME_RESOURCE_ID,
+    REVISION
+);
+
+create index APN_SUBSCRIPTIONS_RES_9610d78e on APN_SUBSCRIPTIONS (
+    RESOURCE_KEY
+);
+
+create index IMIP_TOKENS_TOKEN_e94b918f on IMIP_TOKENS (
+    TOKEN
+);
+
+create index IMIP_INVITATION_WORK__586d064c on IMIP_INVITATION_WORK (
+    JOB_ID
+);
+
+create index IMIP_POLLING_WORK_JOB_d5535891 on IMIP_POLLING_WORK (
+    JOB_ID
+);
+
+create index IMIP_REPLY_WORK_JOB_I_bf4ae73e on IMIP_REPLY_WORK (
+    JOB_ID
+);
+
+create index PUSH_NOTIFICATION_WOR_8bbab117 on PUSH_NOTIFICATION_WORK (
+    JOB_ID
+);
+
+create index GROUP_CACHER_POLLING__6eb3151c on GROUP_CACHER_POLLING_WORK (
+    JOB_ID
+);
+
+create index GROUP_REFRESH_WORK_JO_717ede20 on GROUP_REFRESH_WORK (
+    JOB_ID
+);
+
+create index GROUP_ATTENDEE_RECONC_da73d3c2 on GROUP_ATTENDEE_RECONCILE_WORK (
+    JOB_ID
+);
+
+create index GROUPS_GROUP_UID_b35cce23 on GROUPS (
+    GROUP_UID
+);
+
+create index GROUP_MEMBERSHIP_MEMB_0ca508e8 on GROUP_MEMBERSHIP (
+    MEMBER_UID
+);
+
+create index GROUP_ATTENDEE_RESOUR_855124dc on GROUP_ATTENDEE (
+    RESOURCE_ID
+);
+
+create index DELEGATE_TO_DELEGATOR_5e149b11 on DELEGATES (
+    DELEGATE,
+    READ_WRITE,
+    DELEGATOR
+);
+
+create index DELEGATE_GROUPS_GROUP_25117446 on DELEGATE_GROUPS (
+    GROUP_ID
+);
+
+create index CALENDAR_OBJECT_SPLIT_af71dcda on CALENDAR_OBJECT_SPLITTER_WORK (
+    RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_SPLIT_33603b72 on CALENDAR_OBJECT_SPLITTER_WORK (
+    JOB_ID
+);
+
+create index FIND_MIN_VALID_REVISI_78d17400 on FIND_MIN_VALID_REVISION_WORK (
+    JOB_ID
+);
+
+create index REVISION_CLEANUP_WORK_eb062686 on REVISION_CLEANUP_WORK (
+    JOB_ID
+);
+
+create index INBOX_CLEANUP_WORK_JO_799132bd on INBOX_CLEANUP_WORK (
+    JOB_ID
+);
+
+create index CLEANUP_ONE_INBOX_WOR_375dac36 on CLEANUP_ONE_INBOX_WORK (
+    JOB_ID
+);
+
+create index SCHEDULE_REFRESH_WORK_26084c7b on SCHEDULE_REFRESH_WORK (
+    HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_REFRESH_WORK_989efe54 on SCHEDULE_REFRESH_WORK (
+    RESOURCE_ID
+);
+
+create index SCHEDULE_REFRESH_WORK_3ffa2718 on SCHEDULE_REFRESH_WORK (
+    JOB_ID
+);
+
+create index SCHEDULE_REFRESH_ATTE_83053b91 on SCHEDULE_REFRESH_ATTENDEES (
+    RESOURCE_ID,
+    ATTENDEE
+);
+
+create index SCHEDULE_AUTO_REPLY_W_0256478d on SCHEDULE_AUTO_REPLY_WORK (
+    HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_AUTO_REPLY_W_0755e754 on SCHEDULE_AUTO_REPLY_WORK (
+    RESOURCE_ID
+);
+
+create index SCHEDULE_AUTO_REPLY_W_4d7bb5a8 on SCHEDULE_AUTO_REPLY_WORK (
+    JOB_ID
+);
+
+create index SCHEDULE_ORGANIZER_WO_18ce4edd on SCHEDULE_ORGANIZER_WORK (
+    HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_ORGANIZER_WO_14702035 on SCHEDULE_ORGANIZER_WORK (
+    RESOURCE_ID
+);
+
+create index SCHEDULE_ORGANIZER_WO_1e9f246d on SCHEDULE_ORGANIZER_WORK (
+    JOB_ID
+);
+
+create index SCHEDULE_REPLY_WORK_H_745af8cf on SCHEDULE_REPLY_WORK (
+    HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_REPLY_WORK_R_11bd3fbb on SCHEDULE_REPLY_WORK (
+    RESOURCE_ID
+);
+
+create index SCHEDULE_REPLY_WORK_J_5913b4a4 on SCHEDULE_REPLY_WORK (
+    JOB_ID
+);
+
+create index SCHEDULE_REPLY_CANCEL_dab513ef on SCHEDULE_REPLY_CANCEL_WORK (
+    HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_REPLY_CANCEL_94a0c766 on SCHEDULE_REPLY_CANCEL_WORK (
+    JOB_ID
+);
+
+create index PRINCIPAL_PURGE_POLLI_6383e68a on PRINCIPAL_PURGE_POLLING_WORK (
+    JOB_ID
+);
+
+create index PRINCIPAL_PURGE_CHECK_b0c024c1 on PRINCIPAL_PURGE_CHECK_WORK (
+    JOB_ID
+);
+
+create index PRINCIPAL_PURGE_WORK__7a8141a3 on PRINCIPAL_PURGE_WORK (
+    JOB_ID
+);
+
+-- Skipped Function next_job
+
+-- Extras
+
+create or replace function next_job return integer is
+  cursor c1 is select JOB_ID from JOB for update skip locked;
+  result integer;
+begin
+  open c1;
+  fetch c1 into result;
+  close c1;
+  return result;
+end;
+/

Added: CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v40.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v40.sql	                        (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v40.sql	2014-05-02 20:07:26 UTC (rev 13408)
@@ -0,0 +1,1071 @@
+-- -*- test-case-name: txdav.caldav.datastore.test.test_sql,txdav.carddav.datastore.test.test_sql -*-
+
+----
+-- Copyright (c) 2010-2014 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+
+-----------------
+-- Resource ID --
+-----------------
+
+create sequence RESOURCE_ID_SEQ;
+
+
+-------------------------
+-- Cluster Bookkeeping --
+-------------------------
+
+-- Information about a process connected to this database.
+
+-- Note that this must match the node info schema in twext.enterprise.queue.
+create table NODE_INFO (
+  HOSTNAME  varchar(255) not null,
+  PID       integer      not null,
+  PORT      integer      not null,
+  TIME      timestamp    not null default timezone('UTC', CURRENT_TIMESTAMP),
+
+  primary key (HOSTNAME, PORT)
+);
+
+-- Unique named locks.  This table should always be empty, but rows are
+-- temporarily created in order to prevent undesirable concurrency.
+create table NAMED_LOCK (
+    LOCK_NAME varchar(255) primary key
+);
+
+
+--------------------
+-- Jobs           --
+--------------------
+
+create sequence JOB_SEQ;
+
+create table JOB (
+  JOB_ID      integer primary key default nextval('JOB_SEQ') not null, --implicit index
+  WORK_TYPE   varchar(255) not null,
+  PRIORITY    integer default 0,
+  WEIGHT      integer default 0,
+  NOT_BEFORE  timestamp default null,
+  NOT_AFTER   timestamp default null
+);
+
+create or replace function next_job() returns integer as $$
+declare
+  result integer;
+begin
+  select JOB_ID into result from JOB where pg_try_advisory_xact_lock(JOB_ID) limit 1 for update;
+  return result;
+end
+$$ LANGUAGE plpgsql;
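The function above is the PostgreSQL job-claiming helper: the transaction-scoped advisory lock lets concurrent workers skip rows another transaction has already claimed. A minimal sketch of how a worker transaction might use it, assuming the JOB table defined above; the id 7 is hypothetical and stands in for whatever next_job() returns:

begin;
select next_job();                    -- returns a claimable JOB_ID, or NULL if none
select WORK_TYPE, PRIORITY, NOT_BEFORE
  from JOB
 where JOB_ID = 7;                    -- 7 stands in for the id returned above
commit;                               -- releases the row and advisory locks taken by next_job()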
+
+-------------------
+-- Calendar Home --
+-------------------
+
+create table CALENDAR_HOME (
+  RESOURCE_ID      integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID        varchar(255) not null unique,                                -- implicit index
+  STATUS           integer      default 0 not null,                             -- enum HOME_STATUS
+  DATAVERSION      integer      default 0 not null
+);
+
+-- Enumeration of statuses
+
+create table HOME_STATUS (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into HOME_STATUS values (0, 'normal' );
+insert into HOME_STATUS values (1, 'external');
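A sketch of how the STATUS enumeration is meant to be read back (PostgreSQL dialect, using only the tables defined above):

select CH.OWNER_UID, HS.DESCRIPTION as HOME_STATUS
  from CALENDAR_HOME CH
  join HOME_STATUS HS on HS.ID = CH.STATUS;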
+
+
+--------------
+-- Calendar --
+--------------
+
+create table CALENDAR (
+  RESOURCE_ID integer   primary key default nextval('RESOURCE_ID_SEQ') -- implicit index
+);
+
+
+----------------------------
+-- Calendar Home Metadata --
+----------------------------
+
+create table CALENDAR_HOME_METADATA (
+  RESOURCE_ID              integer     primary key references CALENDAR_HOME on delete cascade, -- implicit index
+  QUOTA_USED_BYTES         integer     default 0 not null,
+  DEFAULT_EVENTS           integer     default null references CALENDAR on delete set null,
+  DEFAULT_TASKS            integer     default null references CALENDAR on delete set null,
+  DEFAULT_POLLS            integer     default null references CALENDAR on delete set null,
+  ALARM_VEVENT_TIMED       text        default null,
+  ALARM_VEVENT_ALLDAY      text        default null,
+  ALARM_VTODO_TIMED        text        default null,
+  ALARM_VTODO_ALLDAY       text        default null,
+  AVAILABILITY             text        default null,
+  CREATED                  timestamp   default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                 timestamp   default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index CALENDAR_HOME_METADATA_DEFAULT_EVENTS on
+  CALENDAR_HOME_METADATA(DEFAULT_EVENTS);
+create index CALENDAR_HOME_METADATA_DEFAULT_TASKS on
+  CALENDAR_HOME_METADATA(DEFAULT_TASKS);
+create index CALENDAR_HOME_METADATA_DEFAULT_POLLS on
+  CALENDAR_HOME_METADATA(DEFAULT_POLLS);
+
+
+-----------------------
+-- Calendar Metadata --
+-----------------------
+
+create table CALENDAR_METADATA (
+  RESOURCE_ID           integer      primary key references CALENDAR on delete cascade, -- implicit index
+  SUPPORTED_COMPONENTS  varchar(255) default null,
+  CREATED               timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED              timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------------
+-- Sharing Notifications --
+---------------------------
+
+create table NOTIFICATION_HOME (
+  RESOURCE_ID integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID   varchar(255) not null unique,                                -- implicit index
+  STATUS      integer      default 0 not null,                             -- enum HOME_STATUS
+  DATAVERSION integer      default 0 not null
+);
+
+create table NOTIFICATION (
+  RESOURCE_ID                   integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME,
+  NOTIFICATION_UID              varchar(255) not null,
+  NOTIFICATION_TYPE             varchar(255) not null,
+  NOTIFICATION_DATA             text         not null,
+  MD5                           char(32)     not null,
+  CREATED                       timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique (NOTIFICATION_UID, NOTIFICATION_HOME_RESOURCE_ID) -- implicit index
+);
+
+create index NOTIFICATION_NOTIFICATION_HOME_RESOURCE_ID on
+  NOTIFICATION(NOTIFICATION_HOME_RESOURCE_ID);
+
+
+-------------------
+-- Calendar Bind --
+-------------------
+
+-- Joins CALENDAR_HOME and CALENDAR
+
+create table CALENDAR_BIND (
+  CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
+  CALENDAR_RESOURCE_ID      integer      not null references CALENDAR on delete cascade,
+  EXTERNAL_ID               integer      default null,
+  CALENDAR_RESOURCE_NAME    varchar(255) not null,
+  BIND_MODE                 integer      not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS               integer      not null, -- enum CALENDAR_BIND_STATUS
+  BIND_REVISION             integer      default 0 not null,
+  MESSAGE                   text,
+  TRANSP                    integer      default 0 not null, -- enum CALENDAR_TRANSP
+  ALARM_VEVENT_TIMED        text         default null,
+  ALARM_VEVENT_ALLDAY       text         default null,
+  ALARM_VTODO_TIMED         text         default null,
+  ALARM_VTODO_ALLDAY        text         default null,
+  TIMEZONE                  text         default null,
+
+  primary key (CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID), -- implicit index
+  unique (CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_NAME)     -- implicit index
+);
+
+create index CALENDAR_BIND_RESOURCE_ID on
+  CALENDAR_BIND(CALENDAR_RESOURCE_ID);
+
+-- Enumeration of calendar bind modes
+
+create table CALENDAR_BIND_MODE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_MODE values (0, 'own'  );
+insert into CALENDAR_BIND_MODE values (1, 'read' );
+insert into CALENDAR_BIND_MODE values (2, 'write');
+insert into CALENDAR_BIND_MODE values (3, 'direct');
+insert into CALENDAR_BIND_MODE values (4, 'indirect');
+
+-- Enumeration of statuses
+
+create table CALENDAR_BIND_STATUS (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_STATUS values (0, 'invited' );
+insert into CALENDAR_BIND_STATUS values (1, 'accepted');
+insert into CALENDAR_BIND_STATUS values (2, 'declined');
+insert into CALENDAR_BIND_STATUS values (3, 'invalid');
+insert into CALENDAR_BIND_STATUS values (4, 'deleted');
+
+
+-- Enumeration of transparency
+
+create table CALENDAR_TRANSP (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_TRANSP values (0, 'opaque' );
+insert into CALENDAR_TRANSP values (1, 'transparent');
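A sketch of resolving a home's calendar bindings against the three enumerations above (PostgreSQL dialect; the home resource id 42 is hypothetical):

select CB.CALENDAR_RESOURCE_NAME,
       BM.DESCRIPTION as BIND_MODE,
       BS.DESCRIPTION as BIND_STATUS,
       TR.DESCRIPTION as TRANSP
  from CALENDAR_BIND CB
  join CALENDAR_BIND_MODE   BM on BM.ID = CB.BIND_MODE
  join CALENDAR_BIND_STATUS BS on BS.ID = CB.BIND_STATUS
  join CALENDAR_TRANSP      TR on TR.ID = CB.TRANSP
 where CB.CALENDAR_HOME_RESOURCE_ID = 42;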
+
+
+---------------------
+-- Calendar Object --
+---------------------
+
+create table CALENDAR_OBJECT (
+  RESOURCE_ID          integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  CALENDAR_RESOURCE_ID integer      not null references CALENDAR on delete cascade,
+  RESOURCE_NAME        varchar(255) not null,
+  ICALENDAR_TEXT       text         not null,
+  ICALENDAR_UID        varchar(255) not null,
+  ICALENDAR_TYPE       varchar(255) not null,
+  ATTACHMENTS_MODE     integer      default 0 not null, -- enum CALENDAR_OBJ_ATTACHMENTS_MODE
+  DROPBOX_ID           varchar(255),
+  ORGANIZER            varchar(255),
+  RECURRANCE_MIN       date,        -- minimum date that recurrences have been expanded to.
+  RECURRANCE_MAX       date,        -- maximum date that recurrences have been expanded to.
+  ACCESS               integer      default 0 not null,
+  SCHEDULE_OBJECT      boolean      default false,
+  SCHEDULE_TAG         varchar(36)  default null,
+  SCHEDULE_ETAGS       text         default null,
+  PRIVATE_COMMENTS     boolean      default false not null,
+  MD5                  char(32)     not null,
+  CREATED              timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED             timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique (CALENDAR_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+
+  -- since the 'inbox' is a 'calendar resource' for the purpose of storing
+  -- calendar objects, this constraint has to be selectively enforced by the
+  -- application layer.
+
+  -- unique (CALENDAR_RESOURCE_ID, ICALENDAR_UID)
+);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_AND_ICALENDAR_UID on
+  CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_RECURRANCE_MAX on
+  CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, RECURRANCE_MAX);
+
+create index CALENDAR_OBJECT_ICALENDAR_UID on
+  CALENDAR_OBJECT(ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_DROPBOX_ID on
+  CALENDAR_OBJECT(DROPBOX_ID);
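The UID indexes above exist to make scheduling lookups cheap; a sketch of the query shape they serve (the calendar id and iCalendar UID are hypothetical):

select RESOURCE_ID, RESOURCE_NAME, ICALENDAR_TYPE
  from CALENDAR_OBJECT
 where CALENDAR_RESOURCE_ID = 17
   and ICALENDAR_UID = '6F9619FF-8B86-D011-B42D-00C04FC964FF';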
+
+-- Enumeration of attachment modes
+
+create table CALENDAR_OBJ_ATTACHMENTS_MODE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE values (0, 'none' );
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE values (1, 'read' );
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE values (2, 'write');
+
+
+-- Enumeration of calendar access types
+
+create table CALENDAR_ACCESS_TYPE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(32) not null unique
+);
+
+insert into CALENDAR_ACCESS_TYPE values (0, ''             );
+insert into CALENDAR_ACCESS_TYPE values (1, 'public'       );
+insert into CALENDAR_ACCESS_TYPE values (2, 'private'      );
+insert into CALENDAR_ACCESS_TYPE values (3, 'confidential' );
+insert into CALENDAR_ACCESS_TYPE values (4, 'restricted'   );
+
+
+-----------------
+-- Instance ID --
+-----------------
+
+create sequence INSTANCE_ID_SEQ;
+
+
+----------------
+-- Time Range --
+----------------
+
+create table TIME_RANGE (
+  INSTANCE_ID                 integer        primary key default nextval('INSTANCE_ID_SEQ'), -- implicit index
+  CALENDAR_RESOURCE_ID        integer        not null references CALENDAR on delete cascade,
+  CALENDAR_OBJECT_RESOURCE_ID integer        not null references CALENDAR_OBJECT on delete cascade,
+  FLOATING                    boolean        not null,
+  START_DATE                  timestamp      not null,
+  END_DATE                    timestamp      not null,
+  FBTYPE                      integer        not null,
+  TRANSPARENT                 boolean        not null
+);
+
+create index TIME_RANGE_CALENDAR_RESOURCE_ID on
+  TIME_RANGE(CALENDAR_RESOURCE_ID);
+create index TIME_RANGE_CALENDAR_OBJECT_RESOURCE_ID on
+  TIME_RANGE(CALENDAR_OBJECT_RESOURCE_ID);
+
+
+-- Enumeration of free/busy types
+
+create table FREE_BUSY_TYPE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into FREE_BUSY_TYPE values (0, 'unknown'         );
+insert into FREE_BUSY_TYPE values (1, 'free'            );
+insert into FREE_BUSY_TYPE values (2, 'busy'            );
+insert into FREE_BUSY_TYPE values (3, 'busy-unavailable');
+insert into FREE_BUSY_TYPE values (4, 'busy-tentative'  );
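A sketch of the time-range scan these tables support, for example when building a free-busy report (PostgreSQL dialect; the calendar id and window bounds are hypothetical):

select TR.START_DATE, TR.END_DATE, FBT.DESCRIPTION as FBTYPE
  from TIME_RANGE TR
  join FREE_BUSY_TYPE FBT on FBT.ID = TR.FBTYPE
 where TR.CALENDAR_RESOURCE_ID = 17
   and TR.START_DATE < timestamp '2014-05-09 00:00:00'
   and TR.END_DATE   > timestamp '2014-05-02 00:00:00'
   and TR.TRANSPARENT = false;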
+
+
+-------------------
+-- Per-user data --
+-------------------
+
+create table PERUSER (
+  TIME_RANGE_INSTANCE_ID      integer      not null references TIME_RANGE on delete cascade,
+  USER_ID                     varchar(255) not null,
+  TRANSPARENT                 boolean      not null,
+  ADJUSTED_START_DATE         timestamp	   default null,
+  ADJUSTED_END_DATE           timestamp    default null
+);
+
+create index PERUSER_TIME_RANGE_INSTANCE_ID on
+  PERUSER(TIME_RANGE_INSTANCE_ID);
+
+
+----------------
+-- Attachment --
+----------------
+
+create sequence ATTACHMENT_ID_SEQ;
+
+create table ATTACHMENT (
+  ATTACHMENT_ID               integer           primary key default nextval('ATTACHMENT_ID_SEQ'), -- implicit index
+  CALENDAR_HOME_RESOURCE_ID   integer           not null references CALENDAR_HOME,
+  DROPBOX_ID                  varchar(255),
+  CONTENT_TYPE                varchar(255)      not null,
+  SIZE                        integer           not null,
+  MD5                         char(32)          not null,
+  CREATED                     timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                    timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  PATH                        varchar(1024)     not null
+);
+
+create index ATTACHMENT_CALENDAR_HOME_RESOURCE_ID on
+  ATTACHMENT(CALENDAR_HOME_RESOURCE_ID);
+
+create index ATTACHMENT_DROPBOX_ID on
+  ATTACHMENT(DROPBOX_ID);
+
+-- Many-to-many relationship between attachments and calendar objects
+create table ATTACHMENT_CALENDAR_OBJECT (
+  ATTACHMENT_ID                  integer      not null references ATTACHMENT on delete cascade,
+  MANAGED_ID                     varchar(255) not null,
+  CALENDAR_OBJECT_RESOURCE_ID    integer      not null references CALENDAR_OBJECT on delete cascade,
+
+  primary key (ATTACHMENT_ID, CALENDAR_OBJECT_RESOURCE_ID), -- implicit index
+  unique (MANAGED_ID, CALENDAR_OBJECT_RESOURCE_ID) --implicit index
+);
+
+create index ATTACHMENT_CALENDAR_OBJECT_CALENDAR_OBJECT_RESOURCE_ID on
+  ATTACHMENT_CALENDAR_OBJECT(CALENDAR_OBJECT_RESOURCE_ID);
+
+-----------------------
+-- Resource Property --
+-----------------------
+
+create table RESOURCE_PROPERTY (
+  RESOURCE_ID integer      not null, -- foreign key: *.RESOURCE_ID
+  NAME        varchar(255) not null,
+  VALUE       text         not null, -- FIXME: xml?
+  VIEWER_UID  varchar(255),
+
+  primary key (RESOURCE_ID, NAME, VIEWER_UID) -- implicit index
+);
+
+
+----------------------
+-- AddressBook Home --
+----------------------
+
+create table ADDRESSBOOK_HOME (
+  RESOURCE_ID                   integer         primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  ADDRESSBOOK_PROPERTY_STORE_ID integer         default nextval('RESOURCE_ID_SEQ') not null,    -- implicit index
+  OWNER_UID                     varchar(255)    not null unique,                                -- implicit index
+  STATUS                        integer         default 0 not null,                             -- enum HOME_STATUS
+  DATAVERSION                   integer         default 0 not null
+);
+
+
+-------------------------------
+-- AddressBook Home Metadata --
+-------------------------------
+
+create table ADDRESSBOOK_HOME_METADATA (
+  RESOURCE_ID      integer      primary key references ADDRESSBOOK_HOME on delete cascade, -- implicit index
+  QUOTA_USED_BYTES integer      default 0 not null,
+  CREATED          timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED         timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+-----------------------------
+-- Shared AddressBook Bind --
+-----------------------------
+
+-- Joins sharee ADDRESSBOOK_HOME and owner ADDRESSBOOK_HOME
+
+create table SHARED_ADDRESSBOOK_BIND (
+  ADDRESSBOOK_HOME_RESOURCE_ID          integer         not null references ADDRESSBOOK_HOME,
+  OWNER_HOME_RESOURCE_ID                integer         not null references ADDRESSBOOK_HOME on delete cascade,
+  EXTERNAL_ID                           integer         default null,
+  ADDRESSBOOK_RESOURCE_NAME             varchar(255)    not null,
+  BIND_MODE                             integer         not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS                           integer         not null, -- enum CALENDAR_BIND_STATUS
+  BIND_REVISION                         integer         default 0 not null,
+  MESSAGE                               text,                     -- FIXME: xml?
+
+  primary key (ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_NAME)     -- implicit index
+);
+
+create index SHARED_ADDRESSBOOK_BIND_RESOURCE_ID on
+  SHARED_ADDRESSBOOK_BIND(OWNER_HOME_RESOURCE_ID);
+
+
+------------------------
+-- AddressBook Object --
+------------------------
+
+create table ADDRESSBOOK_OBJECT (
+  RESOURCE_ID                   integer         primary key default nextval('RESOURCE_ID_SEQ'),    -- implicit index
+  ADDRESSBOOK_HOME_RESOURCE_ID  integer         not null references ADDRESSBOOK_HOME on delete cascade,
+  RESOURCE_NAME                 varchar(255)    not null,
+  VCARD_TEXT                    text            not null,
+  VCARD_UID                     varchar(255)    not null,
+  KIND                          integer         not null,  -- enum ADDRESSBOOK_OBJECT_KIND
+  MD5                           char(32)        not null,
+  CREATED                       timestamp       default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                      timestamp       default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, RESOURCE_NAME), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, VCARD_UID)      -- implicit index
+);
+
+
+-----------------------------
+-- AddressBook Object kind --
+-----------------------------
+
+create table ADDRESSBOOK_OBJECT_KIND (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND values (0, 'person');
+insert into ADDRESSBOOK_OBJECT_KIND values (1, 'group' );
+insert into ADDRESSBOOK_OBJECT_KIND values (2, 'resource');
+insert into ADDRESSBOOK_OBJECT_KIND values (3, 'location');
+
+
+----------------------------------
+-- Revisions, forward reference --
+----------------------------------
+
+create sequence REVISION_SEQ;
+
+---------------------------------
+-- Address Book Object Members --
+---------------------------------
+
+create table ABO_MEMBERS (
+  GROUP_ID        integer     not null, -- references ADDRESSBOOK_OBJECT on delete cascade,   -- AddressBook Object's (kind=='group') RESOURCE_ID
+  ADDRESSBOOK_ID  integer     not null references ADDRESSBOOK_HOME on delete cascade,
+  MEMBER_ID       integer     not null, -- references ADDRESSBOOK_OBJECT,                     -- member AddressBook Object's RESOURCE_ID
+  REVISION        integer     default nextval('REVISION_SEQ') not null,
+  REMOVED         boolean     default false not null,
+  MODIFIED        timestamp   default timezone('UTC', CURRENT_TIMESTAMP),
+
+    primary key (GROUP_ID, MEMBER_ID, REVISION) -- implicit index
+);
+
+create index ABO_MEMBERS_ADDRESSBOOK_ID on
+  ABO_MEMBERS(ADDRESSBOOK_ID);
+create index ABO_MEMBERS_MEMBER_ID on
+  ABO_MEMBERS(MEMBER_ID);
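Because ABO_MEMBERS rows are versioned rather than deleted, the current membership of a group object can be read as the highest-revision row per member that is not flagged REMOVED. A sketch of that reading (the group resource id 300 is hypothetical):

select M.MEMBER_ID
  from ABO_MEMBERS M
 where M.GROUP_ID = 300
   and M.REMOVED = false
   and M.REVISION = (
         select max(M2.REVISION)
           from ABO_MEMBERS M2
          where M2.GROUP_ID = M.GROUP_ID
            and M2.MEMBER_ID = M.MEMBER_ID);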
+
+------------------------------------------
+-- Address Book Object Foreign Members  --
+------------------------------------------
+
+create table ABO_FOREIGN_MEMBERS (
+  GROUP_ID           integer      not null references ADDRESSBOOK_OBJECT on delete cascade,  -- AddressBook Object's (kind=='group') RESOURCE_ID
+  ADDRESSBOOK_ID     integer      not null references ADDRESSBOOK_HOME on delete cascade,
+  MEMBER_ADDRESS     varchar(255) not null,                                                  -- member AddressBook Object's 'calendar' address
+
+  primary key (GROUP_ID, MEMBER_ADDRESS) -- implicit index
+);
+
+create index ABO_FOREIGN_MEMBERS_ADDRESSBOOK_ID on
+  ABO_FOREIGN_MEMBERS(ADDRESSBOOK_ID);
+
+-----------------------
+-- Shared Group Bind --
+-----------------------
+
+-- Joins ADDRESSBOOK_HOME and ADDRESSBOOK_OBJECT (kind == group)
+
+create table SHARED_GROUP_BIND (
+  ADDRESSBOOK_HOME_RESOURCE_ID      integer      not null references ADDRESSBOOK_HOME,
+  GROUP_RESOURCE_ID                 integer      not null references ADDRESSBOOK_OBJECT on delete cascade,
+  EXTERNAL_ID                       integer      default null,
+  GROUP_ADDRESSBOOK_NAME            varchar(255) not null,
+  BIND_MODE                         integer      not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS                       integer      not null, -- enum CALENDAR_BIND_STATUS
+  BIND_REVISION                     integer      default 0 not null,
+  MESSAGE                           text,                  -- FIXME: xml?
+
+  primary key (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_RESOURCE_ID), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_ADDRESSBOOK_NAME)  -- implicit index
+);
+
+create index SHARED_GROUP_BIND_RESOURCE_ID on
+  SHARED_GROUP_BIND(GROUP_RESOURCE_ID);
+
+
+---------------
+-- Revisions --
+---------------
+
+-- create sequence REVISION_SEQ;
+
+
+-------------------------------
+-- Calendar Object Revisions --
+-------------------------------
+
+create table CALENDAR_OBJECT_REVISIONS (
+  CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
+  CALENDAR_RESOURCE_ID      integer      references CALENDAR,
+  CALENDAR_NAME             varchar(255) default null,
+  RESOURCE_NAME             varchar(255),
+  REVISION                  integer      default nextval('REVISION_SEQ') not null,
+  DELETED                   boolean      not null,
+  MODIFIED                  timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index CALENDAR_OBJECT_REVISIONS_HOME_RESOURCE_ID_CALENDAR_RESOURCE_ID
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, REVISION);
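A sketch of the incremental-sync query these indexes are built for: everything in one calendar that changed after a client's last-seen revision (the calendar id 17 and revision 1000 are hypothetical):

select RESOURCE_NAME, REVISION, DELETED
  from CALENDAR_OBJECT_REVISIONS
 where CALENDAR_RESOURCE_ID = 17
   and REVISION > 1000;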
+
+
+----------------------------------
+-- AddressBook Object Revisions --
+----------------------------------
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+  ADDRESSBOOK_HOME_RESOURCE_ID  integer      not null references ADDRESSBOOK_HOME,
+  OWNER_HOME_RESOURCE_ID        integer      references ADDRESSBOOK_HOME,
+  ADDRESSBOOK_NAME              varchar(255) default null,
+  OBJECT_RESOURCE_ID            integer      default 0,
+  RESOURCE_NAME                 varchar(255),
+  REVISION                      integer      default nextval('REVISION_SEQ') not null,
+  DELETED                       boolean      not null,
+  MODIFIED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_HOME_RESOURCE_ID_OWNER_HOME_RESOURCE_ID
+  on ADDRESSBOOK_OBJECT_REVISIONS(ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, REVISION);
+
+
+-----------------------------------
+-- Notification Object Revisions --
+-----------------------------------
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+  NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME on delete cascade,
+  RESOURCE_NAME                 varchar(255),
+  REVISION                      integer      default nextval('REVISION_SEQ') not null,
+  DELETED                       boolean      not null,
+  MODIFIED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique (NOTIFICATION_HOME_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+);
+
+create index NOTIFICATION_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on NOTIFICATION_OBJECT_REVISIONS(NOTIFICATION_HOME_RESOURCE_ID, REVISION);
+
+
+-------------------------------------------
+-- Apple Push Notification Subscriptions --
+-------------------------------------------
+
+create table APN_SUBSCRIPTIONS (
+  TOKEN                         varchar(255) not null,
+  RESOURCE_KEY                  varchar(255) not null,
+  MODIFIED                      integer      not null,
+  SUBSCRIBER_GUID               varchar(255) not null,
+  USER_AGENT                    varchar(255) default null,
+  IP_ADDR                       varchar(255) default null,
+
+  primary key (TOKEN, RESOURCE_KEY) -- implicit index
+);
+
+create index APN_SUBSCRIPTIONS_RESOURCE_KEY
+  on APN_SUBSCRIPTIONS(RESOURCE_KEY);
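A sketch of the lookup the RESOURCE_KEY index serves: which device tokens should be pushed when a given resource changes (the key value shown is hypothetical):

select TOKEN, SUBSCRIBER_GUID
  from APN_SUBSCRIPTIONS
 where RESOURCE_KEY = '/CalDAV/example.com/user01/';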
+
+
+-----------------
+-- IMIP Tokens --
+-----------------
+
+create table IMIP_TOKENS (
+  TOKEN                         varchar(255) not null,
+  ORGANIZER                     varchar(255) not null,
+  ATTENDEE                      varchar(255) not null,
+  ICALUID                       varchar(255) not null,
+  ACCESSED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  primary key (ORGANIZER, ATTENDEE, ICALUID) -- implicit index
+);
+
+create index IMIP_TOKENS_TOKEN
+  on IMIP_TOKENS(TOKEN);
+
+
+----------------
+-- Work Items --
+----------------
+
+create sequence WORKITEM_SEQ;
+
+
+--------------------------
+-- IMIP Invitation Work --
+--------------------------
+
+create table IMIP_INVITATION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  FROM_ADDR                     varchar(255) not null,
+  TO_ADDR                       varchar(255) not null,
+  ICALENDAR_TEXT                text         not null
+);
+
+create index IMIP_INVITATION_WORK_JOB_ID on
+  IMIP_INVITATION_WORK(JOB_ID);
+
+-----------------------
+-- IMIP Polling Work --
+-----------------------
+
+create table IMIP_POLLING_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index IMIP_POLLING_WORK_JOB_ID on
+  IMIP_POLLING_WORK(JOB_ID);
+
+
+---------------------
+-- IMIP Reply Work --
+---------------------
+
+create table IMIP_REPLY_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  ORGANIZER                     varchar(255) not null,
+  ATTENDEE                      varchar(255) not null,
+  ICALENDAR_TEXT                text         not null
+);
+
+create index IMIP_REPLY_WORK_JOB_ID on
+  IMIP_REPLY_WORK(JOB_ID);
+
+
+------------------------
+-- Push Notifications --
+------------------------
+
+create table PUSH_NOTIFICATION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  PUSH_ID                       varchar(255) not null,
+  PUSH_PRIORITY                 integer      not null -- 1:low 5:medium 10:high
+);
+
+create index PUSH_NOTIFICATION_WORK_JOB_ID on
+  PUSH_NOTIFICATION_WORK(JOB_ID);
+
+-----------------
+-- GroupCacher --
+-----------------
+
+create table GROUP_CACHER_POLLING_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index GROUP_CACHER_POLLING_WORK_JOB_ID on
+  GROUP_CACHER_POLLING_WORK(JOB_ID);
+
+create table GROUP_REFRESH_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  GROUP_UID                     varchar(255) not null
+);
+
+create index GROUP_REFRESH_WORK_JOB_ID on
+  GROUP_REFRESH_WORK(JOB_ID);
+
+create table GROUP_ATTENDEE_RECONCILE_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  RESOURCE_ID                   integer,
+  GROUP_ID                      integer
+);
+
+create index GROUP_ATTENDEE_RECONCILE_WORK_JOB_ID on
+  GROUP_ATTENDEE_RECONCILE_WORK(JOB_ID);
+
+
+create table GROUPS (
+  GROUP_ID                      integer      primary key default nextval('RESOURCE_ID_SEQ'),    -- implicit index
+  NAME                          varchar(255) not null,
+  GROUP_UID                     varchar(255) not null,
+  MEMBERSHIP_HASH               varchar(255) not null,
+  EXTANT                        integer default 1,
+  CREATED                       timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                      timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+create index GROUPS_GROUP_UID on
+  GROUPS(GROUP_UID);
+
+create table GROUP_MEMBERSHIP (
+  GROUP_ID                     integer not null references GROUPS on delete cascade,
+  MEMBER_UID                   varchar(255) not null,
+  
+  primary key (GROUP_ID, MEMBER_UID)
+);
+
+create index GROUP_MEMBERSHIP_MEMBER on
+  GROUP_MEMBERSHIP(MEMBER_UID);
+
+create table GROUP_ATTENDEE (
+  GROUP_ID                      integer not null references GROUPS on delete cascade,
+  RESOURCE_ID                   integer not null references CALENDAR_OBJECT on delete cascade,
+  MEMBERSHIP_HASH               varchar(255) not null,
+  
+  primary key (GROUP_ID, RESOURCE_ID)
+);
+create index GROUP_ATTENDEE_RESOURCE_ID on
+  GROUP_ATTENDEE(RESOURCE_ID);
+
+---------------
+-- Delegates --
+---------------
+
+create table DELEGATES (
+  DELEGATOR                     varchar(255) not null,
+  DELEGATE                      varchar(255) not null,
+  READ_WRITE                    integer      not null, -- 1 = ReadWrite, 0 = ReadOnly
+
+  primary key (DELEGATOR, READ_WRITE, DELEGATE)
+);
+create index DELEGATE_TO_DELEGATOR on
+  DELEGATES(DELEGATE, READ_WRITE, DELEGATOR);
+
+create table DELEGATE_GROUPS (
+  DELEGATOR                     varchar(255) not null,
+  GROUP_ID                      integer      not null references GROUPS on delete cascade,
+  READ_WRITE                    integer      not null, -- 1 = ReadWrite, 0 = ReadOnly
+  IS_EXTERNAL                   integer      not null, -- 1 = External, 0 = Internal
+
+  primary key (DELEGATOR, READ_WRITE, GROUP_ID)
+);
+create index DELEGATE_GROUPS_GROUP_ID on
+  DELEGATE_GROUPS(GROUP_ID);
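A sketch of computing a delegator's effective read-write delegates, combining direct entries with members of delegated groups (the delegator UID 'user01' is hypothetical):

select DELEGATE
  from DELEGATES
 where DELEGATOR = 'user01' and READ_WRITE = 1
union
select GM.MEMBER_UID
  from DELEGATE_GROUPS DG
  join GROUP_MEMBERSHIP GM on GM.GROUP_ID = DG.GROUP_ID
 where DG.DELEGATOR = 'user01' and DG.READ_WRITE = 1;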
+
+create table EXTERNAL_DELEGATE_GROUPS (
+  DELEGATOR                     varchar(255) primary key not null,
+  GROUP_UID_READ                varchar(255),
+  GROUP_UID_WRITE               varchar(255)
+);
+
+--------------------------
+-- Object Splitter Work --
+--------------------------
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade
+);
+
+create index CALENDAR_OBJECT_SPLITTER_WORK_RESOURCE_ID on
+  CALENDAR_OBJECT_SPLITTER_WORK(RESOURCE_ID);
+create index CALENDAR_OBJECT_SPLITTER_WORK_JOB_ID on
+  CALENDAR_OBJECT_SPLITTER_WORK(JOB_ID);
+
+---------------------------
+-- Revision Cleanup Work --
+---------------------------
+
+create table FIND_MIN_VALID_REVISION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index FIND_MIN_VALID_REVISION_WORK_JOB_ID on
+  FIND_MIN_VALID_REVISION_WORK(JOB_ID);
+
+create table REVISION_CLEANUP_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index REVISION_CLEANUP_WORK_JOB_ID on
+  REVISION_CLEANUP_WORK(JOB_ID);
+
+------------------------
+-- Inbox Cleanup Work --
+------------------------
+
+create table INBOX_CLEANUP_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index INBOX_CLEANUP_WORK_JOB_ID on
+   INBOX_CLEANUP_WORK(JOB_ID);
+
+create table CLEANUP_ONE_INBOX_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  HOME_ID                       integer      not null unique references CALENDAR_HOME on delete cascade
+);
+
+create index CLEANUP_ONE_INBOX_WORK_JOB_ID on
+  CLEANUP_ONE_INBOX_WORK(JOB_ID);
+
+---------------------------
+-- Schedule Refresh Work --
+---------------------------
+
+create table SCHEDULE_REFRESH_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  ICALENDAR_UID                 varchar(255) not null,
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade,
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade,
+  ATTENDEE_COUNT                integer
+);
+
+create index SCHEDULE_REFRESH_WORK_HOME_RESOURCE_ID on
+  SCHEDULE_REFRESH_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_REFRESH_WORK_RESOURCE_ID on
+  SCHEDULE_REFRESH_WORK(RESOURCE_ID);
+create index SCHEDULE_REFRESH_WORK_JOB_ID on
+  SCHEDULE_REFRESH_WORK(JOB_ID);
+
+create table SCHEDULE_REFRESH_ATTENDEES (
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade,
+  ATTENDEE                      varchar(255) not null,
+  
+  primary key (RESOURCE_ID, ATTENDEE)
+);
+
+create index SCHEDULE_REFRESH_ATTENDEES_RESOURCE_ID_ATTENDEE on
+  SCHEDULE_REFRESH_ATTENDEES(RESOURCE_ID, ATTENDEE);
+
+------------------------------
+-- Schedule Auto Reply Work --
+------------------------------
+
+create table SCHEDULE_AUTO_REPLY_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  ICALENDAR_UID                 varchar(255) not null,
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade,
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade,
+  PARTSTAT                      varchar(255) not null
+);
+
+create index SCHEDULE_AUTO_REPLY_WORK_HOME_RESOURCE_ID on
+  SCHEDULE_AUTO_REPLY_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_AUTO_REPLY_WORK_RESOURCE_ID on
+  SCHEDULE_AUTO_REPLY_WORK(RESOURCE_ID);
+create index SCHEDULE_AUTO_REPLY_WORK_JOB_ID on
+  SCHEDULE_AUTO_REPLY_WORK(JOB_ID);
+
+-----------------------------
+-- Schedule Organizer Work --
+-----------------------------
+
+create table SCHEDULE_ORGANIZER_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  ICALENDAR_UID                 varchar(255) not null,
+  SCHEDULE_ACTION               integer      not null, -- Enum SCHEDULE_ACTION
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade,
+  RESOURCE_ID                   integer,     -- this references a possibly non-existent CALENDAR_OBJECT
+  ICALENDAR_TEXT_OLD            text,
+  ICALENDAR_TEXT_NEW            text,
+  ATTENDEE_COUNT                integer,
+  SMART_MERGE                   boolean
+);
+
+create index SCHEDULE_ORGANIZER_WORK_HOME_RESOURCE_ID on
+  SCHEDULE_ORGANIZER_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_ORGANIZER_WORK_RESOURCE_ID on
+  SCHEDULE_ORGANIZER_WORK(RESOURCE_ID);
+create index SCHEDULE_ORGANIZER_WORK_JOB_ID on
+  SCHEDULE_ORGANIZER_WORK(JOB_ID);
+
+-- Enumeration of schedule actions
+
+create table SCHEDULE_ACTION (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into SCHEDULE_ACTION values (0, 'create');
+insert into SCHEDULE_ACTION values (1, 'modify');
+insert into SCHEDULE_ACTION values (2, 'modify-cancelled');
+insert into SCHEDULE_ACTION values (3, 'remove');
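A sketch of decoding queued organizer work via the enumeration above:

select W.WORK_ID, W.ICALENDAR_UID, SA.DESCRIPTION as ACTION
  from SCHEDULE_ORGANIZER_WORK W
  join SCHEDULE_ACTION SA on SA.ID = W.SCHEDULE_ACTION;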
+
+-------------------------
+-- Schedule Reply Work --
+-------------------------
+
+create table SCHEDULE_REPLY_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  ICALENDAR_UID                 varchar(255) not null,
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade,
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade,
+  CHANGED_RIDS                  text
+);
+
+create index SCHEDULE_REPLY_WORK_HOME_RESOURCE_ID on
+  SCHEDULE_REPLY_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_REPLY_WORK_RESOURCE_ID on
+  SCHEDULE_REPLY_WORK(RESOURCE_ID);
+create index SCHEDULE_REPLY_WORK_JOB_ID on
+  SCHEDULE_REPLY_WORK(JOB_ID);
+
+--------------------------------
+-- Schedule Reply Cancel Work --
+--------------------------------
+
+create table SCHEDULE_REPLY_CANCEL_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  ICALENDAR_UID                 varchar(255) not null,
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade,
+  ICALENDAR_TEXT                text         not null
+);
+
+create index SCHEDULE_REPLY_CANCEL_WORK_HOME_RESOURCE_ID on
+  SCHEDULE_REPLY_CANCEL_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_REPLY_CANCEL_WORK_JOB_ID on
+  SCHEDULE_REPLY_CANCEL_WORK(JOB_ID);
+
+----------------------------------
+-- Principal Purge Polling Work --
+----------------------------------
+
+create table PRINCIPAL_PURGE_POLLING_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index PRINCIPAL_PURGE_POLLING_WORK_JOB_ID on
+  PRINCIPAL_PURGE_POLLING_WORK(JOB_ID);
+
+--------------------------------
+-- Principal Purge Check Work --
+--------------------------------
+
+create table PRINCIPAL_PURGE_CHECK_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  UID                           varchar(255) not null
+);
+
+create index PRINCIPAL_PURGE_CHECK_WORK_JOB_ID on
+  PRINCIPAL_PURGE_CHECK_WORK(JOB_ID);
+
+--------------------------
+-- Principal Purge Work --
+--------------------------
+
+create table PRINCIPAL_PURGE_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  UID                           varchar(255) not null
+);
+
+create index PRINCIPAL_PURGE_WORK_JOB_ID on
+  PRINCIPAL_PURGE_WORK(JOB_ID);
+
+
+--------------------
+-- Schema Version --
+--------------------
+
+create table CALENDARSERVER (
+  NAME                          varchar(255) primary key, -- implicit index
+  VALUE                         varchar(255)
+);
+
+insert into CALENDARSERVER values ('VERSION', '40');
+insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '6');
+insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');
+insert into CALENDARSERVER values ('MIN-VALID-REVISION', '1');

Added: CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_40_to_41.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_40_to_41.sql	                        (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_40_to_41.sql	2014-05-02 20:07:26 UTC (rev 13408)
@@ -0,0 +1,41 @@
+----
+-- Copyright (c) 2012-2014 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 40 to 41 --
+---------------------------------------------------
+
+insert into HOME_STATUS (DESCRIPTION, ID) values ('purging', 2);
+
+--------------------------------
+-- Principal Home Remove Work --
+--------------------------------
+
+create table PRINCIPAL_PURGE_HOME_WORK (
+    "WORK_ID" integer primary key not null,
+    "JOB_ID" integer not null references JOB,
+    "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade
+);
+
+create index PRINCIPAL_PURGE_HOME__f35eea7a on PRINCIPAL_PURGE_HOME_WORK (
+    JOB_ID
+);
+create index PRINCIPAL_PURGE_HOME__967e4480 on PRINCIPAL_PURGE_HOME_WORK (
+    HOME_RESOURCE_ID
+);
+
+-- update the version
+update CALENDARSERVER set VALUE = '41' where NAME = 'VERSION';
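
The auto-generated index names here (PRINCIPAL_PURGE_HOME__f35eea7a and the like) differ from the PostgreSQL dialect because Oracle limits identifiers to 30 characters, so long descriptive names get truncated and suffixed with a short hash. A sketch of one plausible derivation, not necessarily the project's exact scheme:

    import hashlib

    def oracleSafeName(name, maxLength=30):
        # Leave short names alone; otherwise truncate and append an 8-character
        # hash of the full name so distinct long names stay distinct.
        if len(name) <= maxLength:
            return name
        digest = hashlib.md5(name.encode("ascii")).hexdigest()[:8]
        return "{}_{}".format(name[:maxLength - len(digest) - 1], digest)

    # The PostgreSQL-style name is 32 characters, so it gets shortened:
    print(oracleSafeName("PRINCIPAL_PURGE_HOME_WORK_JOB_ID"))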

Added: CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_40_to_41.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_40_to_41.sql	                        (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_40_to_41.sql	2014-05-02 20:07:26 UTC (rev 13408)
@@ -0,0 +1,39 @@
+----
+-- Copyright (c) 2012-2014 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 40 to 41 --
+---------------------------------------------------
+
+insert into HOME_STATUS values (2, 'purging');
+
+--------------------------------
+-- Principal Home Remove Work --
+--------------------------------
+
+create table PRINCIPAL_PURGE_HOME_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade
+);
+
+create index PRINCIPAL_PURGE_HOME_WORK_JOB_ID on
+  PRINCIPAL_PURGE_HOME_WORK(JOB_ID);
+create index PRINCIPAL_PURGE_HOME_HOME_RESOURCE_ID on
+  PRINCIPAL_PURGE_HOME_WORK(HOME_RESOURCE_ID);
+
+-- update the version
+update CALENDARSERVER set VALUE = '41' where NAME = 'VERSION';
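
A minimal sketch, assuming psycopg2 and a database reachable at the given DSN, of how an upgrade script like the one above is gated on the recorded schema version; the server's own upgrader drives this through its data store machinery, so the file path and connection handling here are purely illustrative:

    import psycopg2

    def applyUpgrade40to41(dsn, scriptPath="upgrade_from_40_to_41.sql"):
        conn = psycopg2.connect(dsn)
        try:
            cur = conn.cursor()
            cur.execute("select VALUE from CALENDARSERVER where NAME = 'VERSION'")
            (version,) = cur.fetchone()
            if version != "40":
                return False  # already upgraded, or too old for this script
            with open(scriptPath) as f:
                # The script both creates the new objects and bumps VERSION to 41.
                cur.execute(f.read())
            conn.commit()
            return True
        finally:
            conn.close()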

Modified: CalendarServer/trunk/txdav/common/datastore/sql_tables.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_tables.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/common/datastore/sql_tables.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -186,6 +186,7 @@
 
 _HOME_STATUS_NORMAL = _homeStatus('normal')
 _HOME_STATUS_EXTERNAL = _homeStatus('external')
+_HOME_STATUS_PURGING = _homeStatus('purging')
 
 _bindStatus = _schemaConstants(
     schema.CALENDAR_BIND_STATUS.DESCRIPTION,

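The new _HOME_STATUS_PURGING constant resolves the 'purging' row that the 40-to-41 upgrade scripts add to the HOME_STATUS lookup table (ID 2), letting a home be flagged while its data is being removed rather than only 'normal' or 'external'. A simplified analogue of that description-to-ID mapping; only the ID for 'purging' is visible in this changeset, the other two are assumed here:

    # Stand-in for the schema-driven constants; 0 and 1 are assumed values for
    # the pre-existing rows, 2 comes from the upgrade scripts above.
    _HOME_STATUS = {"normal": 0, "external": 1, "purging": 2}

    def markHomeForPurge(homeRow):
        # homeRow is a hypothetical mutable mapping standing in for a
        # CALENDAR_HOME row; real code goes through the store API instead.
        homeRow["STATUS"] = _HOME_STATUS["purging"]
        return homeRow

    print(markHomeForPurge({"RESOURCE_ID": 42, "STATUS": _HOME_STATUS["normal"]}))
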
Modified: CalendarServer/trunk/txdav/common/datastore/test/util.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/test/util.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/common/datastore/test/util.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -122,11 +122,24 @@
         return succeed(None)
 
 
+    def recordWithCalendarUserAddress(self, cuaddr):
+        if cuaddr.startswith("urn:x-uid:"):
+            return self.recordWithUID(cuaddr[10:])
+        elif cuaddr.startswith("urn:uuid:"):
+            return self.recordWithUID(cuaddr[9:])
+        else:
+            return succeed(None)
+
+
     def addRecord(self, record):
         self.records[record.uid] = record
 
 
+    def removeRecord(self, uid):
+        del self.records[uid]
 
+
+
 class TestStoreDirectoryRecord(object):
 
     implements(IStoreDirectoryRecord)
@@ -151,7 +164,7 @@
 
 
     def serverURI(self):
-        return self._server.uri
+        return self._server.uri if self._server else ""
 
 
 

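The test directory's new recordWithCalendarUserAddress only honours the urn:x-uid: and urn:uuid: forms and returns succeed(None) for anything else, which is enough for scheduling tests that now have to cope with users missing from the directory. A standalone, synchronous analogue of that prefix handling (the real method returns a Deferred via recordWithUID):

    def uidFromTestCUAddr(cuaddr):
        # Mirrors the prefix stripping in TestStoreDirectory.recordWithCalendarUserAddress.
        if cuaddr.startswith("urn:x-uid:"):
            return cuaddr[len("urn:x-uid:"):]
        elif cuaddr.startswith("urn:uuid:"):
            return cuaddr[len("urn:uuid:"):]
        return None

    assert uidFromTestCUAddr("urn:x-uid:user01") == "user01"
    assert uidFromTestCUAddr("urn:uuid:6423F94A-6B76-4A3A-815B-D52CFD77935D") == (
        "6423F94A-6B76-4A3A-815B-D52CFD77935D")
    assert uidFromTestCUAddr("mailto:user01@example.com") is None
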
Modified: CalendarServer/trunk/txdav/who/directory.py
===================================================================
--- CalendarServer/trunk/txdav/who/directory.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/who/directory.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -30,6 +30,7 @@
 from twistedcaldav.config import config
 from twistedcaldav.ical import Property
 from txdav.caldav.datastore.scheduling.ischedule.localservers import Servers
+from txdav.caldav.datastore.scheduling.utils import normalizeCUAddr
 from txdav.who.delegates import RecordType as DelegateRecordType
 from txdav.who.idirectory import (
     RecordType as DAVRecordType, AutoScheduleMode
@@ -64,8 +65,6 @@
 
     @inlineCallbacks
     def recordWithCalendarUserAddress(self, address):
-        # FIXME: moved this here to avoid circular import problems
-        from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
         address = normalizeCUAddr(address)
         record = None
 
@@ -430,7 +429,7 @@
         if config.Scheduling.Options.AutoSchedule.Enabled:
             if (
                 config.Scheduling.Options.AutoSchedule.Always or
-                self.autoScheduleMode not in (AutoScheduleMode.none, None) or  # right???
+                getattr(self, "autoScheduleMode", None) not in (AutoScheduleMode.none, None) or  # right???
                 (
                     yield self.autoAcceptFromOrganizer(organizer)
                 )

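The switch from self.autoScheduleMode to getattr(self, "autoScheduleMode", None) matters because scheduling can now be asked about a calendar user with no valid directory record, where the attribute may simply not be set; a bare attribute access would raise AttributeError instead of falling through to the organizer check. A toy illustration with a hypothetical minimal record object:

    class BareRecord(object):
        # No autoScheduleMode attribute at all, like a record built without a
        # full directory entry.
        pass

    record = BareRecord()
    mode = getattr(record, "autoScheduleMode", None)
    print(mode is None)  # True -- instead of raising AttributeError
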
Modified: CalendarServer/trunk/txdav/who/util.py
===================================================================
--- CalendarServer/trunk/txdav/who/util.py	2014-05-02 18:42:58 UTC (rev 13407)
+++ CalendarServer/trunk/txdav/who/util.py	2014-05-02 20:07:26 UTC (rev 13408)
@@ -33,10 +33,10 @@
 )
 from twext.who.util import ConstantsContainer
 from twisted.cred.credentials import UsernamePassword
+from twisted.internet.defer import succeed, inlineCallbacks
 from twisted.python.filepath import FilePath
 from twisted.python.reflect import namedClass
 from twistedcaldav.config import fullServerPath
-from txdav.caldav.datastore.scheduling.cuaddress import normalizeCUAddr
 from txdav.who.augment import AugmentedDirectoryService
 from txdav.who.delegates import DirectoryService as DelegateDirectoryService
 from txdav.who.idirectory import (
@@ -45,8 +45,6 @@
 )
 from txdav.who.wiki import DirectoryService as WikiDirectoryService
 from txdav.who.xml import DirectoryService as XMLDirectoryService
-from uuid import UUID
-from twisted.internet.defer import succeed, inlineCallbacks
 
 
 log = Logger()
@@ -248,38 +246,6 @@
 
 
 
-def uidFromCalendarUserAddress(address):
-    """
-        Get a uid from a calendar user address
-        May return None
-    """
-
-    address = normalizeCUAddr(address)
-
-    if address.startswith("urn:x-uid:"):
-        return address[10:]
-
-    elif address.startswith("urn:uuid:"):
-        try:
-            UUID(address[9:])
-        except ValueError:
-            log.info("Invalid GUID: {guid}", guid=address[9:])
-        else:
-            return address[9:]
-
-    elif address.startswith("mailto:"):
-        return address[7:]
-
-    elif address.startswith("/principals/"):
-        parts = address.split("/")
-        if len(parts) == 4:
-            return parts[3]
-
-    return None
-
-
-
-
 class InMemoryDirectoryService(IndexDirectoryService):
     """
     An in-memory IDirectoryService.  You must call updateRecords( ) if you want
@@ -306,7 +272,7 @@
         recordsByUID = dict(((record.uid, record) for record in records))
         if not create:
             # Make sure all the records already exist
-            for uid, record in recordsByUID.items():
+            for uid, _ignore_record in recordsByUID.items():
                 if uid not in self._index[self.fieldName.uid]:
                     raise NoSuchRecordError(uid)
 
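In InMemoryDirectoryService.updateRecords, the create=False branch only needs each record's uid to confirm it already exists in the index, hence the _ignore_record rename. A simplified analogue of that existence check, with a plain dict as the index and ValueError standing in for NoSuchRecordError:

    from collections import namedtuple

    Rec = namedtuple("Rec", "uid")

    def checkRecordsExist(index, records):
        recordsByUID = dict((record.uid, record) for record in records)
        for uid in recordsByUID:
            if uid not in index:
                raise ValueError("no such record: {}".format(uid))

    checkRecordsExist({"user01": Rec("user01")}, [Rec("user01")])  # passes
    # checkRecordsExist({}, [Rec("user02")]) would raise ValueError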