[CalendarServer-changes] [12418] CalendarServer/branches/users/cdaboo/pod-migration/txdav

source_changes at macosforge.org
Wed Mar 12 11:19:45 PDT 2014


Revision: 12418
          http://trac.calendarserver.org//changeset/12418
Author:   cdaboo at apple.com
Date:     2014-01-21 20:07:03 -0800 (Tue, 21 Jan 2014)
Log Message:
-----------
Checkpoint: cross-pod migration. Currently supports home child sync without data.

Modified Paths:
--------------
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/base/datastore/util.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/schedule.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/sql.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql_external.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/file.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/conduit.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_conduit.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/util.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_external.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current.sql
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_32_to_33.sql
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_32_to_33.sql
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_tables.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/test/util.py

Added Paths:
-----------
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/migration.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_migration.py
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/oracle-dialect/v33.sql
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/postgres-dialect/v33.sql
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_33_to_34.sql
    CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_33_to_34.sql

Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/base/datastore/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/base/datastore/util.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/base/datastore/util.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -104,6 +104,12 @@
         return "objectWithExternalID:%s:%s" % (homeResourceID, externalID)
 
 
+    # Home
+
+    def keyForHomeData(self, homeType, ownerUID, migration):
+        return "homeData:%s.%s.%s" % (homeType, migration, ownerUID)
+
+
     # Home metadata (Created/Modified)
 
     def keyForHomeMetaData(self, homeResourceID):

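The new key embeds the migration status alongside the home type and owner UID, so cached rows for a normal home and an in-flight migrating copy of the same owner can never collide. A quick sketch of the separation (the numeric status values here are assumptions; only their distinctness matters):

    # Sketch: migration status keeps cache keys for the same owner apart.
    _MIGRATION_STATUS_NONE = 0       # assumed values, for illustration only
    _MIGRATION_STATUS_MIGRATING = 1
    ECALENDARTYPE = 0                # assumed home-type enum value

    def keyForHomeData(homeType, ownerUID, migration):
        return "homeData:%s.%s.%s" % (homeType, migration, ownerUID)

    assert keyForHomeData(ECALENDARTYPE, "user01", _MIGRATION_STATUS_NONE) != \
        keyForHomeData(ECALENDARTYPE, "user01", _MIGRATION_STATUS_MIGRATING)
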
Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/schedule.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/schedule.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/schedule.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -21,6 +21,7 @@
 from twisted.python.util import FancyEqMixin
 from twisted.python.components import proxyForInterface
 from twisted.internet.defer import inlineCallbacks, returnValue
+from txdav.common.datastore.sql_tables import _MIGRATION_STATUS_NONE
 
 
 
@@ -41,10 +42,10 @@
 
 
     @inlineCallbacks
-    def calendarHomeWithUID(self, uid, create=False):
+    def calendarHomeWithUID(self, uid, create=False, migration=_MIGRATION_STATUS_NONE):
         # FIXME: 'create' flag
         newHome = yield super(ImplicitTransaction, self
-            ).calendarHomeWithUID(uid, create)
+            ).calendarHomeWithUID(uid, create=create, migration=migration)
 #        return ImplicitCalendarHome(newHome, self)
         if newHome is None:
             returnValue(None)

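The override now forwards create and migration by keyword rather than by position, which keeps the proxy correct if the superclass signature grows again. A minimal sketch (hypothetical class names) of the failure mode that avoids:

    # Hypothetical sketch: keyword forwarding keeps proxies honest.
    class Base(object):
        def calendarHomeWithUID(self, uid, create=False, migration=None):
            return (uid, create, migration)

    class Proxy(Base):
        def calendarHomeWithUID(self, uid, create=False, migration=None):
            # Forwarding positionally as (uid, create) would silently drop
            # `migration`; keywords bind each flag to the right parameter.
            return super(Proxy, self).calendarHomeWithUID(
                uid, create=create, migration=migration)

    assert Proxy().calendarHomeWithUID("u1", migration="x") == ("u1", False, "x")
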
Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/sql.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/sql.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -55,7 +55,6 @@
     pyCalendarTodatetime, parseSQLDateToPyCalendar
 from twistedcaldav.ical import Component, InvalidICalendarDataError, Property
 from twistedcaldav.instance import InvalidOverriddenInstanceError
-from twistedcaldav.memcacher import Memcacher
 
 from txdav.base.propertystore.base import PropertyName
 from txdav.caldav.datastore.query.builder import buildExpression
@@ -415,8 +414,6 @@
     _notifierPrefix = "CalDAV"
     _dataVersionKey = "CALENDAR-DATAVERSION"
 
-    _cacher = Memcacher("SQL.calhome", pickle=True, key_normalization=False)
-
     _componentCalendarName = {
         "VEVENT": "calendar",
         "VTODO": "tasks",
@@ -517,7 +514,10 @@
             Where=rp.RESOURCE_ID == self._resourceID
         ).on(self._txn)
 
-        yield self._cacher.delete(str(self._ownerUID))
+        queryCacher = self._txn._queryCacher
+        if queryCacher:
+            cacheKey = queryCacher.keyForHomeData(self._homeType, self._ownerUID, self._migration)
+            yield queryCacher.delete(cacheKey)
 
 
     @inlineCallbacks

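The class-level Memcacher gives way to the transaction's shared query cacher, keyed by the new composite home-data key. The diff shows only the invalidation side; below is a hedged sketch of how a read path would pair with it (getHomeData, the get/set calls, and loadHomeDataFromSQL are assumptions for illustration, not code from this commit):

    from twisted.internet.defer import inlineCallbacks, returnValue, succeed

    def loadHomeDataFromSQL(txn, homeType, ownerUID, migration):
        # Stand-in for the real SELECT against the home table.
        return succeed({"ownerUID": ownerUID, "migration": migration})

    @inlineCallbacks
    def getHomeData(txn, homeType, ownerUID, migration):
        queryCacher = txn._queryCacher
        if queryCacher:
            cacheKey = queryCacher.keyForHomeData(homeType, ownerUID, migration)
            cached = yield queryCacher.get(cacheKey)
            if cached is not None:
                returnValue(cached)
        data = yield loadHomeDataFromSQL(txn, homeType, ownerUID, migration)
        if queryCacher:
            yield queryCacher.set(cacheKey, data)
        returnValue(data)
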
Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -41,7 +41,6 @@
 
 from twistedcaldav import carddavxml, customxml
 from twistedcaldav.config import config
-from twistedcaldav.memcacher import Memcacher
 from twistedcaldav.vcard import Component as VCard, InvalidVCardDataError, Property, \
     vCardProductID
 
@@ -58,7 +57,8 @@
 from txdav.common.datastore.sql_tables import _ABO_KIND_PERSON, \
     _ABO_KIND_GROUP, _ABO_KIND_RESOURCE, _ABO_KIND_LOCATION, schema, \
     _BIND_MODE_OWN, _BIND_MODE_WRITE, _BIND_STATUS_ACCEPTED, \
-    _BIND_STATUS_INVITED, _BIND_MODE_INDIRECT, _BIND_STATUS_DECLINED
+    _BIND_STATUS_INVITED, _BIND_MODE_INDIRECT, _BIND_STATUS_DECLINED, \
+    _MIGRATION_STATUS_NONE, _MIGRATION_STATUS_MIGRATING, _HOME_STATUS_EXTERNAL
 from txdav.common.icommondatastore import InternalDataStoreError, \
     InvalidUIDError, UIDExistsError, ObjectResourceTooBigError, \
     InvalidObjectResourceError, InvalidComponentForStoreError, \
@@ -84,12 +84,11 @@
 
     _notifierPrefix = "CardDAV"
     _dataVersionKey = "ADDRESSBOOK-DATAVERSION"
-    _cacher = Memcacher("SQL.adbkhome", pickle=True, key_normalization=False)
 
 
-    def __init__(self, transaction, ownerUID):
+    def __init__(self, transaction, ownerUID, migration=_MIGRATION_STATUS_NONE):
 
-        super(AddressBookHome, self).__init__(transaction, ownerUID)
+        super(AddressBookHome, self).__init__(transaction, ownerUID, migration=migration)
         self._addressbookPropertyStoreID = None
         self._addressbook = None
 
@@ -116,6 +115,8 @@
         return (
             cls._homeSchema.RESOURCE_ID,
             cls._homeSchema.OWNER_UID,
+            cls._homeSchema.STATUS,
+            cls._homeSchema.MIGRATION,
             cls._homeSchema.ADDRESSBOOK_PROPERTY_STORE_ID,
         )
 
@@ -131,40 +132,37 @@
         return (
             "_resourceID",
             "_ownerUID",
+            "_status",
+            "_migration",
             "_addressbookPropertyStoreID",
         )
 
 
     @inlineCallbacks
-    def initFromStore(self, no_cache=False):
+    def made(self):
         """
-        Initialize this object from the store. We read in and cache all the
-        extra meta-data from the DB to avoid having to do DB queries for those
-        individually later.
+        Create the default owned address book.
         """
 
-        result = yield super(AddressBookHome, self).initFromStore(no_cache)
-        if result is not None:
-            # Created owned address book
-            addressbook = AddressBook(
-                home=self,
-                name="addressbook",
-                resourceID=self._resourceID,
-                mode=_BIND_MODE_OWN,
-                status=_BIND_STATUS_ACCEPTED,
-            )
-            yield addressbook._loadPropertyStore()
+        # Created owned address book
+        c = AddressBook._externalClass if self.external() else AddressBook
+        addressbook = c(
+            home=self,
+            name="addressbook",
+            resourceID=self._resourceID,
+            mode=_BIND_MODE_OWN,
+            status=_BIND_STATUS_ACCEPTED,
+        )
+        yield addressbook._loadPropertyStore()
 
-            # Extra check for shared
-            invites = yield addressbook.sharingInvites()
-            if len(invites) != 0:
-                addressbook._bindMessage = "shared"
+        # Extra check for shared
+        invites = yield addressbook.sharingInvites()
+        if len(invites) != 0:
+            addressbook._bindMessage = "shared"
 
-            self._addressbook = addressbook
+        self._addressbook = addressbook
 
-        returnValue(result)
 
-
     @inlineCallbacks
     def remove(self):
         ah = schema.ADDRESSBOOK_HOME
@@ -194,7 +192,10 @@
             )
         ).on(self._txn)
 
-        yield self._cacher.delete(str(self._ownerUID))
+        queryCacher = self._txn._queryCacher
+        if queryCacher:
+            cacheKey = queryCacher.keyForHomeData(self._homeType, self._ownerUID, self._migration)
+            yield queryCacher.delete(cacheKey)
 
 
     @inlineCallbacks
@@ -352,6 +353,49 @@
         returnValue(result)
 
 
+    #
+    # Migration related
+    #
+
+    @inlineCallbacks
+    def migrateToThisPod(self, user, final):
+        """
+        Migrate data for the specified user (on a different pod) to this pod. L{final} is
+        C{True} when this is the final incremental sync (where we need to sync everything).
+        When C{False} we only sync owned child collections (without properties).
+
+        @param user: the user to migrate
+        @type user: C{str}
+        @param final: whether or not this is the final sync
+        @type final: C{bool}
+        """
+
+        assert self._migration == _MIGRATION_STATUS_MIGRATING
+
+        # Get external home for the user (create if needed)
+        otherHome = yield self._txn.homeWithUID(self._homeType, user, create=True)
+        assert otherHome._status == _HOME_STATUS_EXTERNAL
+
+        # Force the external home to look like it is migrating. This will enable certain external API calls
+        # that are normally disabled for sharing (e.g., ability to load all child resources).
+        otherHome._migration = _MIGRATION_STATUS_MIGRATING
+
+        local_children = yield self.loadChildren()
+        local_children = dict([(child.external_id(), child) for child in local_children if child.owned()])
+
+        # Get list of owned child collections
+        remote_children = yield otherHome.loadChildren()
+        remote_children = dict([(child.external_id(), child) for child in remote_children if child.owned()])
+
+        # For address books, there is only one owned child
+        assert len(local_children) == 1
+        assert len(remote_children) == 1
+
+        local_child = local_children.values()[0]
+        remote_child = remote_children.values()[0]
+        yield local_child.migrateToThisPod(remote_child, final)
+
+
 AddressBookHome._register(EADDRESSBOOKTYPE)
 
 
@@ -500,7 +544,9 @@
         overallBindStatus = _BIND_STATUS_INVITED
         minBindRevision = None
         for row in rows:
-            bindMode, homeID, resourceGroupID, externalID, name, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
+            resourceGroupID = row[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
+            bindStatus = row[cls.bindColumns().index(cls._bindSchema.BIND_STATUS)]
+            bindRevision = row[cls.bindColumns().index(cls._bindSchema.BIND_REVISION)]
             if groupID is None:
                 groupID = resourceGroupID
             minBindRevision = min(minBindRevision, bindRevision) if minBindRevision is not None else bindRevision
@@ -1124,7 +1170,7 @@
             home._txn, homeID=home._resourceID
         )
         for groupRow in groupRows:
-            bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+            resourceID = groupRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
             ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
             ownerHome = yield home._txn.homeWithResourceID(home._homeType, ownerAddressBookID, create=True)
             names |= set([ownerHome.uid()])
@@ -1152,7 +1198,7 @@
         )
         # get ownerHomeIDs
         for dataRow in dataRows:
-            bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
+            resourceID = dataRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
             ownerHome = yield home.ownerHomeWithChildID(resourceID)
             ownerHomeToDataRowMap[ownerHome] = dataRow
 
@@ -1161,12 +1207,15 @@
             home._txn, homeID=home._resourceID
         )
         for groupBindRow in groupBindRows:
-            bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+            resourceID = groupBindRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
             ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
             ownerHome = yield home.ownerHomeWithChildID(ownerAddressBookID)
             if ownerHome not in ownerHomeToDataRowMap:
-                groupBindRow[0] = _BIND_MODE_INDIRECT
-                groupBindRow[3:7] = 4 * [None]  # bindName, bindStatus, bindRevision, bindMessage
+                groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_MODE)] = _BIND_MODE_INDIRECT
+                groupBindRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_NAME)] = None
+                groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_STATUS)] = None
+                groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_REVISION)] = None
+                groupBindRow[cls.bindColumns().index(cls._bindSchema.MESSAGE)] = None
                 ownerHomeToDataRowMap[ownerHome] = groupBindRow
 
         if ownerHomeToDataRowMap:
@@ -1404,7 +1453,8 @@
             readWriteGroupIDs = set()
             readOnlyGroupIDs = set()
             for groupBindRow in groupBindRows:
-                bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+                resourceID = groupBindRow[self.bindColumns().index(self._bindSchema.RESOURCE_ID)]
+                bindMode = groupBindRow[self.bindColumns().index(self._bindSchema.BIND_MODE)]
                 if bindMode == _BIND_MODE_WRITE:
                     readWriteGroupIDs.add(resourceID)
                 else:
@@ -1465,7 +1515,8 @@
         readWriteGroupIDs = []
         readOnlyGroupIDs = []
         for groupBindRow in groupBindRows:
-            bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+            resourceID = groupBindRow[self.bindColumns().index(self._bindSchema.RESOURCE_ID)]
+            bindMode = groupBindRow[self.bindColumns().index(self._bindSchema.BIND_MODE)]
             if bindMode == _BIND_MODE_WRITE:
                 readWriteGroupIDs.append(resourceID)
             else:
@@ -1898,12 +1948,12 @@
         yield child._loadPropertyStore(propstore)
 
         if groupBindData:
-            bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupBindData[:AddressBookObject.bindColumnCount] #@UnusedVariable
-            child._bindMode = bindMode
-            child._bindStatus = bindStatus
-            child._bindMessage = bindMessage
-            child._bindName = bindName
-            child._bindRevision = bindRevision
+            bindData = dict(zip(cls.bindColumns(), groupBindData[:AddressBookObject.bindColumnCount]))
+            child._bindMode = bindData[cls._bindSchema.BIND_MODE]
+            child._bindStatus = bindData[cls._bindSchema.BIND_STATUS]
+            child._bindMessage = bindData[cls._bindSchema.MESSAGE]
+            child._bindName = bindData[cls._bindSchema.RESOURCE_NAME]
+            child._bindRevision = bindData[cls._bindSchema.BIND_REVISION]
         else:
             invites = yield child.sharingInvites()
             if len(invites):
@@ -2195,7 +2245,8 @@
         )
         if groupBindRows:
             groupBindRow = groupBindRows[0]
-            bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+            resourceID = groupBindRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
+            bindStatus = groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_STATUS)]
 
             if accepted is not None and (bindStatus == _BIND_STATUS_ACCEPTED) != bool(accepted):
                 returnValue(None)

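A pattern repeated throughout this file: positional unpacking of bind rows (bindMode, homeID, resourceID, ... = row[:bindColumnCount]) is replaced with lookups keyed on bindColumns(), so adding or reordering a bind column no longer silently shifts every field. A minimal sketch of the two styles (column names are stand-ins for the real schema objects):

    # Sketch: column-keyed access survives schema changes; positions do not.
    BIND_COLUMNS = ["BIND_MODE", "HOME_RESOURCE_ID", "RESOURCE_ID",
                    "EXTERNAL_ID", "RESOURCE_NAME", "BIND_STATUS",
                    "BIND_REVISION", "MESSAGE"]
    row = [2, 101, 55, None, "addressbook", 1, 7, "shared"]

    # Old style: breaks the moment a column is inserted or reordered.
    bindMode, homeID, resourceID = row[0], row[1], row[2]

    # New style, as in this change: index by column identity.
    resourceID = row[BIND_COLUMNS.index("RESOURCE_ID")]
    bindStatus = row[BIND_COLUMNS.index("BIND_STATUS")]

    # Or convert the whole row at once, as the group-bind loading code now does.
    bindData = dict(zip(BIND_COLUMNS, row))
    assert bindData["RESOURCE_ID"] == resourceID
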
Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql_external.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql_external.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql_external.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ##
+from txdav.common.datastore.sql_tables import _MIGRATION_STATUS_MIGRATING
 """
 SQL backend for CardDAV storage when resources are external.
 """
@@ -62,10 +63,14 @@
         """
         No children.
         """
-        raise AssertionError("CommonHomeExternal: not supported")
+        # Only available if migrating
+        if self._migration != _MIGRATION_STATUS_MIGRATING:
+            raise AssertionError("CommonHomeExternal: not supported")
 
+        return super(CommonHomeExternal, self).addressbook()
 
 
+
 class AddressBookExternal(CommonHomeChildExternal, AddressBook):
     """
     SQL-based implementation of L{IAddressBook}.

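The super(CommonHomeExternal, self) call is the key move here: it anchors the method lookup past the external mixin, so a home flagged as migrating reaches the ordinary AddressBookHome implementation instead of the external stub. A minimal sketch of that MRO hop (class names are placeholders):

    # Sketch: anchoring super() at the mixin skips its override.
    class AddressBookHomeBase(object):
        def addressbook(self):
            return "owned addressbook"

    class CommonHomeExternalLike(object):
        def addressbook(self):
            raise AssertionError("not supported")

    class AddressBookHomeExternalLike(CommonHomeExternalLike, AddressBookHomeBase):
        def addressbook(self, migrating=True):
            if not migrating:
                raise AssertionError("not supported")
            # Lookup starts *after* CommonHomeExternalLike in the MRO,
            # falling through to the normal home implementation.
            return super(CommonHomeExternalLike, self).addressbook()

    assert AddressBookHomeExternalLike().addressbook() == "owned addressbook"
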
Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/file.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/file.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/file.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -304,15 +304,15 @@
         CommonStoreTransaction._homeClass[EADDRESSBOOKTYPE] = AddressBookHome
 
 
-    def calendarHomeWithUID(self, uid, create=False):
-        return self.homeWithUID(ECALENDARTYPE, uid, create=create)
+    def calendarHomeWithUID(self, uid, create=False, migration=None):
+        return self.homeWithUID(ECALENDARTYPE, uid, create=create, migration=migration)
 
 
-    def addressbookHomeWithUID(self, uid, create=False):
-        return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create)
+    def addressbookHomeWithUID(self, uid, create=False, migration=None):
+        return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create, migration=migration)
 
 
-    def _determineMemo(self, storeType, uid, create=False):
+    def _determineMemo(self, storeType, uid, create=False, migration=None):
         """
         Determine the memo dictionary to use for homeWithUID.
         """
@@ -335,7 +335,7 @@
 
 
     @memoizedKey("uid", _determineMemo, deferredResult=False)
-    def homeWithUID(self, storeType, uid, create=False):
+    def homeWithUID(self, storeType, uid, create=False, migration=None):
         if uid.startswith("."):
             return None
 
@@ -346,7 +346,7 @@
 
 
     @memoizedKey("uid", "_notificationHomes", deferredResult=False)
-    def notificationsWithUID(self, uid, home=None):
+    def notificationsWithUID(self, uid, home=None, migration=None):
 
         if home is None:
             home = self.homeWithUID(self._notificationHomeType, uid, create=True)

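The file-backed store gains the same migration= keyword purely for interface parity; nothing in file.py acts on it. If the file store ever needed to reject real migration attempts, a guard along these lines could be added (a sketch only; the commit itself just accepts and ignores the keyword):

    # Sketch: interface-parity stub with an explicit guard.
    class FileStoreTransactionSketch(object):
        def homeWithUID(self, storeType, uid, create=False, migration=None):
            if migration is not None:
                raise NotImplementedError(
                    "file store does not support cross-pod migration")
            return ("home", storeType, uid, create)  # stand-in result
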
Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/conduit.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/conduit.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/conduit.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -76,34 +76,6 @@
         self.store = store
 
 
-    def validRequst(self, source_guid, destination_guid):
-        """
-        Verify that the specified GUIDs are valid for the request and return the
-        matching directory records.
-
-        @param source_guid: GUID for the user on whose behalf the request is being made
-        @type source_guid: C{str}
-        @param destination_guid: GUID for the user to whom the request is being sent
-        @type destination_guid: C{str}
-
-        @return: C{tuple} of L{IStoreDirectoryRecord}
-        """
-
-        source = self.store.directoryService().recordWithUID(source_guid)
-        if source is None:
-            raise DirectoryRecordNotFoundError("Cross-pod source: {}".format(source_guid))
-        if not source.thisServer():
-            raise FailedCrossPodRequestError("Cross-pod source not on this server: {}".format(source_guid))
-
-        destination = self.store.directoryService().recordWithUID(destination_guid)
-        if destination is None:
-            raise DirectoryRecordNotFoundError("Cross-pod destination: {}".format(destination_guid))
-        if destination.thisServer():
-            raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(destination_guid))
-
-        return (source, destination,)
-
-
     @inlineCallbacks
     def sendRequest(self, txn, recipient, data, stream=None, streamType=None):
 
@@ -156,9 +128,37 @@
 
 
     #
-    # Invite related apis
+    # Sharing related apis
     #
 
+    def validShareRequest(self, source_guid, destination_guid):
+        """
+        Verify that the specified GUIDs are valid for the request and return the
+        matching directory records.
+
+        @param source_guid: GUID for the user on whose behalf the request is being made
+        @type source_guid: C{str}
+        @param destination_guid: GUID for the user to whom the request is being sent
+        @type destination_guid: C{str}
+
+        @return: C{tuple} of L{IStoreDirectoryRecord}
+        """
+
+        source = self.store.directoryService().recordWithUID(source_guid)
+        if source is None:
+            raise DirectoryRecordNotFoundError("Cross-pod source: {}".format(source_guid))
+        if not source.thisServer():
+            raise FailedCrossPodRequestError("Cross-pod source not on this server: {}".format(source_guid))
+
+        destination = self.store.directoryService().recordWithUID(destination_guid)
+        if destination is None:
+            raise DirectoryRecordNotFoundError("Cross-pod destination: {}".format(destination_guid))
+        if destination.thisServer():
+            raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(destination_guid))
+
+        return (source, destination,)
+
+
     @inlineCallbacks
     def send_shareinvite(self, txn, homeType, ownerUID, ownerID, ownerName, shareeUID, shareUID, bindMode, summary, copy_properties, supported_components):
         """
@@ -186,7 +186,7 @@
         @type supported_components: C{str}
         """
 
-        _ignore_sender, recipient = self.validRequst(ownerUID, shareeUID)
+        _ignore_sender, recipient = self.validShareRequest(ownerUID, shareeUID)
 
         action = {
             "action": "shareinvite",
@@ -260,7 +260,7 @@
         @type shareUID: C{str}
         """
 
-        _ignore_sender, recipient = self.validRequst(ownerUID, shareeUID)
+        _ignore_sender, recipient = self.validShareRequest(ownerUID, shareeUID)
 
         action = {
             "action": "shareuninvite",
@@ -290,7 +290,7 @@
         # Create a share
         shareeHome = yield txn.homeWithUID(message["type"], message["sharee"], create=True)
         if shareeHome is None or shareeHome.external():
-            FailedCrossPodRequestError("Invalid sharee UID specified")
+            raise FailedCrossPodRequestError("Invalid sharee UID specified")
 
         try:
             yield shareeHome.processExternalUninvite(
@@ -299,7 +299,7 @@
                 message["share_id"],
             )
         except ExternalShareFailed as e:
-            FailedCrossPodRequestError(str(e))
+            raise FailedCrossPodRequestError(str(e))
 
         returnValue({
             "result": "ok",
@@ -325,7 +325,7 @@
         @type summary: C{str}
         """
 
-        _ignore_sender, recipient = self.validRequst(shareeUID, ownerUID)
+        _ignore_sender, recipient = self.validShareRequest(shareeUID, ownerUID)
 
         action = {
             "action": "sharereply",
@@ -357,7 +357,7 @@
         # Create a share
         ownerHome = yield txn.homeWithUID(message["type"], message["owner"])
         if ownerHome is None or ownerHome.external():
-            FailedCrossPodRequestError("Invalid owner UID specified")
+            raise FailedCrossPodRequestError("Invalid owner UID specified")
 
         try:
             yield ownerHome.processExternalReply(
@@ -368,7 +368,7 @@
                 summary=message.get("summary")
             )
         except ExternalShareFailed as e:
-            FailedCrossPodRequestError(str(e))
+            raise FailedCrossPodRequestError(str(e))
 
         returnValue({
             "result": "ok",
@@ -398,7 +398,7 @@
 
         actionName = "add-attachment"
         shareeView = objectResource._parentCollection
-        action, recipient = self._send(actionName, shareeView, objectResource)
+        action, recipient = self._share_send(actionName, shareeView, objectResource)
         action["rids"] = rids
         action["filename"] = filename
         result = yield self.sendRequest(shareeView._txn, recipient, action, stream, content_type)
@@ -418,7 +418,7 @@
         """
 
         actionName = "add-attachment"
-        _ignore_shareeView, objectResource = yield self._recv(txn, message, actionName)
+        _ignore_shareeView, objectResource = yield self._share_recv(txn, message, actionName)
         try:
             attachment, location = yield objectResource.addAttachment(
                 message["rids"],
@@ -458,7 +458,7 @@
 
         actionName = "update-attachment"
         shareeView = objectResource._parentCollection
-        action, recipient = self._send(actionName, shareeView, objectResource)
+        action, recipient = self._share_send(actionName, shareeView, objectResource)
         action["managedID"] = managed_id
         action["filename"] = filename
         result = yield self.sendRequest(shareeView._txn, recipient, action, stream, content_type)
@@ -478,7 +478,7 @@
         """
 
         actionName = "update-attachment"
-        _ignore_shareeView, objectResource = yield self._recv(txn, message, actionName)
+        _ignore_shareeView, objectResource = yield self._share_recv(txn, message, actionName)
         try:
             attachment, location = yield objectResource.updateAttachment(
                 message["managedID"],
@@ -514,7 +514,7 @@
 
         actionName = "remove-attachment"
         shareeView = objectResource._parentCollection
-        action, recipient = self._send(actionName, shareeView, objectResource)
+        action, recipient = self._share_send(actionName, shareeView, objectResource)
         action["rids"] = rids
         action["managedID"] = managed_id
         result = yield self.sendRequest(shareeView._txn, recipient, action)
@@ -534,7 +534,7 @@
         """
 
         actionName = "remove-attachment"
-        _ignore_shareeView, objectResource = yield self._recv(txn, message, actionName)
+        _ignore_shareeView, objectResource = yield self._share_recv(txn, message, actionName)
         try:
             yield objectResource.removeAttachment(
                 message["rids"],
@@ -557,12 +557,16 @@
     # Sharer data access related apis
     #
 
-    def _send(self, action, parent, child=None):
+    def _share_send(self, action, parent, child=None):
         """
         Base behavior for an operation on a L{CommonHomeChild}.
 
-        @param shareeView: sharee resource being operated on.
-        @type shareeView: L{CommonHomeChildExternal}
+        @param action: remote API call to execute
+        @type action: C{str}
+        @param parent: home child resource
+        @type parent: L{CommonHomeChild}
+        @param child: object resource (or C{None} to operate on the parent only)
+        @type child: L{CommonObjectResource}
         """
 
         homeType = parent.ownerHome()._homeType
@@ -570,7 +574,7 @@
         ownerID = parent.external_id()
         shareeUID = parent.viewerHome().uid()
 
-        _ignore_sender, recipient = self.validRequst(shareeUID, ownerUID)
+        _ignore_sender, recipient = self.validShareRequest(shareeUID, ownerUID)
 
         result = {
             "action": action,
@@ -585,12 +589,16 @@
 
 
     @inlineCallbacks
-    def _recv(self, txn, message, expected_action):
+    def _share_recv(self, txn, message, expected_action):
         """
         Base behavior for sharer data access.
 
-        @param message: message arguments
+        @param txn: transaction
+        @type txn: L{CommonStoreTransaction}
+        @param message: JSON data to process
         @type message: C{dict}
+        @param expected_action: expected action in message
+        @type expected_action: C{str}
         """
 
         if message["action"] != expected_action:
@@ -599,21 +607,21 @@
         # Get a share
         ownerHome = yield txn.homeWithUID(message["type"], message["owner"])
         if ownerHome is None or ownerHome.external():
-            FailedCrossPodRequestError("Invalid owner UID specified")
+            raise FailedCrossPodRequestError("Invalid owner UID specified")
 
         shareeHome = yield txn.homeWithUID(message["type"], message["sharee"])
         if shareeHome is None or not shareeHome.external():
-            FailedCrossPodRequestError("Invalid sharee UID specified")
+            raise FailedCrossPodRequestError("Invalid sharee UID specified")
 
         shareeView = yield shareeHome.childWithID(message["owner_id"])
         if shareeView is None:
-            FailedCrossPodRequestError("Invalid shared resource specified")
+            raise FailedCrossPodRequestError("Invalid shared resource specified")
 
         resourceID = message.get("resource_id", None)
         if resourceID is not None:
             objectResource = yield shareeView.objectResourceWithID(resourceID)
             if objectResource is None:
-                FailedCrossPodRequestError("Invalid owner shared object resource specified")
+                raise FailedCrossPodRequestError("Invalid owner shared object resource specified")
         else:
             objectResource = None
 
@@ -626,7 +634,7 @@
     #
 
     @inlineCallbacks
-    def _simple_send(self, actionName, shareeView, objectResource=None, transform=None, args=None, kwargs=None):
+    def _simple_share_send(self, actionName, shareeView, objectResource=None, transform=None, args=None, kwargs=None):
         """
         A simple send operation that returns a value.
 
@@ -644,20 +652,20 @@
         @type kwargs: C{dict}
         """
 
-        action, recipient = self._send(actionName, shareeView, objectResource)
+        action, recipient = self._share_send(actionName, shareeView, objectResource)
         if args is not None:
             action["arguments"] = args
         if kwargs is not None:
             action["keywords"] = kwargs
         result = yield self.sendRequest(shareeView._txn, recipient, action)
         if result["result"] == "ok":
-            returnValue(result["value"] if transform is None else transform(result["value"], shareeView, objectResource))
+            returnValue(result["value"] if transform is None else transform(result["value"]))
         elif result["result"] == "exception":
             raise namedClass(result["class"])(result["message"])
 
 
     @inlineCallbacks
-    def _simple_recv(self, txn, actionName, message, method, onHomeChild=True, transform=None):
+    def _simple_share_recv(self, txn, actionName, message, method, onHomeChild=True, transform=None):
         """
         A simple recv operation that returns a value. We also look for an optional set of arguments/keywords
         and include those only if present.
@@ -672,7 +680,7 @@
         @type transform: C{callable}
         """
 
-        shareeView, objectResource = yield self._recv(txn, message, actionName)
+        shareeView, objectResource = yield self._share_recv(txn, message, actionName)
         try:
             if onHomeChild:
                 # Operate on the L{CommonHomeChild}
@@ -693,7 +701,7 @@
 
         returnValue({
             "result": "ok",
-            "value": transform(value, shareeView, objectResource) if transform is not None else value,
+            "value": transform(value) if transform is not None else value,
         })
 
 
@@ -710,7 +718,7 @@
         servertoserver,
         event_details,
     ):
-        action, recipient = self._send("freebusy", calresource)
+        action, recipient = self._share_send("freebusy", calresource)
         action["timerange"] = [timerange.start.getText(), timerange.end.getText()]
         action["matchtotal"] = matchtotal
         action["excludeuid"] = excludeuid
@@ -735,7 +743,7 @@
         @type message: C{dict}
         """
 
-        shareeView, _ignore_objectResource = yield self._recv(txn, message, "freebusy")
+        shareeView, _ignore_objectResource = yield self._share_recv(txn, message, "freebusy")
         try:
             # Operate on the L{CommonHomeChild}
             fbinfo = [[], [], []]
@@ -771,72 +779,269 @@
 
 
     @staticmethod
-    def _to_tuple(value, shareeView, objectResource):
+    def _to_tuple(value):
         return tuple(value)
 
 
     @staticmethod
-    def _to_string(value, shareeView, objectResource):
+    def _to_string(value):
         return str(value)
 
 
     @staticmethod
-    def _to_externalize(value, shareeView, objectResource):
-        if isinstance(value, shareeView._objectResourceClass):
-            value = value.externalize()
-        elif value is not None:
-            value = [v.externalize() for v in value]
-        return value
+    def _to_externalize(value):
+        return value.externalize() if value is not None else None
 
 
+    @staticmethod
+    def _to_externalize_list(value):
+        return [v.externalize() for v in value] if value is not None else None
+
+
     @classmethod
-    def _make_simple_homechild_action(cls, action, method, transform_recv=None, transform_send=None):
+    def _make_share_homechild_action(cls, action, method, transform_recv=None, transform_send=None):
         setattr(
             cls,
             "send_{}".format(action),
             lambda self, shareeView, *args, **kwargs:
-                self._simple_send(action, shareeView, transform=transform_send, args=args, kwargs=kwargs)
+                self._simple_share_send(action, shareeView, transform=transform_send, args=args, kwargs=kwargs)
         )
         setattr(
             cls,
             "recv_{}".format(action),
             lambda self, txn, message:
-                self._simple_recv(txn, action, message, method, transform=transform_recv)
+                self._simple_share_recv(txn, action, message, method, transform=transform_recv)
         )
 
 
     @classmethod
-    def _make_simple_object_action(cls, action, method, transform_recv=None, transform_send=None):
+    def _make_share_object_action(cls, action, method, transform_recv=None, transform_send=None):
         setattr(
             cls,
             "send_{}".format(action),
             lambda self, shareeView, objectResource, *args, **kwargs:
-                self._simple_send(action, shareeView, objectResource, transform=transform_send, args=args, kwargs=kwargs)
+                self._simple_share_send(action, shareeView, objectResource, transform=transform_send, args=args, kwargs=kwargs)
         )
         setattr(
             cls,
             "recv_{}".format(action),
             lambda self, txn, message:
-                self._simple_recv(txn, action, message, method, onHomeChild=False, transform=transform_recv)
+                self._simple_share_recv(txn, action, message, method, onHomeChild=False, transform=transform_recv)
         )
 
 
+    #
+    # Migration data access related apis
+    #
+
+    def validMigrateRequest(self, destination_guid):
+        """
+        Verify that the specified GUID is valid for the request and return the
+        matching directory record.
+
+        @param destination_guid: GUID for the user to whom the request is being sent
+        @type destination_guid: C{str}
+
+        @return: L{IStoreDirectoryRecord}
+        """
+
+        destination = self.store.directoryService().recordWithUID(destination_guid)
+        if destination is None:
+            raise DirectoryRecordNotFoundError("Cross-pod destination: {}".format(destination_guid))
+        if destination.thisServer():
+            raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(destination_guid))
+
+        return destination
+
+
+    def _migrate_send(self, action, target):
+        """
+        Base behavior for an operation on a store resource. L{target} is the store object that is
+        the target of the remote API call. It can be an L{CommonHome}, an L{CommonHomeChild},
+        or an L{CommonObjectResource}.
+
+        @param action: remote API call to execute
+        @type action: C{str}
+        @param target: store object
+        @type target: L{CommonHome} or L{CommonHomeChild} or L{CommonObjectResource}
+        """
+
+        if target._storeType == "home":
+            home = target
+            child = None
+            resource = None
+        elif target._storeType == "child":
+            home = target.viewerHome()
+            child = target
+            resource = None
+        elif target._storeType == "resource":
+            home = target.parentCollection().viewerHome()
+            child = target.parentCollection()
+            resource = target
+
+        homeType = home._homeType
+        ownerUID = home.uid()
+        childID = child.external_id() if child else None
+        objectName = resource.name() if resource else None
+
+        recipient = self.validMigrateRequest(ownerUID)
+
+        result = {
+            "action": action,
+            "type": homeType,
+            "owner": ownerUID,
+        }
+
+        if childID:
+            result["child_id"] = childID
+            if objectName:
+                result["object_name"] = objectName
+
+        return result, recipient
+
+
+    @inlineCallbacks
+    def _migrate_recv(self, txn, message, expected_action):
+        """
+        Base behavior for migrate data access.
+
+        @param txn: transaction
+        @type txn: L{CommonStoreTransaction}
+        @param message: JSON data to process
+        @type message: C{dict}
+        @param expected_action: expected action in message
+        @type expected_action: C{str}
+        """
+
+        if message["action"] != expected_action:
+            raise FailedCrossPodRequestError("Wrong action '{}' for recv_{}".format(message["action"], expected_action))
+
+        # Get a home
+        ownerHome = yield txn.homeWithUID(message["type"], message["owner"])
+        if ownerHome is None or ownerHome.external():
+            raise FailedCrossPodRequestError("Invalid owner UID specified")
+
+        # Get child resource
+        if "child_id" in message:
+            child = yield ownerHome.childWithID(message["child_id"])
+            if child is None:
+                raise FailedCrossPodRequestError("Invalid child resource specified")
+
+            if "object_name" in message:
+                resource = yield child.objectResourceWithName(message["object_name"])
+                if resource is None:
+                    raise FailedCrossPodRequestError("Invalid owner shared object resource specified")
+                target = resource
+            else:
+                target = child
+
+        else:
+            target = ownerHome
+
+        returnValue(target)
+
+
+    #
+    # Simple calls are ones where there is no argument and a single return value. We can simplify
+    # code generation for these by dynamically generating the appropriate class methods.
+    #
+
+    @inlineCallbacks
+    def _simple_migrate_send(self, actionName, target, transform=None, args=None, kwargs=None):
+        """
+        A simple send operation that returns a value.
+
+        @param actionName: name of the action.
+        @type actionName: C{str}
+        @param target: store object
+        @type target: L{CommonHome} or L{CommonHomeChild} or L{CommonObjectResource}
+        @param transform: a function used to convert the JSON result into return values.
+        @type transform: C{callable}
+        @param args: list of optional arguments.
+        @type args: C{list}
+        @param kwargs: optional keyword arguments.
+        @type kwargs: C{dict}
+        """
+
+        action, recipient = self._migrate_send(actionName, target)
+        if args is not None:
+            action["arguments"] = args
+        if kwargs is not None:
+            action["keywords"] = kwargs
+        result = yield self.sendRequest(target.transaction(), recipient, action)
+        if result["result"] == "ok":
+            returnValue(result["value"] if transform is None else transform(result["value"]))
+        elif result["result"] == "exception":
+            raise namedClass(result["class"])(result["message"])
+
+
+    @inlineCallbacks
+    def _simple_migrate_recv(self, txn, actionName, message, method, transform=None):
+        """
+        A simple recv operation that returns a value. We also look for an optional set of arguments/keywords
+        and include those only if present.
+
+        @param actionName: name of the action.
+        @type actionName: C{str}
+        @param message: message arguments
+        @type message: C{dict}
+        @param method: name of the method to execute on the shared resource to get the result.
+        @type method: C{str}
+        @param transform: method to call on returned JSON value to convert it to something useful.
+        @type transform: C{callable}
+        """
+
+        target = yield self._migrate_recv(txn, message, actionName)
+        try:
+            # Operate on whichever store object _migrate_recv resolved
+            value = yield getattr(target, method)(*message.get("arguments", ()), **message.get("keywords", {}))
+        except Exception as e:
+            returnValue({
+                "result": "exception",
+                "class": ".".join((e.__class__.__module__, e.__class__.__name__,)),
+                "message": str(e),
+            })
+
+        returnValue({
+            "result": "ok",
+            "value": transform(value) if transform is not None else value,
+        })
+
+
+    @classmethod
+    def _make_migrate_action(cls, action, method, transform_recv=None, transform_send=None):
+        setattr(
+            cls,
+            "send_{}".format(action),
+            lambda self, target, *args, **kwargs:
+                self._simple_migrate_send(action, target, transform=transform_send, args=args, kwargs=kwargs)
+        )
+        setattr(
+            cls,
+            "recv_{}".format(action),
+            lambda self, txn, message:
+                self._simple_migrate_recv(txn, action, message, method, transform=transform_recv)
+        )
+
+# Migrate calls
+PoddingConduit._make_migrate_action("loadchildren", "loadChildren", transform_recv=PoddingConduit._to_externalize_list)
+
 # Calls on L{CommonHomeChild} objects
-PoddingConduit._make_simple_homechild_action("countobjects", "countObjectResources")
-PoddingConduit._make_simple_homechild_action("listobjects", "listObjectResources")
-PoddingConduit._make_simple_homechild_action("resourceuidforname", "resourceUIDForName")
-PoddingConduit._make_simple_homechild_action("resourcenameforuid", "resourceNameForUID")
-PoddingConduit._make_simple_homechild_action("movehere", "moveObjectResourceHere")
-PoddingConduit._make_simple_homechild_action("moveaway", "moveObjectResourceAway")
-PoddingConduit._make_simple_homechild_action("synctoken", "syncToken")
-PoddingConduit._make_simple_homechild_action("resourcenamessincerevision", "resourceNamesSinceRevision", transform_send=PoddingConduit._to_tuple)
-PoddingConduit._make_simple_homechild_action("search", "search")
+PoddingConduit._make_share_homechild_action("countobjects", "countObjectResources")
+PoddingConduit._make_share_homechild_action("listobjects", "listObjectResources")
+PoddingConduit._make_share_homechild_action("resourceuidforname", "resourceUIDForName")
+PoddingConduit._make_share_homechild_action("resourcenameforuid", "resourceNameForUID")
+PoddingConduit._make_share_homechild_action("movehere", "moveObjectResourceHere")
+PoddingConduit._make_share_homechild_action("moveaway", "moveObjectResourceAway")
+PoddingConduit._make_share_homechild_action("synctoken", "syncToken")
+PoddingConduit._make_share_homechild_action("resourcenamessincerevision", "resourceNamesSinceRevision", transform_send=PoddingConduit._to_tuple)
+PoddingConduit._make_share_homechild_action("search", "search")
 
 # Calls on L{CommonObjectResource} objects
-PoddingConduit._make_simple_object_action("loadallobjects", "loadAllObjects", transform_recv=PoddingConduit._to_externalize)
-PoddingConduit._make_simple_object_action("loadallobjectswithnames", "loadAllObjectsWithNames", transform_recv=PoddingConduit._to_externalize)
-PoddingConduit._make_simple_object_action("objectwith", "objectWith", transform_recv=PoddingConduit._to_externalize)
-PoddingConduit._make_simple_object_action("create", "create", transform_recv=PoddingConduit._to_externalize)
-PoddingConduit._make_simple_object_action("setcomponent", "setComponent")
-PoddingConduit._make_simple_object_action("component", "component", transform_recv=PoddingConduit._to_string)
-PoddingConduit._make_simple_object_action("remove", "remove")
+PoddingConduit._make_share_object_action("loadallobjects", "loadAllObjects", transform_recv=PoddingConduit._to_externalize_list)
+PoddingConduit._make_share_object_action("loadallobjectswithnames", "loadAllObjectsWithNames", transform_recv=PoddingConduit._to_externalize_list)
+PoddingConduit._make_share_object_action("objectwith", "objectWith", transform_recv=PoddingConduit._to_externalize)
+PoddingConduit._make_share_object_action("create", "create", transform_recv=PoddingConduit._to_externalize)
+PoddingConduit._make_share_object_action("setcomponent", "setComponent")
+PoddingConduit._make_share_object_action("component", "component", transform_recv=PoddingConduit._to_string)
+PoddingConduit._make_share_object_action("remove", "remove")

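The migrate plumbing mirrors the sharing calls: _migrate_send assembles a JSON-serializable action dict whose keys depend on whether the target is a home, a home child, or an object resource, _migrate_recv resolves those keys back to a target on the receiving pod, and _make_migrate_action stamps matching send_/recv_ pairs onto PoddingConduit. A sketch of the three message shapes (all values invented for illustration):

    # Illustrative only: action dicts as _migrate_send builds them.
    home_action = {
        "action": "loadchildren",
        "type": 0,             # homeType, e.g. a calendar home
        "owner": "user01",     # home.uid()
    }
    child_action = dict(home_action, child_id=42)              # child.external_id()
    resource_action = dict(child_action, object_name="1.vcf")  # resource.name()

    # The receiving pod walks the same keys back down:
    #   homeWithUID(type, owner) -> childWithID(child_id)
    #     -> objectResourceWithName(object_name)
    # then _simple_migrate_recv invokes the named method on the resolved
    # target and replies with {"result": "ok", "value": ...}.
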
Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/migration.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/migration.py	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/migration.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -0,0 +1,335 @@
+##
+# Copyright (c) 2013-2014 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+from txdav.common.icommondatastore import CommonStoreError
+from twisted.internet.defer import inlineCallbacks
+from txdav.common.datastore.sql_tables import _HOME_STATUS_NORMAL, \
+    _MIGRATION_STATUS_MIGRATING, _MIGRATION_STATUS_MIGRATED, \
+    _MIGRATION_STATUS_NONE
+
+"""
+Support for cross-pod migration of users.
+
+Migration will be divided into four steps:
+
+1. Initial data-only sync.
+    This will sync the actual CommonObjectResource data and owned collection bind information
+    from the source pod to the destination pod. This must be able to execute whilst the
+    service is running. The sync'd data will be store on the destination pod under an
+    "inactive" home - i.e., one that can never be accessed via public store apis.
+
+2. Incremental data-only sync.
+    This will update the destination's previously sync'd state to match the current source state.
+    This can be run multiple times prior to step 3 - with the last one ideally right before
+    step 3 is done. This must be able to execute whilst the service is running.
+
+3. Migration commit.
+    1. The service is shut down.
+    2. A final incremental data-sync is done.
+    3. A sync/update of sharing state is done on both the source and destination (this may
+        require creating external shares on the source for sharees who remain on that pod,
+        and converting external shares to internal ones on the destination pod for sharees
+        who are now on the same pod as the migrated owner).
+    4. The source home is marked as inactive.
+    5. The destination home is marked as active.
+    6. The service is brought back up.
+
+4. Data clean-up.
+    The inactive home and associated owned data on the source pod are removed. This must be
+    able to execute whilst the service is running.
+
+X. An abort step that can be used at any time prior to step 3 to stop the current migration.
+    That simply involves removing the local inactive homes; there are no changes on the old
+    pod until step 3.
+
+During migration we will create an "inactive" set of home collections for the user being
+migrated on their new pod. An "inactive" home is one where the MIGRATION value is non-zero.
+There will be two types - one for use during migration, and one for use
+after migration, with the latter being used to mark the data on the old pod as inactive
+prior to deletion (step 4).
+
+The migration process will be driven by calls on the new pod, which will then use cross-pod
+requests to sync data and trigger state changes on the old pod.
+
+"""
+
+class UserAlreadyOnThisPod(CommonStoreError):
+    pass
+
+
+
+class UserAlreadyBeingMigrated(CommonStoreError):
+    pass
+
+
+
+class UserNotAlreadyBeingMigrated(CommonStoreError):
+    pass
+
+
+
+class MigrationController(object):
+    """
+    Class that manages each of the four steps. Will assume that this is being run on the new pod,
+    pulling data for a user whose directory record still points to the old pod for the first three
+    steps.
+
+    TODO: For now this only works with calendars.
+    """
+
+    def __init__(self, store, homeTypes=None):
+        self.store = store
+        self.homeTypes = homeTypes
+
+
+    def migrationTypes(self, txn):
+        return txn.homeTypes if self.homeTypes is None else self.homeTypes
+
+
+    @inlineCallbacks
+    def step1(self, user):
+        """
+        Carry out step 1 migration: full sync of user data to this pod.
+
+        @param user: GUID of the user to migrate
+        @type user: C{str}
+        """
+
+        # Validity checks
+        self.verifyNonLocalUser(user)
+
+        txn = self.store.newTransaction()
+
+        # Can't already have a migrating home prior to step 1
+        for home_type in self.migrationTypes(txn):
+            home = yield txn.homeWithUID(home_type, user, create=False, migration=_MIGRATION_STATUS_MIGRATING)
+            if home is not None:
+                raise UserAlreadyBeingMigrated("Inactive {} home exists".format(txn.homeClass(home_type)))
+        home = yield txn.notificationsWithUID(user, create=False, migration=_MIGRATION_STATUS_MIGRATING)
+        if home is not None:
+            raise UserAlreadyBeingMigrated("Inactive notifications exists")
+
+        # Can't already have migrated homes prior to step 1 - i.e., a step 4 is
+        # still in progress on this pod. We can't migrate the user back until that is done.
+        for home_type in self.migrationTypes(txn):
+            home = yield txn.homeWithUID(home_type, user, create=False, migration=_MIGRATION_STATUS_MIGRATED)
+            if home is not None:
+                raise UserAlreadyBeingMigrated("Inactive {} home exists".format(txn.homeClass(home_type)))
+        home = yield txn.notificationsWithUID(user, create=False, migration=_MIGRATION_STATUS_MIGRATED)
+        if home is not None:
+            raise UserAlreadyBeingMigrated("Inactive notifications exists")
+
+        # Create the new inactive homes
+        for home_type in self.migrationTypes(txn):
+            home = yield txn.homeWithUID(home_type, user, create=True, migration=_MIGRATION_STATUS_MIGRATING)
+        home = yield txn.notificationsWithUID(user, create=True, migration=_MIGRATION_STATUS_MIGRATING)
+        yield txn.commit()
+
+        # Now we do a full sync. In fact we can simply run the incremental sync:
+        # with no prior state it will fetch everything we need for step 1.
+        yield self.syncHomes(user)
+
+
+    @inlineCallbacks
+    def step2(self, user):
+        """
+        Carry out step 2 migration: incremental sync of user data to this pod.
+
+        @param user: GUID of the user to migrate
+        @type user: C{str}
+        """
+
+        # Validity checks
+        self.verifyNonLocalUser(user)
+
+        # Must already have a migrating home prior to step 2
+        txn = self.store.newTransaction()
+        yield self.validMigratingUser(txn, user)
+        yield txn.commit()
+
+        # Now make each home sync
+        yield self.syncHomes(user)
+
+
+    @inlineCallbacks
+    def step3(self, user):
+        """
+        Carry out step 3 migration: final sync of user data and commit of the migration.
+
+        @param user: GUID of the user to migrate
+        @type user: C{str}
+        """
+
+        # Validity checks
+        self.verifyNonLocalUser(user)
+
+        # Must already have a migrating home prior to step 3
+        txn = self.store.newTransaction()
+        yield self.validMigratingUser(txn, user)
+        yield txn.commit()
+
+        # Step 3.2 Now make each home sync
+        yield self.syncHomes(user, final=True)
+
+        # Up to this point everything has been non-destructive: all the existing data is intact.
+        # After this point we start changing the existing data, and we really want those changes
+        # to be consistent across the pods (i.e., transactional). There should not be many SQL
+        # changes, so we can do them all in one transaction on the local pod. We still need to
+        # figure out how to recover the remote pod, because we can't keep transactions open
+        # across multiple cross-pod calls.
+
+        # Step 3.3 Sync the sharing state
+        yield self.syncSharingState(user)
+
+        # Step 3.4 Deactivate user on the old pod
+        yield self.deactivateUserOnOtherPod(user)
+
+        # Step 3.5 Activate user on the new pod (this one)
+        yield self.activateUserOnThisPod(user)
+
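+    # A typical driver sequence (sketch only: step1/step2 usage mirrors
+    # test_migration.py; step3 is by analogy, since its pod-switch pieces are
+    # still stubbed out below):
+    #
+    #     migrator = MigrationController(store, homeTypes=(ECALENDARTYPE,))
+    #     yield migrator.step1(guid)    # create MIGRATING homes, full sync
+    #     yield migrator.step2(guid)    # incremental sync, repeat as needed
+    #     yield migrator.step3(guid)    # final sync, then switch pods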
+
+    @inlineCallbacks
+    def syncHomes(self, user, final=False):
+        """
+        Synchronize the contents of the user's homes from their current pod to this one. If C{final}
+        is C{False}, treat this as a step 1/step 2 full/incremental sync. If C{final} is C{True},
+        this is the step 3 sync, where we also need to sync additional meta-data and the
+        notifications collection content.
+
+        @param user: GUID of the user to migrate
+        @type user: C{str}
+        @param final: C{True} if this is the step 3 sync
+        @type final: C{bool}
+        """
+
+        txn = self.store.newTransaction()
+        for home_type in self.migrationTypes(txn):
+            home = yield txn.homeWithUID(home_type, user, migration=_MIGRATION_STATUS_MIGRATING)
+            yield home.migrateToThisPod(user, final)
+
+        if final:
+            home = yield txn.notificationsWithUID(user, migration=_MIGRATION_STATUS_MIGRATING)
+            yield home.migrateToThisPod(user, final)
+        yield txn.commit()
+
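+    # Expanded, a non-final (step 1/2) calendar-only sync amounts to this
+    # (illustrative sketch of the loop above):
+    #
+    #     txn = self.store.newTransaction()
+    #     home = yield txn.homeWithUID(ECALENDARTYPE, user,
+    #                                  migration=_MIGRATION_STATUS_MIGRATING)
+    #     yield home.migrateToThisPod(user, False)  # children only
+    #     yield txn.commit()                        # notifications skipped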
+
+    @inlineCallbacks
+    def syncSharingState(self, user):
+        """
+        Synchronize the sharing state for the user being migrated. This involves:
+
+        1. For each item being shared by this user, make a bind entry on the new pod
+            for each share, adjusting for whether the sharee is now local or external
+            to the pod. The old pod's bind entries will be removed in step 4, so no clean-up is needed.
+
+        2. For each item shared to this user, make a bind entry in the new pod, adjusting
+            for whether the sharer is local or external to the pod. If the sharer is local
+            to the old pod, adjust their bind row to point to the user on the new pod. If
+            the sharer is on some other pod (not the new or old one) then the existing bind
+            rows on that pod should still be valid - no change needed.
+
+        @param user: GUID of the user to migrate
+        @type user: C{str}
+        """
+
+        # TODO:
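+        # Sketch of the intended shape (any helper steps named here are
+        # hypothetical; only the docstring above defines the behavior):
+        #
+        #     txn = self.store.newTransaction()
+        #     for home_type in self.migrationTypes(txn):
+        #         home = yield txn.homeWithUID(
+        #             home_type, user, migration=_MIGRATION_STATUS_MIGRATING)
+        #         # 1. recreate bind rows for items this user shares
+        #         # 2. recreate/re-point bind rows for items shared to this user
+        #     yield txn.commit()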
+        pass
+
+
+    @inlineCallbacks
+    def deactivateUserOnOtherPod(self, user):
+        """
+        Deactivate the user on their old Pod by adjusting migration state.
+        This method makes a cross-pod call to the other pod and calls its
+        L{deactivateUserOnThisPod} method.
+
+        @param user: GUID of the user to migrate
+        @type user: C{str}
+        """
+
+        # TODO: Do a cross-pod call to tell the other Pod to deactivate
+        pass
+
+
+    @inlineCallbacks
+    def deactivateUserOnThisPod(self, user):
+        """
+        Deactivate the user on their old Pod (this one). Called as a result of the cross-pod
+        call from the new pod L{deactivateUserOnOtherPod}.
+
+        @param user: GUID of the user to migrate
+        @type user: C{str}
+        """
+
+        txn = self.store.newTransaction()
+        for home_type in self.migrationTypes(txn):
+            home = yield txn.homeWithUID(home_type, user, migration=_MIGRATION_STATUS_NONE)
+            yield home.updateDetails(newMigration=_MIGRATION_STATUS_MIGRATED)
+
+        home = yield txn.notificationsWithUID(user, migration=_MIGRATION_STATUS_NONE)
+        yield home.updateDetails(newMigration=_MIGRATION_STATUS_MIGRATED)
+
+        yield txn.commit()
+
+
+    @inlineCallbacks
+    def activateUserOnThisPod(self, user):
+        """
+        Activate the user on their new Pod (this one) by adjusting migration state
+        on each home. We also need to force their state to normal to properly indicate
+        they are now hosted on this pod.
+
+        @param user: GUID of the user to migrate
+        @type user: C{str}
+        """
+
+        txn = self.store.newTransaction()
+        for home_type in self.migrationTypes(txn):
+            home = yield txn.homeWithUID(home_type, user, migration=_MIGRATION_STATUS_MIGRATING)
+            yield home.updateDetails(newStatus=_HOME_STATUS_NORMAL, newMigration=_MIGRATION_STATUS_NONE)
+
+        home = yield txn.notificationsWithUID(user, migration=_MIGRATION_STATUS_MIGRATING)
+        yield home.updateDetails(newStatus=_HOME_STATUS_NORMAL, newMigration=_MIGRATION_STATUS_NONE)
+
+        yield txn.commit()
+
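+    # Net effect of steps 3.4/3.5 on the (STATUS, MIGRATION) pair of each home
+    # row, as implemented in deactivateUserOnThisPod/activateUserOnThisPod:
+    #
+    #     old pod: (NORMAL, NONE)        -> (NORMAL, MIGRATED)
+    #     new pod: (EXTERNAL, MIGRATING) -> (NORMAL, NONE)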
+
+    def verifyNonLocalUser(self, user):
+        """
+        Check that the user being migrated is not already hosted on this pod.
+
+        @param user: GUID of the user to migrate
+        @type user: C{str}
+        """
+        record = self.store.directoryService().recordWithGUID(user)
+        if record.thisServer():
+            raise UserAlreadyOnThisPod("User being migrated is already hosted on this pod")
+
+
+    @inlineCallbacks
+    def validMigratingUser(self, txn, user):
+        """
+        Determine whether the specified user is already set up to migrate (i.e., step 1 has been done).
+
+        @param txn: transaction to use
+        @type txn: L{txdav.common.datastore.sql.CommonStoreTransaction}
+        @param user: the user GUID to migrate
+        @type user: C{str}
+
+        @raise: L{UserNotAlreadyBeingMigrated} if invalid
+        """
+
+        for home_type in self.migrationTypes(txn):
+            home = yield txn.homeWithUID(home_type, user, create=False, migration=_MIGRATION_STATUS_MIGRATING)
+            if home is None:
+                raise UserNotAlreadyBeingMigrated("Inactive {} home does not exist".format(txn.homeClass(home_type)))
+        home = yield txn.notificationsWithUID(user, create=False, migration=_MIGRATION_STATUS_MIGRATING)
+        if home is None:
+            raise UserNotAlreadyBeingMigrated("Inactive notifications does not exist")

Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_conduit.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_conduit.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_conduit.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -41,7 +41,8 @@
 from txdav.common.datastore.podding.resource import ConduitResource
 from txdav.common.datastore.podding.test.util import MultiStoreConduitTest, \
     FakeConduitRequest
-from txdav.common.datastore.sql_tables import _BIND_STATUS_ACCEPTED
+from txdav.common.datastore.sql_tables import _BIND_STATUS_ACCEPTED, \
+    _MIGRATION_STATUS_MIGRATING
 from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
 from txdav.common.icommondatastore import ObjectResourceNameAlreadyExistsError, \
     ObjectResourceNameNotAllowedError
@@ -123,19 +124,19 @@
     }
 
 
-    def test_validRequst(self):
+    def test_validShareRequest(self):
         """
-        Cross-pod request fails when there is no shared secret header present.
+        validShareRequest returns directory records for a valid owner/sharee pair,
+        and raises for unknown users or when both users are hosted on this pod.
         """
 
         conduit = PoddingConduit(self.storeUnderTest())
-        r1, r2 = conduit.validRequst("user01", "puser02")
+        r1, r2 = conduit.validShareRequest("user01", "puser02")
         self.assertTrue(r1 is not None)
         self.assertTrue(r2 is not None)
 
-        self.assertRaises(DirectoryRecordNotFoundError, conduit.validRequst, "bogus01", "user02")
-        self.assertRaises(DirectoryRecordNotFoundError, conduit.validRequst, "user01", "bogus02")
-        self.assertRaises(FailedCrossPodRequestError, conduit.validRequst, "user01", "user02")
+        self.assertRaises(DirectoryRecordNotFoundError, conduit.validShareRequest, "bogus01", "user02")
+        self.assertRaises(DirectoryRecordNotFoundError, conduit.validShareRequest, "user01", "bogus02")
+        self.assertRaises(FailedCrossPodRequestError, conduit.validShareRequest, "user01", "user02")
 
 
 
@@ -145,7 +146,7 @@
 
         @inlineCallbacks
         def send_fake(self, txn, ownerUID, shareeUID):
-            _ignore_owner, sharee = self.validRequst(ownerUID, shareeUID)
+            _ignore_owner, sharee = self.validShareRequest(ownerUID, shareeUID)
             action = {
                 "action": "fake",
                 "echo": "bravo"
@@ -202,9 +203,9 @@
 
 
 
-class TestConduitAPI(MultiStoreConduitTest):
+class TestConduitAPIForSharing(MultiStoreConduitTest):
     """
-    Test that the conduit api works.
+    Test that the conduit api works for sharing.
     """
 
     nowYear = {"now": DateTime.getToday().getYear()}
@@ -1081,3 +1082,25 @@
         attachment = yield ManagedAttachment.load(self.transactionUnderTest(), resourceID, managedID)
         self.assertTrue(attachment is None)
         yield self.commit()
+
+
+
+class TestConduitAPIForMigration(MultiStoreConduitTest):
+    """
+    Test that the conduit api works for migration.
+    """
+
+    @inlineCallbacks
+    def test_loadallobjects(self):
+        """
+        Test that action=loadallobjects works.
+        """
+
+        yield self.homeUnderTest(txn=self.newOtherTransaction(), name="puser01", create=True)
+        yield self.otherCommit()
+
+        remote_home = yield self.homeUnderTest(name="puser01", create=True)
+        remote_home._migration = _MIGRATION_STATUS_MIGRATING
+
+        results = yield remote_home.loadChildren()
+        self.assertEqual(set([result.name() for result in results]), set(("calendar", "tasks", "inbox",)))

Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_migration.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_migration.py	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_migration.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -0,0 +1,200 @@
+##
+# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twisted.internet.defer import inlineCallbacks
+
+from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
+from txdav.common.datastore.sql_tables import _MIGRATION_STATUS_MIGRATING
+from txdav.common.datastore.podding.migration import MigrationController, \
+    UserAlreadyBeingMigrated
+from txdav.common.datastore.sql import ECALENDARTYPE
+
+
+class TestCalendarMigration(MultiStoreConduitTest):
+    """
+    Test that the migration api works for migration.
+    """
+
+    @inlineCallbacks
+    def _provision_remote(self):
+        """
+        Provision the user account on the remote pod.
+        """
+        yield self.homeUnderTest(txn=self.otherTransactionUnderTest(), name="puser01", create=True)
+        yield self.addressbookHomeUnderTest(txn=self.otherTransactionUnderTest(), name="puser01", create=True)
+        yield self.otherCommit()
+
+
+    @inlineCallbacks
+    def test_step1_ok(self):
+        """
+        Test that step1 works.
+        """
+
+        yield self._provision_remote()
+
+        migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+        yield migrator.step1("puser01")
+
+        # Verify local home is not visible to normal api calls
+        local_home = yield self.homeUnderTest(name="puser01")
+        self.assertTrue(local_home is not None)
+        self.assertTrue(local_home.external())
+        yield self.commit()
+
+        # Verify local migrating items exist
+        local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+        self.assertTrue(local_home is not None)
+        self.assertTrue(not local_home.external())
+
+        results = yield local_home.loadChildren()
+        self.assertEqual(set([result.name() for result in results]), set(("calendar", "tasks", "inbox",)))
+
+
+    @inlineCallbacks
+    def test_step1_twice(self):
+        """
+        Test that step1 fails a second time.
+        """
+
+        yield self._provision_remote()
+
+        migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+        yield migrator.step1("puser01")
+
+        # Verify local migrating items exist
+        local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+        self.assertTrue(local_home is not None)
+        self.assertTrue(not local_home.external())
+        yield self.commit()
+
+        migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+        yield self.failUnlessFailure(migrator.step1("puser01"), UserAlreadyBeingMigrated)
+
+
+    @inlineCallbacks
+    def test_step2_no_change(self):
+        """
+        Test that step2 is a no-op when nothing has changed on the remote pod.
+        """
+
+        yield self._provision_remote()
+
+        migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+        yield migrator.step1("puser01")
+
+        # Verify local migrating items exist
+        local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+        results = yield local_home.loadChildren()
+        self.assertEqual(set([result.name() for result in results]), set(("calendar", "tasks", "inbox",)))
+        rids = dict([(result.id(), result.external_id()) for result in results])
+
+        migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+        yield migrator.step2("puser01")
+
+        # Verify local migrating items exist
+        local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+        results = yield local_home.loadChildren()
+        self.assertEqual(set([result.name() for result in results]), set(("calendar", "tasks", "inbox",)))
+        rids2 = dict([(result.id(), result.external_id()) for result in results])
+        self.assertEqual(rids, rids2)
+
+
+    @inlineCallbacks
+    def test_step2_changes(self):
+        """
+        Test that step2 picks up adds, removes, renames and property changes made on the remote pod.
+        """
+
+        yield self._provision_remote()
+
+        migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+        yield migrator.step1("puser01")
+
+        local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+        results = yield local_home.loadChildren()
+        self.assertEqual(set([result.name() for result in results]), set(("calendar", "tasks", "inbox",)))
+        rids = dict([(result.id(), result.external_id()) for result in results])
+
+        # Create new calendar
+        txn = self.otherTransactionUnderTest()
+        remote_home = yield self.homeUnderTest(txn, name="puser01")
+        new_calendar = yield remote_home.createCalendarWithName("new_calendar")
+        external_id = new_calendar.id()
+        yield self.otherCommit()
+
+        migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+        yield migrator.step2("puser01")
+
+        local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+        new_calendar = yield local_home.calendarWithName("new_calendar")
+        rids[new_calendar.id()] = external_id
+
+        results = yield local_home.loadChildren()
+        self.assertEqual(set([result.name() for result in results]), set(("new_calendar", "calendar", "tasks", "inbox",)))
+        rids2 = dict([(result.id(), result.external_id()) for result in results])
+        self.assertEqual(rids, rids2)
+
+        # Remove old calendar
+        txn = self.otherTransactionUnderTest()
+        remote_home = yield self.homeUnderTest(txn, name="puser01")
+        old_calendar = yield remote_home.calendarWithName("calendar")
+        external_id = old_calendar.id()
+        yield old_calendar.remove()
+        # Drop the rids entry whose external id matches the removed calendar
+        local_id = dict([(v, k) for k, v in rids.items()])[external_id]
+        del rids[local_id]
+        yield self.otherCommit()
+
+        migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+        yield migrator.step2("puser01")
+
+        local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+        results = yield local_home.loadChildren()
+        self.assertEqual(set([result.name() for result in results]), set(("new_calendar", "tasks", "inbox",)))
+        rids2 = dict([(result.id(), result.external_id()) for result in results])
+        self.assertEqual(rids, rids2)
+        tasks = yield local_home.calendarWithName("tasks")
+        self.assertFalse(tasks.isUsedForFreeBusy())
+
+        # Add, remove, rename, prop change
+        txn = self.otherTransactionUnderTest()
+        remote_home = yield self.homeUnderTest(txn, name="puser01")
+
+        new_calendar = yield remote_home.createCalendarWithName("new_calendar2")
+        external_id2 = new_calendar.id()
+
+        old_calendar = yield remote_home.calendarWithName("new_calendar")
+        external_id = old_calendar.id()
+        yield old_calendar.remove()
+        # Drop the rids entry whose external id matches the removed calendar
+        local_id = dict([(v, k) for k, v in rids.items()])[external_id]
+        del rids[local_id]
+
+        tasks = yield remote_home.calendarWithName("tasks")
+        yield tasks.rename("todos")
+        yield tasks.setUsedForFreeBusy(True)
+
+        yield self.otherCommit()
+
+        migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+        yield migrator.step2("puser01")
+
+        local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+        new_calendar = yield local_home.calendarWithName("new_calendar2")
+        rids[new_calendar.id()] = external_id2
+
+        results = yield local_home.loadChildren()
+        self.assertEqual(set([result.name() for result in results]), set(("new_calendar2", "todos", "inbox",)))
+        rids2 = dict([(result.id(), result.external_id()) for result in results])
+        self.assertEqual(rids, rids2)
+        tasks = yield local_home.calendarWithName("todos")
+        self.assertTrue(tasks.isUsedForFreeBusy())

Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/util.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/util.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -172,6 +172,7 @@
         store = yield builder.buildStore(self, self.notifierFactory, directory)
         store.queryCacher = None     # Cannot use query caching
         store.conduit = self.makeConduit(store)
+        store.name = "internal" if internal else "external"
         returnValue(store)
 
 

Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -64,7 +64,9 @@
 from txdav.common.datastore.sql_tables import _BIND_MODE_OWN, \
     _BIND_STATUS_ACCEPTED, _BIND_STATUS_DECLINED, _BIND_STATUS_INVALID, \
     _BIND_STATUS_INVITED, _BIND_MODE_DIRECT, _BIND_STATUS_DELETED, \
-    _BIND_MODE_INDIRECT, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL
+    _BIND_MODE_INDIRECT, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL, \
+    _MIGRATION_STATUS_NONE, _MIGRATION_STATUS_MIGRATING, \
+    _MIGRATION_STATUS_MIGRATED
 from txdav.common.datastore.sql_tables import schema, splitSQLString
 from txdav.common.icommondatastore import ConcurrentModification, \
     RecordNotAllowedError, ExternalShareFailed, ShareNotAllowed, \
@@ -473,8 +475,23 @@
                  enableCalendars, enableAddressBooks,
                  notifierFactories, label, migrating=False, disableCache=False):
         self._store = store
-        self._calendarHomes = {}
-        self._addressbookHomes = {}
+        self._homes = {
+            _MIGRATION_STATUS_NONE: {
+                ECALENDARTYPE : {},
+                EADDRESSBOOKTYPE: {},
+                ENOTIFICATIONTYPE: {}
+            },
+            _MIGRATION_STATUS_MIGRATING: {
+                ECALENDARTYPE : {},
+                EADDRESSBOOKTYPE: {},
+                ENOTIFICATIONTYPE: {}
+            },
+            _MIGRATION_STATUS_MIGRATED: {
+                ECALENDARTYPE : {},
+                EADDRESSBOOKTYPE: {},
+                ENOTIFICATIONTYPE: {}
+            },
+        }
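+        # Home memo dicts: keyed first by migration status, then by store
+        # type; @memoizedKey then indexes by owner UID within each dict.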
         self._notificationHomes = {}
         self._notifierFactories = notifierFactories
         self._notifiedAlready = set()
@@ -583,16 +600,27 @@
         ).on(self)
 
 
-    def _determineMemo(self, storeType, uid, create=False): #@UnusedVariable
+    def _determineMemo(self, storeType, uid, create=False, migration=_MIGRATION_STATUS_NONE): #@UnusedVariable
         """
         Determine the memo dictionary to use for homeWithUID.
         """
-        if storeType == ECALENDARTYPE:
-            return self._calendarHomes
-        else:
-            return self._addressbookHomes
+        return self._homes[migration][storeType]
 
 
+    def homeTypes(self):
+        """
+        Return list of all supported home types.
+        """
+        return self._homeClass.keys()
+
+
+    def homeClass(self, storeType):
+        """
+        Return the home class registered for the specified store type.
+        """
+        return self._homeClass.get(storeType)
+
+
     @inlineCallbacks
     def homes(self, storeType):
         """
@@ -609,19 +637,19 @@
 
 
     @memoizedKey("uid", _determineMemo)
-    def homeWithUID(self, storeType, uid, create=False):
-        if storeType not in (ECALENDARTYPE, EADDRESSBOOKTYPE):
+    def homeWithUID(self, storeType, uid, create=False, migration=_MIGRATION_STATUS_NONE):
+        if storeType not in self._homeClass:
             raise RuntimeError("Unknown home type.")
 
-        return self._homeClass[storeType].homeWithUID(self, uid, create)
+        return self._homeClass[storeType].homeWithUID(self, uid, create, migration)
 
 
-    def calendarHomeWithUID(self, uid, create=False):
-        return self.homeWithUID(ECALENDARTYPE, uid, create=create)
+    def calendarHomeWithUID(self, uid, create=False, migration=_MIGRATION_STATUS_NONE):
+        return self.homeWithUID(ECALENDARTYPE, uid, create=create, migration=migration)
 
 
-    def addressbookHomeWithUID(self, uid, create=False):
-        return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create)
+    def addressbookHomeWithUID(self, uid, create=False, migration=_MIGRATION_STATUS_NONE):
+        return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create, migration=migration)
 
 
     @inlineCallbacks
@@ -629,8 +657,7 @@
         """
         Load a calendar or addressbook home by its integer resource ID.
         """
-        uid = (yield self._homeClass[storeType]
-               .homeUIDWithResourceID(self, rid))
+        uid = (yield self._homeClass[storeType].homeUIDWithResourceID(self, rid))
         if uid:
             result = (yield self.homeWithUID(storeType, uid, create))
         else:
@@ -646,12 +673,19 @@
         return self.homeWithResourceID(EADDRESSBOOKTYPE, rid)
 
 
-    @memoizedKey("uid", "_notificationHomes")
-    def notificationsWithUID(self, uid, create=True):
+    def _determineNotificationMemo(self, uid, create=False, migration=_MIGRATION_STATUS_NONE): #@UnusedVariable
         """
+        Determine the memo dictionary to use for notificationsWithUID.
+        """
+        return self._homes[migration][ENOTIFICATIONTYPE]
+
+
+    @memoizedKey("uid", _determineNotificationMemo)
+    def notificationsWithUID(self, uid, create=True, migration=_MIGRATION_STATUS_NONE):
+        """
         Implement notificationsWithUID.
         """
-        return NotificationCollection.notificationsWithUID(self, uid, create)
+        return NotificationCollection.notificationsWithUID(self, uid, create, migration)
 
 
     @memoizedKey("rid", "_notificationHomes")
@@ -1590,6 +1624,8 @@
 class CommonHome(SharingHomeMixIn):
     log = Logger()
 
+    _storeType = "home"
+
     # All these need to be initialized by derived classes for each store type
     _homeType = None
     _homeTable = None
@@ -1602,11 +1638,9 @@
     _dataVersionKey = None
     _dataVersionValue = None
 
-    _cacher = None  # Initialize in derived classes
-
     @classmethod
     @inlineCallbacks
-    def makeClass(cls, transaction, ownerUID, no_cache=False):
+    def makeClass(cls, transaction, homeData, metadataData):
         """
         Build the actual home class taking into account the possibility that we might need to
         switch in the external version of the class.
@@ -1615,19 +1649,100 @@
         @type transaction: L{CommonStoreTransaction}
-        @param ownerUID: owner UID of home to load
-        @type ownerUID: C{str}
-        @param no_cache: should cached query be used
-        @type no_cache: C{bool}
+        @param homeData: row of data from the home table for the home to build
+        @type homeData: C{list}
+        @param metadataData: row of data from the metadata table for the home to build
+        @type metadataData: C{list}
         """
-        home = cls(transaction, ownerUID)
-        actualHome = yield home.initFromStore(no_cache)
-        returnValue(actualHome)
 
+        resourceID = homeData[cls.homeColumns().index(cls._homeSchema.RESOURCE_ID)]
+        ownerUID = homeData[cls.homeColumns().index(cls._homeSchema.OWNER_UID)]
+        status = homeData[cls.homeColumns().index(cls._homeSchema.STATUS)]
+        migration = homeData[cls.homeColumns().index(cls._homeSchema.MIGRATION)]
 
-    def __init__(self, transaction, ownerUID):
+        # If the status is external we need to convert this object to a CommonHomeExternal class which will
+        # have the right behavior for non-hosted external users.
+        if status == _HOME_STATUS_EXTERNAL and migration == _MIGRATION_STATUS_NONE:
+            home = cls._externalClass(transaction, ownerUID, resourceID)
+        else:
+            home = cls(transaction, ownerUID, migration=migration)
+
+        for attr, value in zip(cls.homeAttributes(), homeData):
+            setattr(home, attr, value)
+
+        for attr, value in zip(cls.metadataAttributes(), metadataData):
+            setattr(home, attr, value)
+
+        yield home._loadPropertyStore()
+
+        for factory_type, factory in transaction._notifierFactories.items():
+            home.addNotifier(factory_type, factory.newNotifier(home))
+
+        yield home.made()
+
+        returnValue(home)
+
+
+    @classmethod
+    @inlineCallbacks
+    def _getDBData(cls, transaction, ownerUID, migration=_MIGRATION_STATUS_NONE, no_cache=False):
+        """
+        Given a set of identifying information, load the home and metadata rows for the object.
+
+        @param transaction: transaction
+        @type transaction: L{CommonStoreTransaction}
+        @param ownerUID: owner UID of home to load
+        @type ownerUID: C{str}
+        @param migration: migration status for home to load
+        @type migration: C{int}
+        @param no_cache: should cached query be used
+        @type no_cache: C{bool}
+        """
+
+        queryCacher = transaction._queryCacher
+        homeData = None
+        if queryCacher:
+            cacheKey = queryCacher.keyForHomeData(cls._homeType, ownerUID, migration)
+            homeData = yield queryCacher.get(cacheKey)
+
+        if homeData is None:
+            homeData = yield cls._homeColumnsFromOwnerQuery.on(transaction, ownerUID=ownerUID, migration=migration)
+            if homeData:
+                homeData = homeData[0]
+                if not no_cache and queryCacher:
+                    yield queryCacher.set(cacheKey, homeData)
+
+        if not homeData:
+            returnValue(None)
+
+        resourceID = homeData[cls.homeColumns().index(cls._homeSchema.RESOURCE_ID)]
+
+        metadataData = None
+        if queryCacher:
+            # Get cached copy
+            cacheKey = queryCacher.keyForHomeMetaData(resourceID)
+            metadataData = yield queryCacher.get(cacheKey)
+
+        if metadataData is None:
+            # Don't have a cached copy
+            metadataData = (yield cls._metaDataQuery.on(transaction, resourceID=resourceID))
+            if metadataData:
+                metadataData = metadataData[0]
+            else:
+                metadataData = None
+            if queryCacher:
+                # Cache the metadataData
+                yield queryCacher.setAfterCommit(transaction, cacheKey, metadataData)
+
+        returnValue((homeData, metadataData))
+
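+    # Note: _getDBData returns raw (homeData, metadataData) row tuples, and
+    # makeClass() zips them onto homeAttributes()/metadataAttributes(), so the
+    # column and attribute orderings above must be kept in step.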
+
+    def __init__(self, transaction, ownerUID, migration=_MIGRATION_STATUS_NONE):
         self._txn = transaction
         self._ownerUID = ownerUID
         self._resourceID = None
         self._status = _HOME_STATUS_NORMAL
+        self._migration = migration
         self._dataVersion = None
         self._childrenLoaded = False
         self._children = {}
@@ -1636,8 +1751,6 @@
         self._created = None
         self._modified = None
         self._syncTokenRevision = None
-        if transaction._disableCache:
-            self._cacher = _EmptyCacher()
 
 
     @classmethod
@@ -1650,6 +1763,13 @@
         CommonStoreTransaction._homeClass[cls._homeType] = cls
 
 
+    def made(self):
+        """
+        Called after class has been built. This is here to allow sub-classes to do their own initialization stuff.
+        """
+        return succeed(None)
+
+
     def quotaAllowedBytes(self):
         return self._txn.store().quota
 
@@ -1660,7 +1780,8 @@
         return Select(
             cls.homeColumns(),
             From=home,
-            Where=home.OWNER_UID == Parameter("ownerUID")
+            Where=(home.OWNER_UID == Parameter("ownerUID")).And(
+                   home.MIGRATION == Parameter("migration"))
         )
 
 
@@ -1692,6 +1813,7 @@
             cls._homeSchema.RESOURCE_ID,
             cls._homeSchema.OWNER_UID,
             cls._homeSchema.STATUS,
+            cls._homeSchema.MIGRATION,
         )
 
 
@@ -1707,6 +1829,7 @@
             "_resourceID",
             "_ownerUID",
             "_status",
+            "_migration",
         )
 
 
@@ -1742,66 +1865,6 @@
         )
 
 
-    @inlineCallbacks
-    def initFromStore(self, no_cache=False):
-        """
-        Initialize this object from the store. We read in and cache all the
-        extra meta-data from the DB to avoid having to do DB queries for those
-        individually later.
-        """
-        result = yield self._cacher.get(self._ownerUID)
-        if result is None:
-            result = yield self._homeColumnsFromOwnerQuery.on(self._txn, ownerUID=self._ownerUID)
-            if result:
-                result = result[0]
-                if not no_cache:
-                    yield self._cacher.set(self._ownerUID, result)
-
-        if result:
-            for attr, value in zip(self.homeAttributes(), result):
-                setattr(self, attr, value)
-
-            # STOP! If the status is external we need to convert this object to a CommonHomeExternal class which will
-            # have the right behavior for non-hosted external users.
-            if self._status == _HOME_STATUS_EXTERNAL:
-                actualHome = self._externalClass(self._txn, self._ownerUID, self._resourceID)
-            else:
-                actualHome = self
-            yield actualHome.initMetaDataFromStore()
-            yield actualHome._loadPropertyStore()
-
-            for factory_type, factory in self._txn._notifierFactories.items():
-                actualHome.addNotifier(factory_type, factory.newNotifier(actualHome))
-
-            returnValue(actualHome)
-        else:
-            returnValue(None)
-
-
-    @inlineCallbacks
-    def initMetaDataFromStore(self):
-        """
-        Load up the metadata and property store
-        """
-
-        queryCacher = self._txn._queryCacher
-        if queryCacher:
-            # Get cached copy
-            cacheKey = queryCacher.keyForHomeMetaData(self._resourceID)
-            data = yield queryCacher.get(cacheKey)
-        else:
-            data = None
-        if data is None:
-            # Don't have a cached copy
-            data = (yield self._metaDataQuery.on(self._txn, resourceID=self._resourceID))[0]
-            if queryCacher:
-                # Cache the data
-                yield queryCacher.setAfterCommit(self._txn, cacheKey, data)
-
-        for attr, value in zip(self.metadataAttributes(), data):
-            setattr(self, attr, value)
-
-
     @classmethod
     @inlineCallbacks
     def listHomes(cls, txn):
@@ -1810,9 +1873,11 @@
 
         @return: an iterable of C{str}s.
         """
+        ch = cls._homeSchema
         rows = yield Select(
-            [cls._homeSchema.OWNER_UID],
+            [ch.OWNER_UID],
             From=cls._homeSchema,
+            Where=(ch.MIGRATION == _MIGRATION_STATUS_NONE)
         ).on(txn)
         rids = [row[0] for row in rows]
         returnValue(rids)
@@ -1820,9 +1885,11 @@
 
     @classmethod
     @inlineCallbacks
-    def homeWithUID(cls, txn, uid, create=False):
-        homeObject = yield cls.makeClass(txn, uid)
-        if homeObject is not None:
+    def homeWithUID(cls, txn, uid, create=False, migration=_MIGRATION_STATUS_NONE):
+        dbData = yield cls._getDBData(txn, uid, migration)
+        if dbData is not None:
+            homeData, metadataData = dbData
+            homeObject = yield cls.makeClass(txn, homeData, metadataData)
             returnValue(homeObject)
         else:
             if not create:
@@ -1847,6 +1914,7 @@
                     {
                         cls._homeSchema.OWNER_UID: uid,
                         cls._homeSchema.STATUS: state,
+                        cls._homeSchema.MIGRATION: migration,
                         cls._homeSchema.DATAVERSION: cls._dataVersionValue,
                     },
                     Return=cls._homeSchema.RESOURCE_ID
@@ -1856,8 +1924,10 @@
                 yield savepoint.rollback(txn)
 
                 # Retry the query - row may exist now, if not re-raise
-                homeObject = yield cls.makeClass(txn, uid)
-                if homeObject:
+                dbData = yield cls._getDBData(txn, uid, migration)
+                if dbData:
+                    homeData, metadataData = dbData
+                    homeObject = yield cls.makeClass(txn, homeData, metadataData)
                     returnValue(homeObject)
                 else:
                     raise
@@ -1867,8 +1937,11 @@
                 # Note that we must not cache the owner_uid->resource_id
-                # mapping in _cacher when creating as we don't want that to appear
+                # mapping in the query cacher when creating as we don't want that to appear
                 # until AFTER the commit
-                home = yield cls.makeClass(txn, uid, no_cache=True)
-                yield home.createdHome()
+                dbData = yield cls._getDBData(txn, uid, migration, no_cache=True)
+                homeData, metadataData = dbData
+                home = yield cls.makeClass(txn, homeData, metadataData)
+                if migration == _MIGRATION_STATUS_NONE:
+                    yield home.createdHome()
                 returnValue(home)
 
 
@@ -1886,6 +1959,10 @@
         return "<%s: %s, %s>" % (self.__class__.__name__, self._resourceID, self._ownerUID)
 
 
+    def cacheKey(self):
+        return "{}.{}".format(self._migration, self._ownerUID)
+
+
     def id(self):
         """
         Retrieve the store identifier for this home.
@@ -1905,6 +1982,32 @@
         return self._ownerUID
 
 
+    @inlineCallbacks
+    def updateDetails(self, newUID=None, newStatus=None, newMigration=None):
+        """
+        Change any of the uid, status, or migration of this home.
+        """
+        ch = self._homeSchema
+        columns = {}
+        if newUID is not None:
+            columns[ch.OWNER_UID] = newUID
+        if newStatus is not None:
+            columns[ch.STATUS] = newStatus
+        if newMigration is not None:
+            columns[ch.MIGRATION] = newMigration
+        yield Update(
+            columns,
+            # Key on the resource id: OWNER_UID alone would also match any
+            # migrating/migrated rows that exist for the same UID
+            Where=(ch.RESOURCE_ID == self._resourceID)
+        ).on(self._txn)
+
+        if newUID is not None:
+            self._ownerUID = newUID
+        if newStatus is not None:
+            self._status = newStatus
+        if newMigration is not None:
+            self._migration = newMigration
+
+
     def external(self):
         """
         Is this an external home.
@@ -2497,6 +2600,10 @@
         per transaction.
         """
 
+        # Migrating resources never send notifications
+        if self._migration != _MIGRATION_STATUS_NONE:
+            returnValue(None)
+
         if self._txn.isNotifiedAlready(self):
             returnValue(None)
         self._txn.notificationAddedForObject(self)
@@ -2588,7 +2695,55 @@
         returnValue((ownerHome, ownerName))
 
 
+    #
+    # Migration related
+    #
 
+    @inlineCallbacks
+    def migrateToThisPod(self, user, final):
+        """
+        Migrate data for the specified user (on a different pod) to this pod. C{final} is
+        C{True} when this is the final incremental sync (where we need to sync everything).
+        When C{False} we only sync owned child collections (without properties).
+
+        @param user: the user to migrate
+        @type user: C{str}
+        @param final: whether or not this is the final sync
+        @type final: C{bool}
+        """
+
+        assert self._migration == _MIGRATION_STATUS_MIGRATING
+
+        # Get external home for the user (create if needed)
+        otherHome = yield self._txn.homeWithUID(self._homeType, user, create=True)
+        assert otherHome._status == _HOME_STATUS_EXTERNAL
+
+        # Force the external home to look like it is migrating. This will enable certain external API calls
+        # that are normally disabled for sharing (e.g., ability to load all child resources).
+        otherHome._migration = _MIGRATION_STATUS_MIGRATING
+
+        local_children = yield self.loadChildren()
+        local_children = dict([(child.external_id(), child) for child in local_children if child.owned()])
+
+        # Get list of owned child collections
+        remote_children = yield otherHome.loadChildren()
+        remote_children = dict([(child.id(), child) for child in remote_children if child.owned()])
+
+        # Remove local children no longer present on the remote pod
+        for external_id in set(local_children.keys()) - set(remote_children.keys()):
+            child = local_children[external_id]
+            yield child.remove()
+            del local_children[external_id]
+
+        # Sync each one on the remote side
+        for remote_child in remote_children.values():
+            local_child = local_children.get(remote_child.id())
+            if local_child is None:
+                local_child = yield self.createChildWithName(remote_child.name(), remote_child.id())
+            yield local_child.migrateToThisPod(remote_child, final)
+
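+    # The reconciliation above is a keyed diff; stripped of store details it
+    # reduces to (illustrative plain Python, hypothetical data):
+    #
+    #     local = {ext_id: child, ...}   # keyed by external_id()
+    #     remote = {rid: child, ...}     # keyed by id() on the old pod
+    #     for gone in set(local) - set(remote):
+    #         yield local.pop(gone).remove()
+    #     for rid, rchild in remote.items():
+    #         lchild = local.get(rid) or (yield self.createChildWithName(
+    #             rchild.name(), rid))
+    #         yield lchild.migrateToThisPod(rchild, final)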
+
+
 class _SharedSyncLogic(object):
     """
     Logic for maintaining sync-token shared between notification collections and
@@ -3187,9 +3342,9 @@
         """
 
         if not self.direct() and self.shareStatus() != _BIND_STATUS_ACCEPTED:
-            if self.external():
+            ownerView = yield self.ownerView()
+            if ownerView.external():
                 yield self._replyExternalInvite(_BIND_STATUS_ACCEPTED, summary)
-            ownerView = yield self.ownerView()
             yield ownerView.updateShare(self, status=_BIND_STATUS_ACCEPTED)
             yield self.newShare(displayname=summary)
             if not ownerView.external():
@@ -3203,9 +3358,9 @@
         """
 
         if not self.direct() and self.shareStatus() != _BIND_STATUS_DECLINED:
-            if self.external():
+            ownerView = yield self.ownerView()
+            if ownerView.external():
                 yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
-            ownerView = yield self.ownerView()
             yield ownerView.updateShare(self, status=_BIND_STATUS_DECLINED)
             if not ownerView.external():
                 yield self._sendReplyNotification(ownerView)
@@ -3865,17 +4020,38 @@
         """
 
         return (
-            cls._bindSchema.BIND_MODE,
             cls._bindSchema.HOME_RESOURCE_ID,
             cls._bindSchema.RESOURCE_ID,
             cls._bindSchema.EXTERNAL_ID,
             cls._bindSchema.RESOURCE_NAME,
+            cls._bindSchema.BIND_MODE,
             cls._bindSchema.BIND_STATUS,
             cls._bindSchema.BIND_REVISION,
             cls._bindSchema.MESSAGE
         )
 
+
+    @classmethod
+    def bindAttributes(cls):
+        """
+        Return a list of attribute names used for retrieval during creation. This allows
+        different child classes to have their own type specific data, but still make use of the
+        common base logic.
+        """
+
+        return (
+            "_resourceID",  # This is actually ignored - it should be the home id
+            "_resourceID",
+            "_externalID",
+            "_name",
+            "_bindMode",
+            "_bindStatus",
+            "_bindRevision",
+            "_bindMessage",
+        )
+
     bindColumnCount = 8
+    bindColumnSyncStart = 3     # Starting column for bind attributes to sync
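+    # With the reordering above, slicing bindColumns()/bindAttributes() at
+    # bindColumnSyncStart skips HOME_RESOURCE_ID, RESOURCE_ID and EXTERNAL_ID,
+    # so migration syncs only name, mode, status, revision and message.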
 
     @classmethod
     def additionalBindColumns(cls):
@@ -3945,6 +4121,8 @@
     """
     log = Logger()
 
+    _storeType = "child"
+
     compareAttributes = (
         "_name",
         "_home",
@@ -3985,8 +4163,16 @@
         @rtype: L{CommonHomeChild}
         """
 
-        bindMode, _ignore_homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = bindData
+        bindData = dict(zip(cls.bindColumns(), bindData))
 
+        resourceID = bindData[cls._bindSchema.RESOURCE_ID]
+        externalID = bindData[cls._bindSchema.EXTERNAL_ID]
+        name = bindData[cls._bindSchema.RESOURCE_NAME]
+        bindMode = bindData[cls._bindSchema.BIND_MODE]
+        bindStatus = bindData[cls._bindSchema.BIND_STATUS]
+        bindRevision = bindData[cls._bindSchema.BIND_REVISION]
+        bindMessage = bindData[cls._bindSchema.MESSAGE]
+
         if ownerHome is None:
             if bindMode == _BIND_MODE_OWN:
                 ownerHome = home
@@ -3996,7 +4182,9 @@
         else:
             ownerName = None
 
-        c = cls._externalClass if ownerHome.external() else cls
+        # The actual class depends on whether the owner home is external or not. We need to be careful here,
+        # because we need to determine the proper class from the ownerHome, not the home that called this method.
+        c = ownerHome._childClass._externalClass if ownerHome.external() else ownerHome._childClass
         child = c(
             home=home,
             name=name,
@@ -4302,6 +4490,36 @@
         returnValue(child)
 
 
+    def externalize(self):
+        """
+        Create a dictionary mapping key attributes so this object can be sent over a cross-pod call
+        and reconstituted at the other end. Note that the other end may have a different schema so
+        the attributes may not match exactly and will need to be processed accordingly.
+        """
+        serialized = {}
+        serialized["bind"] = dict([(attr[1:], getattr(self, attr, None)) for attr in self.bindAttributes()])
+        serialized["additionalBind"] = dict([(attr[1:], getattr(self, attr, None)) for attr in self.additionalBindAttributes()])
+        serialized["metadata"] = dict([(attr[1:], getattr(self, attr, None)) for attr in self.metadataAttributes()])
+        return serialized
+
+
+    @classmethod
+    @inlineCallbacks
+    def internalize(cls, parent, mapping):
+        """
+        Given a mapping generated by L{externalize}, convert the values into arrays of database-like
+        items that conform to the ordering of the bind, additional-bind and metadata columns so they
+        can be fed into L{makeClass}.
+        Note that there may be a schema mismatch with the external data, so treat missing items as
+        C{None} and ignore extra items.
+        """
+
+        bind = [mapping["bind"].get(row[1:]) for row in cls.bindAttributes()]
+        additionalBind = [mapping["additionalBind"].get(row[1:]) for row in cls.additionalBindAttributes()]
+        metadata = [mapping["metadata"].get(row[1:]) for row in cls.metadataAttributes()]
+        child = yield cls.makeClass(parent, bind, additionalBind, metadata)
+        returnValue(child)
+
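+    # Cross-pod round trip (sketch): the remote pod serializes each child with
+    # externalize() and ships the mappings; this pod rebuilds them with
+    #
+    #     child = yield cls.internalize(parent, mapping)
+    #
+    # Keys missing due to schema skew between pods internalize as C{None}.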
+
     @classproperty
     def _metadataByIDQuery(cls): #@NoSelf
         """
@@ -5051,6 +5269,10 @@
             a child resource being added, changed or removed.
         @type property_change: C{bool}
         """
+        # Migrating resources never send notifications
+        if self.ownerHome()._migration != _MIGRATION_STATUS_NONE:
+            returnValue(None)
+
         if self._txn.isNotifiedAlready(self):
             returnValue(None)
         self._txn.notificationAddedForObject(self)
@@ -5121,13 +5343,72 @@
             log.debug("CommonHomeChild.bumpModified failed")
 
 
+    #
+    # Migration related
+    #
 
+    @inlineCallbacks
+    def migrateToThisPod(self, remote, final):
+        """
+        Migrate data for the specified remote collection to this pod. C{final} is
+        C{True} when this is the final incremental sync (where we need to sync everything).
+        When C{False} we only sync owned child collections (without properties).
+
+        @param remote: the remote collection to migrate
+        @type remote: L{CommonHomeChild}
+        @param final: whether or not this is the final sync
+        @type final: C{bool}
+        """
+
+        assert self.ownerHome()._migration == _MIGRATION_STATUS_MIGRATING
+        assert remote.ownerHome().external()
+
+        # Need to sync bind data if different
+        remote_items = dict([(col, getattr(remote, attr)) for col, attr in zip(remote.bindColumns(), remote.bindAttributes())[self.bindColumnSyncStart:]])
+        remote_items.update(dict([(col, getattr(remote, attr)) for col, attr in zip(remote.additionalBindColumns(), remote.additionalBindAttributes())]))
+
+        local_items = dict([(col, getattr(self, attr)) for col, attr in zip(self.bindColumns(), self.bindAttributes())[self.bindColumnSyncStart:]])
+        local_items.update(dict([(col, getattr(self, attr)) for col, attr in zip(self.additionalBindColumns(), self.additionalBindAttributes())]))
+
+        # Regular bind info first (note we ignore the first few)
+        updates = {}
+        for k, v in remote_items.items():
+            if v != local_items[k]:
+                updates[k] = v
+
+        if updates:
+            yield self.invalidateQueryCache()
+
+            cb = self._bindSchema
+            yield Update(
+                columnMap=updates,
+                Where=(cb.RESOURCE_ID == self._resourceID).And
+                      (cb.HOME_RESOURCE_ID == self._home._resourceID)
+            ).on(self._txn)
+
+            # Only do name update if different
+            if self._name != remote._name:
+                # update memos
+                del self._home._children[self._name]
+                self._home._children[remote._name] = self
+                yield self._renameSyncToken()
+
+            # Sync the attributes on this object
+            for attr in remote.bindAttributes()[self.bindColumnSyncStart:]:
+                setattr(self, attr, getattr(remote, attr))
+            for attr in remote.additionalBindAttributes():
+                setattr(self, attr, getattr(remote, attr))
+
+
+
 class CommonObjectResource(FancyEqMixin, object):
     """
     Base class for object resources.
     """
     log = Logger()
 
+    _storeType = "resource"
+
     compareAttributes = (
         "_name",
         "_parentCollection",
@@ -5770,11 +6051,12 @@
     _homeSchema = schema.NOTIFICATION_HOME
 
 
-    def __init__(self, txn, uid, resourceID):
+    def __init__(self, txn, uid, resourceID, migration=_MIGRATION_STATUS_NONE):
 
         self._txn = txn
         self._uid = uid
         self._resourceID = resourceID
+        self._migration = migration
         self._dataVersion = None
         self._notifications = {}
         self._notificationNames = None
@@ -5785,15 +6067,23 @@
         self._notifiers = dict([(factory_name, factory.newNotifier(self),) for factory_name, factory in txn._notifierFactories.items()])
 
     _resourceIDFromUIDQuery = Select(
-        [_homeSchema.RESOURCE_ID], From=_homeSchema,
-        Where=_homeSchema.OWNER_UID == Parameter("uid"))
+        [_homeSchema.RESOURCE_ID],
+        From=_homeSchema,
+        Where=(_homeSchema.OWNER_UID == Parameter("uid")).And(
+               _homeSchema.MIGRATION == Parameter("migration"))
+    )
 
     _UIDFromResourceIDQuery = Select(
-        [_homeSchema.OWNER_UID], From=_homeSchema,
-        Where=_homeSchema.RESOURCE_ID == Parameter("rid"))
+        [_homeSchema.OWNER_UID],
+        From=_homeSchema,
+        Where=_homeSchema.RESOURCE_ID == Parameter("rid")
+    )
 
     _provisionNewNotificationsQuery = Insert(
-        {_homeSchema.OWNER_UID: Parameter("uid")},
+        {
+            _homeSchema.OWNER_UID: Parameter("uid"),
+            _homeSchema.MIGRATION: Parameter("migration"),
+        },
         Return=_homeSchema.RESOURCE_ID
     )
 
@@ -5809,8 +6099,8 @@
 
     @classmethod
     @inlineCallbacks
-    def notificationsWithUID(cls, txn, uid, create):
-        rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
+    def notificationsWithUID(cls, txn, uid, create, migration=_MIGRATION_STATUS_NONE):
+        rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid, migration=migration)
 
         if rows:
             resourceID = rows[0][0]
@@ -5822,7 +6112,7 @@
                 raise DirectoryRecordNotFoundError("Cannot create home for UID since no directory record exists: {}".format(uid))
 
             state = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
-            if state == _HOME_STATUS_EXTERNAL:
+            if state == _HOME_STATUS_EXTERNAL and migration == _MIGRATION_STATUS_NONE:
                 raise RecordNotAllowedError("Cannot store notifications for external user: {}".format(uid))
 
             # Use savepoint so we can do a partial rollback if there is a race
@@ -5832,7 +6122,7 @@
 
             try:
                 resourceID = str((
-                    yield cls._provisionNewNotificationsQuery.on(txn, uid=uid)
+                    yield cls._provisionNewNotificationsQuery.on(txn, uid=uid, migration=migration)
                 )[0][0])
             except Exception:
                 # FIXME: Really want to trap the pg.DatabaseError but in a non-
@@ -5840,7 +6130,7 @@
                 yield savepoint.rollback(txn)
 
                 # Retry the query - row may exist now, if not re-raise
-                rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
+                rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid, migration=migration)
                 if rows:
                     resourceID = rows[0][0]
                     created = False
@@ -5851,7 +6141,7 @@
                 yield savepoint.release(txn)
         else:
             returnValue(None)
-        collection = cls(txn, uid, resourceID)
+        collection = cls(txn, uid, resourceID, migration)
         yield collection._loadPropertyStore()
         if created:
             yield collection._initSyncToken()
@@ -5922,6 +6212,32 @@
         return self._uid
 
 
+    @inlineCallbacks
+    def updateDetails(self, newUID=None, newStatus=None, newMigration=None):
+        """
+        Change any of the uid, status, or migration of this home.
+        """
+        ch = self._homeSchema
+        columns = {}
+        if newUID is not None:
+            columns[ch.OWNER_UID] = newUID
+        if newStatus is not None:
+            columns[ch.STATUS] = newStatus
+        if newMigration is not None:
+            columns[ch.MIGRATION] = newMigration
+        yield Update(
+            columns,
+            # Key on the resource id; this class stores the uid as self._uid,
+            # and OWNER_UID alone could match rows in other migration states
+            Where=(ch.RESOURCE_ID == self._resourceID)
+        ).on(self._txn)
+
+        if newUID is not None:
+            self._uid = newUID
+        if newStatus is not None:
+            self._status = newStatus
+        if newMigration is not None:
+            self._migration = newMigration
+
+
     def owned(self):
         return True
 
@@ -6090,6 +6406,11 @@
         the resource has changed.  We ensure we only do this once per object
         per transaction.
         """
+
+        # Migrating resources never send notifications
+        if self._migration != _MIGRATION_STATUS_NONE:
+            returnValue(None)
+
         if self._txn.isNotifiedAlready(self):
             returnValue(None)
         self._txn.notificationAddedForObject(self)

Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_external.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_external.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_external.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -26,7 +26,8 @@
 from txdav.base.propertystore.sql import PropertyStore
 from txdav.common.datastore.sql import CommonHome, CommonHomeChild, \
     CommonObjectResource
-from txdav.common.datastore.sql_tables import _HOME_STATUS_EXTERNAL
+from txdav.common.datastore.sql_tables import _HOME_STATUS_EXTERNAL, \
+    _MIGRATION_STATUS_MIGRATING
 from txdav.common.icommondatastore import NonExistentExternalShare, \
     ExternalShareFailed
 
@@ -38,9 +39,15 @@
     A CommonHome for a user not hosted on this system, but on another pod. This is needed to provide a
     "reference" to the external user so we can share with them. Actual operations to list child resources, etc
     are all stubbed out since no data for the user is actually hosted in this store.
+
+    For sharing, we actually load child resources that are of the "internal" class as those know how to
+    proxy sharing calls to the remote side.
+
+    For migration, we want the child resources to always be external.
     """
 
     def __init__(self, transaction, ownerUID, resourceID):
+        self._childClass = self._childClass._externalClass
         super(CommonHomeExternal, self).__init__(transaction, ownerUID)
         self._resourceID = resourceID
         self._status = _HOME_STATUS_EXTERNAL
@@ -69,13 +76,20 @@
         raise AssertionError("CommonHomeExternal: not supported")
 
 
+    @inlineCallbacks
     def loadChildren(self):
         """
-        No children.
+        Children are only available while this home is migrating.
         """
-        raise AssertionError("CommonHomeExternal: not supported")
 
+        # Only available if migrating
+        if self._migration != _MIGRATION_STATUS_MIGRATING:
+            raise AssertionError("CommonHomeExternal: not supported")
 
+        results = yield super(CommonHomeExternal, self).loadChildren()
+        returnValue(results)
+
+
     def listChildren(self):
         """
         No children.
@@ -209,9 +223,22 @@
     specific apis to the other pod using cross-pod requests.
     """
 
+    @classmethod
+    @inlineCallbacks
+    def loadAllObjects(cls, home):
+        mapping_list = yield home._txn.store().conduit.send_loadchildren(home)
+
+        results = []
+        if mapping_list:
+            for mapping in mapping_list:
+                child = yield cls.internalize(home, mapping)
+                results.append(child)
+        returnValue(results)
+
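+    # This is the receiving half of the migration load: the conduit's
+    # send_loadchildren action is expected to return a list of mappings built
+    # by CommonHomeChild.externalize() on the remote pod.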
+
     def external(self):
         """
-        Is this an external home.
+        Is this an external home child.
 
-        @return: a string.
+        @return: C{True} if this is an external home child.
         """

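The new loadAllObjects() above makes a single conduit round trip and then internalizes each returned mapping into a local stub. The sketch below shows that shape under stated assumptions: FakeConduit and ExternalChild are invented stand-ins, the conduit is passed in explicitly rather than reached through the home's transaction, and send_loadchildren() returns canned mappings instead of issuing the real cross-pod HTTP request.

from twisted.internet.defer import inlineCallbacks, returnValue, succeed

class FakeConduit(object):
    """Stands in for the cross-pod conduit; the real one sends a request
    to the peer pod and returns its serialized response."""

    def send_loadchildren(self, home):
        return succeed([{"name": "calendar"}, {"name": "tasks"}])

class ExternalChild(object):
    def __init__(self, home, name):
        self.home = home
        self.name = name

    @classmethod
    def internalize(cls, home, mapping):
        # Rebuild a local stub from the mapping sent over the wire.
        return succeed(cls(home, mapping["name"]))

    @classmethod
    @inlineCallbacks
    def loadAllObjects(cls, home, conduit):
        mapping_list = yield conduit.send_loadchildren(home)
        results = []
        for mapping in (mapping_list or []):
            child = yield cls.internalize(home, mapping)
            results.append(child)
        returnValue(results)

def show(children):
    for child in children:
        print(child.name)

d = ExternalChild.loadAllObjects(object(), FakeConduit())
d.addCallback(show)
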
Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current-oracle-dialect.sql	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current-oracle-dialect.sql	2014-01-22 04:07:03 UTC (rev 12418)
@@ -17,9 +17,11 @@
 
 create table CALENDAR_HOME (
     "RESOURCE_ID" integer primary key,
-    "OWNER_UID" nvarchar2(255) unique,
+    "OWNER_UID" nvarchar2(255),
     "STATUS" integer default 0 not null,
-    "DATAVERSION" integer default 0 not null
+    "MIGRATION" integer default 0 not null,
+    "DATAVERSION" integer default 0 not null, 
+    unique("OWNER_UID", "MIGRATION")
 );
 
 create table HOME_STATUS (
@@ -29,6 +31,14 @@
 
 insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
 insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
+create table MIGRATION_STATUS (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('none', 0);
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('migrating', 1);
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('migrated', 2);
 create table CALENDAR (
     "RESOURCE_ID" integer primary key
 );
@@ -57,9 +67,11 @@
 
 create table NOTIFICATION_HOME (
     "RESOURCE_ID" integer primary key,
-    "OWNER_UID" nvarchar2(255) unique,
+    "OWNER_UID" nvarchar2(255),
     "STATUS" integer default 0 not null,
-    "DATAVERSION" integer default 0 not null
+    "MIGRATION" integer default 0 not null,
+    "DATAVERSION" integer default 0 not null, 
+    unique("OWNER_UID", "MIGRATION")
 );
 
 create table NOTIFICATION (
@@ -219,9 +231,11 @@
 create table ADDRESSBOOK_HOME (
     "RESOURCE_ID" integer primary key,
     "ADDRESSBOOK_PROPERTY_STORE_ID" integer not null,
-    "OWNER_UID" nvarchar2(255) unique,
+    "OWNER_UID" nvarchar2(255),
     "STATUS" integer default 0 not null,
-    "DATAVERSION" integer default 0 not null
+    "MIGRATION" integer default 0 not null,
+    "DATAVERSION" integer default 0 not null, 
+    unique("OWNER_UID", "MIGRATION")
 );
 
 create table ADDRESSBOOK_HOME_METADATA (
@@ -400,7 +414,7 @@
     "VALUE" nvarchar2(255)
 );
 
-insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '33');
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '34');
 insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '5');
 insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
 insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');

Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current.sql	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current.sql	2014-01-22 04:07:03 UTC (rev 12418)
@@ -53,9 +53,12 @@
 
 create table CALENDAR_HOME (
   RESOURCE_ID      integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
-  OWNER_UID        varchar(255) not null unique,                                -- implicit index
+  OWNER_UID        varchar(255) not null,
   STATUS           integer      default 0 not null,                             -- enum HOME_STATUS
-  DATAVERSION      integer      default 0 not null
+  MIGRATION        integer      default 0 not null,                             -- enum MIGRATION_STATUS
+  DATAVERSION      integer      default 0 not null,
+  
+  unique(OWNER_UID, MIGRATION) -- implicit index
 );
 
 -- Enumeration of statuses
@@ -69,6 +72,16 @@
 insert into HOME_STATUS values (1, 'external');
 
 
+create table MIGRATION_STATUS (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into MIGRATION_STATUS values (0, 'none' );
+insert into MIGRATION_STATUS values (1, 'migrating');
+insert into MIGRATION_STATUS values (2, 'migrated');
+
+
 --------------
 -- Calendar --
 --------------
@@ -123,9 +136,12 @@
 
 create table NOTIFICATION_HOME (
   RESOURCE_ID integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
-  OWNER_UID   varchar(255) not null unique,                                -- implicit index
+  OWNER_UID   varchar(255) not null,
   STATUS      integer      default 0 not null,                             -- enum HOME_STATUS
-  DATAVERSION integer      default 0 not null
+  MIGRATION   integer      default 0 not null,                             -- enum MIGRATION_STATUS
+  DATAVERSION integer      default 0 not null,
+  
+  unique(OWNER_UID, MIGRATION) -- implicit index
 );
 
 create table NOTIFICATION (
@@ -398,9 +414,12 @@
 create table ADDRESSBOOK_HOME (
   RESOURCE_ID      				integer			primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
   ADDRESSBOOK_PROPERTY_STORE_ID	integer      	default nextval('RESOURCE_ID_SEQ') not null, 	-- implicit index
-  OWNER_UID        				varchar(255) 	not null unique,                                -- implicit index
+  OWNER_UID        				varchar(255) 	not null,
   STATUS           				integer      	default 0 not null,                             -- enum HOME_STATUS
-  DATAVERSION      				integer      	default 0 not null
+  MIGRATION        				integer 	    default 0 not null,                             -- enum MIGRATION_STATUS
+  DATAVERSION      				integer      	default 0 not null,
+  
+  unique(OWNER_UID, MIGRATION) -- implicit index
 );
 
 
@@ -751,7 +770,7 @@
   VALUE                         varchar(255)
 );
 
-insert into CALENDARSERVER values ('VERSION', '33');
+insert into CALENDARSERVER values ('VERSION', '34');
 insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '5');
 insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
 insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');

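The pivotal schema change here is replacing the single-column unique on OWNER_UID with unique(OWNER_UID, MIGRATION) on each home table, so a destination pod can hold a "migrating" copy of a home under the same owner UID as a normal home. A quick demonstration of the constraint's behavior, using the Python stdlib sqlite3 module as a stand-in for PostgreSQL:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("""
    create table CALENDAR_HOME (
        RESOURCE_ID integer primary key,
        OWNER_UID   varchar(255) not null,
        MIGRATION   integer default 0 not null,  -- 0:none 1:migrating 2:migrated
        unique (OWNER_UID, MIGRATION)
    )
""")

# One owner may now have a normal home and an in-flight migration copy.
conn.execute("insert into CALENDAR_HOME (OWNER_UID, MIGRATION) values ('user01', 0)")
conn.execute("insert into CALENDAR_HOME (OWNER_UID, MIGRATION) values ('user01', 1)")

# A duplicate (OWNER_UID, MIGRATION) pair is still rejected, just as the
# old single-column unique rejected any duplicate OWNER_UID.
try:
    conn.execute("insert into CALENDAR_HOME (OWNER_UID, MIGRATION) values ('user01', 0)")
except sqlite3.IntegrityError as e:
    print("rejected: %s" % e)

One consequence: owner lookups are no longer unique on OWNER_UID alone, so queries that previously keyed on the owner must now constrain MIGRATION as well.
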
Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/oracle-dialect/v33.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/oracle-dialect/v33.sql	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/oracle-dialect/v33.sql	2014-01-22 04:07:03 UTC (rev 12418)
@@ -0,0 +1,540 @@
+create sequence RESOURCE_ID_SEQ;
+create sequence INSTANCE_ID_SEQ;
+create sequence ATTACHMENT_ID_SEQ;
+create sequence REVISION_SEQ;
+create sequence WORKITEM_SEQ;
+create table NODE_INFO (
+    "HOSTNAME" nvarchar2(255),
+    "PID" integer not null,
+    "PORT" integer not null,
+    "TIME" timestamp default CURRENT_TIMESTAMP at time zone 'UTC' not null, 
+    primary key("HOSTNAME", "PORT")
+);
+
+create table NAMED_LOCK (
+    "LOCK_NAME" nvarchar2(255) primary key
+);
+
+create table CALENDAR_HOME (
+    "RESOURCE_ID" integer primary key,
+    "OWNER_UID" nvarchar2(255) unique,
+    "STATUS" integer default 0 not null,
+    "DATAVERSION" integer default 0 not null
+);
+
+create table HOME_STATUS (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
+create table CALENDAR (
+    "RESOURCE_ID" integer primary key
+);
+
+create table CALENDAR_HOME_METADATA (
+    "RESOURCE_ID" integer primary key references CALENDAR_HOME on delete cascade,
+    "QUOTA_USED_BYTES" integer default 0 not null,
+    "DEFAULT_EVENTS" integer default null references CALENDAR on delete set null,
+    "DEFAULT_TASKS" integer default null references CALENDAR on delete set null,
+    "DEFAULT_POLLS" integer default null references CALENDAR on delete set null,
+    "ALARM_VEVENT_TIMED" nclob default null,
+    "ALARM_VEVENT_ALLDAY" nclob default null,
+    "ALARM_VTODO_TIMED" nclob default null,
+    "ALARM_VTODO_ALLDAY" nclob default null,
+    "AVAILABILITY" nclob default null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_METADATA (
+    "RESOURCE_ID" integer primary key references CALENDAR on delete cascade,
+    "SUPPORTED_COMPONENTS" nvarchar2(255) default null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table NOTIFICATION_HOME (
+    "RESOURCE_ID" integer primary key,
+    "OWNER_UID" nvarchar2(255) unique,
+    "STATUS" integer default 0 not null,
+    "DATAVERSION" integer default 0 not null
+);
+
+create table NOTIFICATION (
+    "RESOURCE_ID" integer primary key,
+    "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME,
+    "NOTIFICATION_UID" nvarchar2(255),
+    "NOTIFICATION_TYPE" nvarchar2(255),
+    "NOTIFICATION_DATA" nclob,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique("NOTIFICATION_UID", "NOTIFICATION_HOME_RESOURCE_ID")
+);
+
+create table CALENDAR_BIND (
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "EXTERNAL_ID" integer default null,
+    "CALENDAR_RESOURCE_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "BIND_REVISION" integer default 0 not null,
+    "MESSAGE" nclob,
+    "TRANSP" integer default 0 not null,
+    "ALARM_VEVENT_TIMED" nclob default null,
+    "ALARM_VEVENT_ALLDAY" nclob default null,
+    "ALARM_VTODO_TIMED" nclob default null,
+    "ALARM_VTODO_ALLDAY" nclob default null,
+    "TIMEZONE" nclob default null, 
+    primary key("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_ID"), 
+    unique("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_NAME")
+);
+
+create table CALENDAR_BIND_MODE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('own', 0);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('write', 2);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('direct', 3);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('indirect', 4);
+create table CALENDAR_BIND_STATUS (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invited', 0);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('accepted', 1);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('declined', 2);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invalid', 3);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('deleted', 4);
+create table CALENDAR_TRANSP (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('opaque', 0);
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('transparent', 1);
+create table CALENDAR_OBJECT (
+    "RESOURCE_ID" integer primary key,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "ICALENDAR_TEXT" nclob,
+    "ICALENDAR_UID" nvarchar2(255),
+    "ICALENDAR_TYPE" nvarchar2(255),
+    "ATTACHMENTS_MODE" integer default 0 not null,
+    "DROPBOX_ID" nvarchar2(255),
+    "ORGANIZER" nvarchar2(255),
+    "RECURRANCE_MIN" date,
+    "RECURRANCE_MAX" date,
+    "ACCESS" integer default 0 not null,
+    "SCHEDULE_OBJECT" integer default 0,
+    "SCHEDULE_TAG" nvarchar2(36) default null,
+    "SCHEDULE_ETAGS" nclob default null,
+    "PRIVATE_COMMENTS" integer default 0 not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique("CALENDAR_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table CALENDAR_OBJECT_ATTACHMENTS_MO (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('none', 0);
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('write', 2);
+create table CALENDAR_ACCESS_TYPE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(32) unique
+);
+
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('', 0);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('public', 1);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('private', 2);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('confidential', 3);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('restricted', 4);
+create table TIME_RANGE (
+    "INSTANCE_ID" integer primary key,
+    "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+    "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+    "FLOATING" integer not null,
+    "START_DATE" timestamp not null,
+    "END_DATE" timestamp not null,
+    "FBTYPE" integer not null,
+    "TRANSPARENT" integer not null
+);
+
+create table FREE_BUSY_TYPE (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('unknown', 0);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('free', 1);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy', 2);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-unavailable', 3);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-tentative', 4);
+create table TRANSPARENCY (
+    "TIME_RANGE_INSTANCE_ID" integer not null references TIME_RANGE on delete cascade,
+    "USER_ID" nvarchar2(255),
+    "TRANSPARENT" integer not null
+);
+
+create table ATTACHMENT (
+    "ATTACHMENT_ID" integer primary key,
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "DROPBOX_ID" nvarchar2(255),
+    "CONTENT_TYPE" nvarchar2(255),
+    "SIZE" integer not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "PATH" nvarchar2(1024)
+);
+
+create table ATTACHMENT_CALENDAR_OBJECT (
+    "ATTACHMENT_ID" integer not null references ATTACHMENT on delete cascade,
+    "MANAGED_ID" nvarchar2(255),
+    "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade, 
+    primary key("ATTACHMENT_ID", "CALENDAR_OBJECT_RESOURCE_ID"), 
+    unique("MANAGED_ID", "CALENDAR_OBJECT_RESOURCE_ID")
+);
+
+create table RESOURCE_PROPERTY (
+    "RESOURCE_ID" integer not null,
+    "NAME" nvarchar2(255),
+    "VALUE" nclob,
+    "VIEWER_UID" nvarchar2(255), 
+    primary key("RESOURCE_ID", "NAME", "VIEWER_UID")
+);
+
+create table ADDRESSBOOK_HOME (
+    "RESOURCE_ID" integer primary key,
+    "ADDRESSBOOK_PROPERTY_STORE_ID" integer not null,
+    "OWNER_UID" nvarchar2(255) unique,
+    "STATUS" integer default 0 not null,
+    "DATAVERSION" integer default 0 not null
+);
+
+create table ADDRESSBOOK_HOME_METADATA (
+    "RESOURCE_ID" integer primary key references ADDRESSBOOK_HOME on delete cascade,
+    "QUOTA_USED_BYTES" integer default 0 not null,
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table SHARED_ADDRESSBOOK_BIND (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "OWNER_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "EXTERNAL_ID" integer default null,
+    "ADDRESSBOOK_RESOURCE_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "BIND_REVISION" integer default 0 not null,
+    "MESSAGE" nclob, 
+    primary key("ADDRESSBOOK_HOME_RESOURCE_ID", "OWNER_HOME_RESOURCE_ID"), 
+    unique("ADDRESSBOOK_HOME_RESOURCE_ID", "ADDRESSBOOK_RESOURCE_NAME")
+);
+
+create table ADDRESSBOOK_OBJECT (
+    "RESOURCE_ID" integer primary key,
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "VCARD_TEXT" nclob,
+    "VCARD_UID" nvarchar2(255),
+    "KIND" integer not null,
+    "MD5" nchar(32),
+    "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique("ADDRESSBOOK_HOME_RESOURCE_ID", "RESOURCE_NAME"), 
+    unique("ADDRESSBOOK_HOME_RESOURCE_ID", "VCARD_UID")
+);
+
+create table ADDRESSBOOK_OBJECT_KIND (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('person', 0);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('group', 1);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('resource', 2);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('location', 3);
+create table ABO_MEMBERS (
+    "GROUP_ID" integer not null,
+    "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "MEMBER_ID" integer not null,
+    "REVISION" integer not null,
+    "REMOVED" integer default 0 not null,
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    primary key("GROUP_ID", "MEMBER_ID", "REVISION")
+);
+
+create table ABO_FOREIGN_MEMBERS (
+    "GROUP_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+    "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+    "MEMBER_ADDRESS" nvarchar2(255), 
+    primary key("GROUP_ID", "MEMBER_ADDRESS")
+);
+
+create table SHARED_GROUP_BIND (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "GROUP_RESOURCE_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+    "EXTERNAL_ID" integer default null,
+    "GROUP_ADDRESSBOOK_NAME" nvarchar2(255),
+    "BIND_MODE" integer not null,
+    "BIND_STATUS" integer not null,
+    "BIND_REVISION" integer default 0 not null,
+    "MESSAGE" nclob, 
+    primary key("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_RESOURCE_ID"), 
+    unique("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_ADDRESSBOOK_NAME")
+);
+
+create table CALENDAR_OBJECT_REVISIONS (
+    "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+    "CALENDAR_RESOURCE_ID" integer references CALENDAR,
+    "CALENDAR_NAME" nvarchar2(255) default null,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null,
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+    "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+    "OWNER_HOME_RESOURCE_ID" integer references ADDRESSBOOK_HOME,
+    "ADDRESSBOOK_NAME" nvarchar2(255) default null,
+    "OBJECT_RESOURCE_ID" integer default 0,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null,
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+    "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME on delete cascade,
+    "RESOURCE_NAME" nvarchar2(255),
+    "REVISION" integer not null,
+    "DELETED" integer not null,
+    "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique("NOTIFICATION_HOME_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table APN_SUBSCRIPTIONS (
+    "TOKEN" nvarchar2(255),
+    "RESOURCE_KEY" nvarchar2(255),
+    "MODIFIED" integer not null,
+    "SUBSCRIBER_GUID" nvarchar2(255),
+    "USER_AGENT" nvarchar2(255) default null,
+    "IP_ADDR" nvarchar2(255) default null, 
+    primary key("TOKEN", "RESOURCE_KEY")
+);
+
+create table IMIP_TOKENS (
+    "TOKEN" nvarchar2(255),
+    "ORGANIZER" nvarchar2(255),
+    "ATTENDEE" nvarchar2(255),
+    "ICALUID" nvarchar2(255),
+    "ACCESSED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    primary key("ORGANIZER", "ATTENDEE", "ICALUID")
+);
+
+create table IMIP_INVITATION_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "FROM_ADDR" nvarchar2(255),
+    "TO_ADDR" nvarchar2(255),
+    "ICALENDAR_TEXT" nclob
+);
+
+create table IMIP_POLLING_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table IMIP_REPLY_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "ORGANIZER" nvarchar2(255),
+    "ATTENDEE" nvarchar2(255),
+    "ICALENDAR_TEXT" nclob
+);
+
+create table PUSH_NOTIFICATION_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "PUSH_ID" nvarchar2(255),
+    "PRIORITY" integer not null
+);
+
+create table GROUP_CACHER_POLLING_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create table FIND_MIN_VALID_REVISION_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table REVISION_CLEANUP_WORK (
+    "WORK_ID" integer primary key not null,
+    "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDARSERVER (
+    "NAME" nvarchar2(255) primary key,
+    "VALUE" nvarchar2(255)
+);
+
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '33');
+insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '5');
+insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
+insert into CALENDARSERVER (NAME, VALUE) values ('MIN-VALID-REVISION', '1');
+create index CALENDAR_HOME_METADAT_3cb9049e on CALENDAR_HOME_METADATA (
+    DEFAULT_EVENTS
+);
+
+create index CALENDAR_HOME_METADAT_d55e5548 on CALENDAR_HOME_METADATA (
+    DEFAULT_TASKS
+);
+
+create index CALENDAR_HOME_METADAT_910264ce on CALENDAR_HOME_METADATA (
+    DEFAULT_POLLS
+);
+
+create index NOTIFICATION_NOTIFICA_f891f5f9 on NOTIFICATION (
+    NOTIFICATION_HOME_RESOURCE_ID
+);
+
+create index CALENDAR_BIND_RESOURC_e57964d4 on CALENDAR_BIND (
+    CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_CALEN_a9a453a9 on CALENDAR_OBJECT (
+    CALENDAR_RESOURCE_ID,
+    ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_CALEN_96e83b73 on CALENDAR_OBJECT (
+    CALENDAR_RESOURCE_ID,
+    RECURRANCE_MAX
+);
+
+create index CALENDAR_OBJECT_ICALE_82e731d5 on CALENDAR_OBJECT (
+    ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_DROPB_de041d80 on CALENDAR_OBJECT (
+    DROPBOX_ID
+);
+
+create index TIME_RANGE_CALENDAR_R_beb6e7eb on TIME_RANGE (
+    CALENDAR_RESOURCE_ID
+);
+
+create index TIME_RANGE_CALENDAR_O_acf37bd1 on TIME_RANGE (
+    CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index TRANSPARENCY_TIME_RAN_5f34467f on TRANSPARENCY (
+    TIME_RANGE_INSTANCE_ID
+);
+
+create index ATTACHMENT_CALENDAR_H_0078845c on ATTACHMENT (
+    CALENDAR_HOME_RESOURCE_ID
+);
+
+create index ATTACHMENT_DROPBOX_ID_5073cf23 on ATTACHMENT (
+    DROPBOX_ID
+);
+
+create index ATTACHMENT_CALENDAR_O_81508484 on ATTACHMENT_CALENDAR_OBJECT (
+    CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index SHARED_ADDRESSBOOK_BI_e9a2e6d4 on SHARED_ADDRESSBOOK_BIND (
+    OWNER_HOME_RESOURCE_ID
+);
+
+create index ABO_MEMBERS_ADDRESSBO_4effa879 on ABO_MEMBERS (
+    ADDRESSBOOK_ID
+);
+
+create index ABO_MEMBERS_MEMBER_ID_8d66adcf on ABO_MEMBERS (
+    MEMBER_ID
+);
+
+create index ABO_FOREIGN_MEMBERS_A_1fd2c5e9 on ABO_FOREIGN_MEMBERS (
+    ADDRESSBOOK_ID
+);
+
+create index SHARED_GROUP_BIND_RES_cf52f95d on SHARED_GROUP_BIND (
+    GROUP_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_3a3956c4 on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_HOME_RESOURCE_ID,
+    CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_6d9d929c on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
+);
+
+create index CALENDAR_OBJECT_REVIS_265c8acf on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_2bfcf757 on ADDRESSBOOK_OBJECT_REVISIONS (
+    ADDRESSBOOK_HOME_RESOURCE_ID,
+    OWNER_HOME_RESOURCE_ID
+);
+
+create index ADDRESSBOOK_OBJECT_RE_00fe8288 on ADDRESSBOOK_OBJECT_REVISIONS (
+    OWNER_HOME_RESOURCE_ID,
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_45004780 on ADDRESSBOOK_OBJECT_REVISIONS (
+    OWNER_HOME_RESOURCE_ID,
+    REVISION
+);
+
+create index NOTIFICATION_OBJECT_R_036a9cee on NOTIFICATION_OBJECT_REVISIONS (
+    NOTIFICATION_HOME_RESOURCE_ID,
+    REVISION
+);
+
+create index APN_SUBSCRIPTIONS_RES_9610d78e on APN_SUBSCRIPTIONS (
+    RESOURCE_KEY
+);
+
+create index IMIP_TOKENS_TOKEN_e94b918f on IMIP_TOKENS (
+    TOKEN
+);
+
+create index CALENDAR_OBJECT_SPLIT_af71dcda on CALENDAR_OBJECT_SPLITTER_WORK (
+    RESOURCE_ID
+);
+

Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/postgres-dialect/v33.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/postgres-dialect/v33.sql	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/postgres-dialect/v33.sql	2014-01-22 04:07:03 UTC (rev 12418)
@@ -0,0 +1,758 @@
+-- -*- test-case-name: txdav.caldav.datastore.test.test_sql,txdav.carddav.datastore.test.test_sql -*-
+
+----
+-- Copyright (c) 2010-2014 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+
+-----------------
+-- Resource ID --
+-----------------
+
+create sequence RESOURCE_ID_SEQ;
+
+
+-------------------------
+-- Cluster Bookkeeping --
+-------------------------
+
+-- Information about a process connected to this database.
+
+-- Note that this must match the node info schema in twext.enterprise.queue.
+create table NODE_INFO (
+  HOSTNAME  varchar(255) not null,
+  PID       integer      not null,
+  PORT      integer      not null,
+  TIME      timestamp    not null default timezone('UTC', CURRENT_TIMESTAMP),
+
+  primary key (HOSTNAME, PORT)
+);
+
+-- Unique named locks.  This table should always be empty, but rows are
+-- temporarily created in order to prevent undesirable concurrency.
+create table NAMED_LOCK (
+    LOCK_NAME varchar(255) primary key
+);
+
+
+-------------------
+-- Calendar Home --
+-------------------
+
+create table CALENDAR_HOME (
+  RESOURCE_ID      integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID        varchar(255) not null unique,                                -- implicit index
+  STATUS           integer      default 0 not null,                             -- enum HOME_STATUS
+  DATAVERSION      integer      default 0 not null
+);
+
+-- Enumeration of statuses
+
+create table HOME_STATUS (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into HOME_STATUS values (0, 'normal' );
+insert into HOME_STATUS values (1, 'external');
+
+
+--------------
+-- Calendar --
+--------------
+
+create table CALENDAR (
+  RESOURCE_ID integer   primary key default nextval('RESOURCE_ID_SEQ') -- implicit index
+);
+
+
+----------------------------
+-- Calendar Home Metadata --
+----------------------------
+
+create table CALENDAR_HOME_METADATA (
+  RESOURCE_ID              integer     primary key references CALENDAR_HOME on delete cascade, -- implicit index
+  QUOTA_USED_BYTES         integer     default 0 not null,
+  DEFAULT_EVENTS           integer     default null references CALENDAR on delete set null,
+  DEFAULT_TASKS            integer     default null references CALENDAR on delete set null,
+  DEFAULT_POLLS            integer     default null references CALENDAR on delete set null,
+  ALARM_VEVENT_TIMED       text        default null,
+  ALARM_VEVENT_ALLDAY      text        default null,
+  ALARM_VTODO_TIMED        text        default null,
+  ALARM_VTODO_ALLDAY       text        default null,
+  AVAILABILITY             text        default null,
+  CREATED                  timestamp   default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                 timestamp   default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index CALENDAR_HOME_METADATA_DEFAULT_EVENTS on
+	CALENDAR_HOME_METADATA(DEFAULT_EVENTS);
+create index CALENDAR_HOME_METADATA_DEFAULT_TASKS on
+	CALENDAR_HOME_METADATA(DEFAULT_TASKS);
+create index CALENDAR_HOME_METADATA_DEFAULT_POLLS on
+	CALENDAR_HOME_METADATA(DEFAULT_POLLS);
+
+
+-----------------------
+-- Calendar Metadata --
+-----------------------
+
+create table CALENDAR_METADATA (
+  RESOURCE_ID           integer      primary key references CALENDAR on delete cascade, -- implicit index
+  SUPPORTED_COMPONENTS  varchar(255) default null,
+  CREATED               timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED              timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------------
+-- Sharing Notifications --
+---------------------------
+
+create table NOTIFICATION_HOME (
+  RESOURCE_ID integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID   varchar(255) not null unique,                                -- implicit index
+  STATUS      integer      default 0 not null,                             -- enum HOME_STATUS
+  DATAVERSION integer      default 0 not null
+);
+
+create table NOTIFICATION (
+  RESOURCE_ID                   integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME,
+  NOTIFICATION_UID              varchar(255) not null,
+  NOTIFICATION_TYPE             varchar(255) not null,
+  NOTIFICATION_DATA             text         not null,
+  MD5                           char(32)     not null,
+  CREATED                       timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique(NOTIFICATION_UID, NOTIFICATION_HOME_RESOURCE_ID) -- implicit index
+);
+
+create index NOTIFICATION_NOTIFICATION_HOME_RESOURCE_ID on
+	NOTIFICATION(NOTIFICATION_HOME_RESOURCE_ID);
+
+
+-------------------
+-- Calendar Bind --
+-------------------
+
+-- Joins CALENDAR_HOME and CALENDAR
+
+create table CALENDAR_BIND (
+  CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
+  CALENDAR_RESOURCE_ID      integer      not null references CALENDAR on delete cascade,
+  EXTERNAL_ID			    integer      default null,
+  CALENDAR_RESOURCE_NAME    varchar(255) not null,
+  BIND_MODE                 integer      not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS               integer      not null, -- enum CALENDAR_BIND_STATUS
+  BIND_REVISION				integer      default 0 not null,
+  MESSAGE                   text,
+  TRANSP                    integer      default 0 not null, -- enum CALENDAR_TRANSP
+  ALARM_VEVENT_TIMED        text         default null,
+  ALARM_VEVENT_ALLDAY       text         default null,
+  ALARM_VTODO_TIMED         text         default null,
+  ALARM_VTODO_ALLDAY        text         default null,
+  TIMEZONE                  text         default null,
+
+  primary key(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID), -- implicit index
+  unique(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_NAME)     -- implicit index
+);
+
+create index CALENDAR_BIND_RESOURCE_ID on
+	CALENDAR_BIND(CALENDAR_RESOURCE_ID);
+
+-- Enumeration of calendar bind modes
+
+create table CALENDAR_BIND_MODE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_MODE values (0, 'own'  );
+insert into CALENDAR_BIND_MODE values (1, 'read' );
+insert into CALENDAR_BIND_MODE values (2, 'write');
+insert into CALENDAR_BIND_MODE values (3, 'direct');
+insert into CALENDAR_BIND_MODE values (4, 'indirect');
+
+-- Enumeration of statuses
+
+create table CALENDAR_BIND_STATUS (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_STATUS values (0, 'invited' );
+insert into CALENDAR_BIND_STATUS values (1, 'accepted');
+insert into CALENDAR_BIND_STATUS values (2, 'declined');
+insert into CALENDAR_BIND_STATUS values (3, 'invalid');
+insert into CALENDAR_BIND_STATUS values (4, 'deleted');
+
+
+-- Enumeration of transparency
+
+create table CALENDAR_TRANSP (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_TRANSP values (0, 'opaque' );
+insert into CALENDAR_TRANSP values (1, 'transparent');
+
+
+---------------------
+-- Calendar Object --
+---------------------
+
+create table CALENDAR_OBJECT (
+  RESOURCE_ID          integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  CALENDAR_RESOURCE_ID integer      not null references CALENDAR on delete cascade,
+  RESOURCE_NAME        varchar(255) not null,
+  ICALENDAR_TEXT       text         not null,
+  ICALENDAR_UID        varchar(255) not null,
+  ICALENDAR_TYPE       varchar(255) not null,
+  ATTACHMENTS_MODE     integer      default 0 not null, -- enum CALENDAR_OBJECT_ATTACHMENTS_MODE
+  DROPBOX_ID           varchar(255),
+  ORGANIZER            varchar(255),
+  RECURRANCE_MIN       date,        -- minimum date that recurrences have been expanded to.
+  RECURRANCE_MAX       date,        -- maximum date that recurrences have been expanded to.
+  ACCESS               integer      default 0 not null,
+  SCHEDULE_OBJECT      boolean      default false,
+  SCHEDULE_TAG         varchar(36)  default null,
+  SCHEDULE_ETAGS       text         default null,
+  PRIVATE_COMMENTS     boolean      default false not null,
+  MD5                  char(32)     not null,
+  CREATED              timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED             timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique (CALENDAR_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+
+  -- since the 'inbox' is a 'calendar resource' for the purpose of storing
+  -- calendar objects, this constraint has to be selectively enforced by the
+  -- application layer.
+
+  -- unique(CALENDAR_RESOURCE_ID, ICALENDAR_UID)
+);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_AND_ICALENDAR_UID on
+  CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_RECURRANCE_MAX on
+  CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, RECURRANCE_MAX);
+
+create index CALENDAR_OBJECT_ICALENDAR_UID on
+  CALENDAR_OBJECT(ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_DROPBOX_ID on
+  CALENDAR_OBJECT(DROPBOX_ID);
+
+-- Enumeration of attachment modes
+
+create table CALENDAR_OBJECT_ATTACHMENTS_MODE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (0, 'none' );
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (1, 'read' );
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (2, 'write');
+
+
+-- Enumeration of calendar access types
+
+create table CALENDAR_ACCESS_TYPE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(32) not null unique
+);
+
+insert into CALENDAR_ACCESS_TYPE values (0, ''             );
+insert into CALENDAR_ACCESS_TYPE values (1, 'public'       );
+insert into CALENDAR_ACCESS_TYPE values (2, 'private'      );
+insert into CALENDAR_ACCESS_TYPE values (3, 'confidential' );
+insert into CALENDAR_ACCESS_TYPE values (4, 'restricted'   );
+
+
+-----------------
+-- Instance ID --
+-----------------
+
+create sequence INSTANCE_ID_SEQ;
+
+
+----------------
+-- Time Range --
+----------------
+
+create table TIME_RANGE (
+  INSTANCE_ID                 integer        primary key default nextval('INSTANCE_ID_SEQ'), -- implicit index
+  CALENDAR_RESOURCE_ID        integer        not null references CALENDAR on delete cascade,
+  CALENDAR_OBJECT_RESOURCE_ID integer        not null references CALENDAR_OBJECT on delete cascade,
+  FLOATING                    boolean        not null,
+  START_DATE                  timestamp      not null,
+  END_DATE                    timestamp      not null,
+  FBTYPE                      integer        not null,
+  TRANSPARENT                 boolean        not null
+);
+
+create index TIME_RANGE_CALENDAR_RESOURCE_ID on
+  TIME_RANGE(CALENDAR_RESOURCE_ID);
+create index TIME_RANGE_CALENDAR_OBJECT_RESOURCE_ID on
+  TIME_RANGE(CALENDAR_OBJECT_RESOURCE_ID);
+
+
+-- Enumeration of free/busy types
+
+create table FREE_BUSY_TYPE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into FREE_BUSY_TYPE values (0, 'unknown'         );
+insert into FREE_BUSY_TYPE values (1, 'free'            );
+insert into FREE_BUSY_TYPE values (2, 'busy'            );
+insert into FREE_BUSY_TYPE values (3, 'busy-unavailable');
+insert into FREE_BUSY_TYPE values (4, 'busy-tentative'  );
+
+
+------------------
+-- Transparency --
+------------------
+
+create table TRANSPARENCY (
+  TIME_RANGE_INSTANCE_ID      integer      not null references TIME_RANGE on delete cascade,
+  USER_ID                     varchar(255) not null,
+  TRANSPARENT                 boolean      not null
+);
+
+create index TRANSPARENCY_TIME_RANGE_INSTANCE_ID on
+  TRANSPARENCY(TIME_RANGE_INSTANCE_ID);
+
+
+----------------
+-- Attachment --
+----------------
+
+create sequence ATTACHMENT_ID_SEQ;
+
+create table ATTACHMENT (
+  ATTACHMENT_ID               integer           primary key default nextval('ATTACHMENT_ID_SEQ'), -- implicit index
+  CALENDAR_HOME_RESOURCE_ID   integer           not null references CALENDAR_HOME,
+  DROPBOX_ID                  varchar(255),
+  CONTENT_TYPE                varchar(255)      not null,
+  SIZE                        integer           not null,
+  MD5                         char(32)          not null,
+  CREATED                     timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                    timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  PATH                        varchar(1024)     not null
+);
+
+create index ATTACHMENT_CALENDAR_HOME_RESOURCE_ID on
+  ATTACHMENT(CALENDAR_HOME_RESOURCE_ID);
+
+create index ATTACHMENT_DROPBOX_ID on
+  ATTACHMENT(DROPBOX_ID);
+
+-- Many-to-many relationship between attachments and calendar objects
+create table ATTACHMENT_CALENDAR_OBJECT (
+  ATTACHMENT_ID                  integer      not null references ATTACHMENT on delete cascade,
+  MANAGED_ID                     varchar(255) not null,
+  CALENDAR_OBJECT_RESOURCE_ID    integer      not null references CALENDAR_OBJECT on delete cascade,
+
+  primary key (ATTACHMENT_ID, CALENDAR_OBJECT_RESOURCE_ID), -- implicit index
+  unique (MANAGED_ID, CALENDAR_OBJECT_RESOURCE_ID) --implicit index
+);
+
+create index ATTACHMENT_CALENDAR_OBJECT_CALENDAR_OBJECT_RESOURCE_ID on
+	ATTACHMENT_CALENDAR_OBJECT(CALENDAR_OBJECT_RESOURCE_ID);
+
+-----------------------
+-- Resource Property --
+-----------------------
+
+create table RESOURCE_PROPERTY (
+  RESOURCE_ID integer      not null, -- foreign key: *.RESOURCE_ID
+  NAME        varchar(255) not null,
+  VALUE       text         not null, -- FIXME: xml?
+  VIEWER_UID  varchar(255),
+
+  primary key (RESOURCE_ID, NAME, VIEWER_UID) -- implicit index
+);
+
+
+----------------------
+-- AddressBook Home --
+----------------------
+
+create table ADDRESSBOOK_HOME (
+  RESOURCE_ID      				integer			primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  ADDRESSBOOK_PROPERTY_STORE_ID	integer      	default nextval('RESOURCE_ID_SEQ') not null, 	-- implicit index
+  OWNER_UID        				varchar(255) 	not null unique,                                -- implicit index
+  STATUS           				integer      	default 0 not null,                             -- enum HOME_STATUS
+  DATAVERSION      				integer      	default 0 not null
+);
+
+
+-------------------------------
+-- AddressBook Home Metadata --
+-------------------------------
+
+create table ADDRESSBOOK_HOME_METADATA (
+  RESOURCE_ID      integer      primary key references ADDRESSBOOK_HOME on delete cascade, -- implicit index
+  QUOTA_USED_BYTES integer      default 0 not null,
+  CREATED          timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED         timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+-----------------------------
+-- Shared AddressBook Bind --
+-----------------------------
+
+-- Joins sharee ADDRESSBOOK_HOME and owner ADDRESSBOOK_HOME
+
+create table SHARED_ADDRESSBOOK_BIND (
+  ADDRESSBOOK_HOME_RESOURCE_ID			integer			not null references ADDRESSBOOK_HOME,
+  OWNER_HOME_RESOURCE_ID    			integer      	not null references ADDRESSBOOK_HOME on delete cascade,
+  EXTERNAL_ID			                integer         default null,
+  ADDRESSBOOK_RESOURCE_NAME    			varchar(255) 	not null,
+  BIND_MODE                    			integer      	not null,	-- enum CALENDAR_BIND_MODE
+  BIND_STATUS                  			integer      	not null,	-- enum CALENDAR_BIND_STATUS
+  BIND_REVISION				   			integer      	default 0 not null,
+  MESSAGE                      			text,                  		-- FIXME: xml?
+
+  primary key (ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_NAME)     -- implicit index
+);
+
+create index SHARED_ADDRESSBOOK_BIND_RESOURCE_ID on
+  SHARED_ADDRESSBOOK_BIND(OWNER_HOME_RESOURCE_ID);
+
+
+------------------------
+-- AddressBook Object --
+------------------------
+
+create table ADDRESSBOOK_OBJECT (
+  RESOURCE_ID             		integer   		primary key default nextval('RESOURCE_ID_SEQ'),    -- implicit index
+  ADDRESSBOOK_HOME_RESOURCE_ID 	integer      	not null references ADDRESSBOOK_HOME on delete cascade,
+  RESOURCE_NAME           		varchar(255) 	not null,
+  VCARD_TEXT              		text         	not null,
+  VCARD_UID               		varchar(255) 	not null,
+  KIND 			  		  		integer      	not null,  -- enum ADDRESSBOOK_OBJECT_KIND
+  MD5                     		char(32)     	not null,
+  CREATED                 		timestamp    	default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                		timestamp    	default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, RESOURCE_NAME), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, VCARD_UID)      -- implicit index
+);
+
+
+-----------------------------
+-- AddressBook Object kind --
+-----------------------------
+
+create table ADDRESSBOOK_OBJECT_KIND (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND values (0, 'person');
+insert into ADDRESSBOOK_OBJECT_KIND values (1, 'group' );
+insert into ADDRESSBOOK_OBJECT_KIND values (2, 'resource');
+insert into ADDRESSBOOK_OBJECT_KIND values (3, 'location');
+
+
+----------------------------------
+-- Revisions, forward reference --
+----------------------------------
+
+create sequence REVISION_SEQ;
+
+---------------------------------
+-- Address Book Object Members --
+---------------------------------
+
+create table ABO_MEMBERS (
+    GROUP_ID   		integer		not null, -- references ADDRESSBOOK_OBJECT on delete cascade,	-- AddressBook Object's (kind=='group') RESOURCE_ID
+ 	ADDRESSBOOK_ID	integer		not null references ADDRESSBOOK_HOME on delete cascade,
+    MEMBER_ID      	integer		not null, -- references ADDRESSBOOK_OBJECT,						-- member AddressBook Object's RESOURCE_ID
+  	REVISION        integer   	default nextval('REVISION_SEQ') not null,
+  	REMOVED        	boolean		default false not null,
+	MODIFIED        timestamp	default timezone('UTC', CURRENT_TIMESTAMP),
+
+    primary key (GROUP_ID, MEMBER_ID, REVISION) -- implicit index
+);
+
+create index ABO_MEMBERS_ADDRESSBOOK_ID on
+	ABO_MEMBERS(ADDRESSBOOK_ID);
+create index ABO_MEMBERS_MEMBER_ID on
+	ABO_MEMBERS(MEMBER_ID);
+
+------------------------------------------
+-- Address Book Object Foreign Members  --
+------------------------------------------
+
+create table ABO_FOREIGN_MEMBERS (
+    GROUP_ID              integer      not null references ADDRESSBOOK_OBJECT on delete cascade,	-- AddressBook Object's (kind=='group') RESOURCE_ID
+ 	ADDRESSBOOK_ID		  integer      not null references ADDRESSBOOK_HOME on delete cascade,
+    MEMBER_ADDRESS  	  varchar(255) not null, 													-- member AddressBook Object's 'calendar' address
+
+    primary key (GROUP_ID, MEMBER_ADDRESS) -- implicit index
+);
+
+create index ABO_FOREIGN_MEMBERS_ADDRESSBOOK_ID on
+	ABO_FOREIGN_MEMBERS(ADDRESSBOOK_ID);
+
+-----------------------
+-- Shared Group Bind --
+-----------------------
+
+-- Joins ADDRESSBOOK_HOME and ADDRESSBOOK_OBJECT (kind == group)
+
+create table SHARED_GROUP_BIND (	
+  ADDRESSBOOK_HOME_RESOURCE_ID 		integer      not null references ADDRESSBOOK_HOME,
+  GROUP_RESOURCE_ID      			integer      not null references ADDRESSBOOK_OBJECT on delete cascade,
+  EXTERNAL_ID			            integer      default null,
+  GROUP_ADDRESSBOOK_NAME			varchar(255) not null,
+  BIND_MODE                    		integer      not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS                  		integer      not null, -- enum CALENDAR_BIND_STATUS
+  BIND_REVISION				   		integer      default 0 not null,
+  MESSAGE                      		text,                  -- FIXME: xml?
+
+  primary key (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_RESOURCE_ID), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_ADDRESSBOOK_NAME)  -- implicit index
+);
+
+create index SHARED_GROUP_BIND_RESOURCE_ID on
+  SHARED_GROUP_BIND(GROUP_RESOURCE_ID);
+
+
+---------------
+-- Revisions --
+---------------
+
+-- create sequence REVISION_SEQ;
+
+
+-------------------------------
+-- Calendar Object Revisions --
+-------------------------------
+
+create table CALENDAR_OBJECT_REVISIONS (
+  CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
+  CALENDAR_RESOURCE_ID      integer      references CALENDAR,
+  CALENDAR_NAME             varchar(255) default null,
+  RESOURCE_NAME             varchar(255),
+  REVISION                  integer      default nextval('REVISION_SEQ') not null,
+  DELETED                   boolean      not null,
+  MODIFIED                  timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index CALENDAR_OBJECT_REVISIONS_HOME_RESOURCE_ID_CALENDAR_RESOURCE_ID
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, REVISION);
+
+
+----------------------------------
+-- AddressBook Object Revisions --
+----------------------------------
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+  ADDRESSBOOK_HOME_RESOURCE_ID 	integer		 not null references ADDRESSBOOK_HOME,
+  OWNER_HOME_RESOURCE_ID    	integer      references ADDRESSBOOK_HOME,
+  ADDRESSBOOK_NAME             	varchar(255) default null,
+  OBJECT_RESOURCE_ID			integer		 default 0,
+  RESOURCE_NAME                	varchar(255),
+  REVISION                     	integer      default nextval('REVISION_SEQ') not null,
+  DELETED                      	boolean   	 not null,
+  MODIFIED                  	timestamp 	 default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_HOME_RESOURCE_ID_OWNER_HOME_RESOURCE_ID
+  on ADDRESSBOOK_OBJECT_REVISIONS(ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, REVISION);
+
+
+-----------------------------------
+-- Notification Object Revisions --
+-----------------------------------
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+  NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME on delete cascade,
+  RESOURCE_NAME                 varchar(255),
+  REVISION                      integer      default nextval('REVISION_SEQ') not null,
+  DELETED                       boolean      not null,
+  MODIFIED                  	timestamp	 default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique(NOTIFICATION_HOME_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+);
+
+create index NOTIFICATION_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on NOTIFICATION_OBJECT_REVISIONS(NOTIFICATION_HOME_RESOURCE_ID, REVISION);
+
+
+-------------------------------------------
+-- Apple Push Notification Subscriptions --
+-------------------------------------------
+
+create table APN_SUBSCRIPTIONS (
+  TOKEN                         varchar(255) not null,
+  RESOURCE_KEY                  varchar(255) not null,
+  MODIFIED                      integer      not null,
+  SUBSCRIBER_GUID               varchar(255) not null,
+  USER_AGENT                    varchar(255) default null,
+  IP_ADDR                       varchar(255) default null,
+
+  primary key (TOKEN, RESOURCE_KEY) -- implicit index
+);
+
+create index APN_SUBSCRIPTIONS_RESOURCE_KEY
+   on APN_SUBSCRIPTIONS(RESOURCE_KEY);
+
+   
+-----------------
+-- IMIP Tokens --
+-----------------
+
+create table IMIP_TOKENS (
+  TOKEN                         varchar(255) not null,
+  ORGANIZER                     varchar(255) not null,
+  ATTENDEE                      varchar(255) not null,
+  ICALUID                       varchar(255) not null,
+  ACCESSED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  primary key (ORGANIZER, ATTENDEE, ICALUID) -- implicit index
+);
+
+create index IMIP_TOKENS_TOKEN
+   on IMIP_TOKENS(TOKEN);
+
+   
+----------------
+-- Work Items --
+----------------
+
+create sequence WORKITEM_SEQ;
+
+
+---------------------------
+-- IMIP Invitation Work --
+---------------------------
+
+create table IMIP_INVITATION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  FROM_ADDR                     varchar(255) not null,
+  TO_ADDR                       varchar(255) not null,
+  ICALENDAR_TEXT                text         not null
+);
+
+
+-----------------------
+-- IMIP Polling Work --
+-----------------------
+
+create table IMIP_POLLING_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------
+-- IMIP Reply Work --
+---------------------
+
+create table IMIP_REPLY_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  ORGANIZER                     varchar(255) not null,
+  ATTENDEE                      varchar(255) not null,
+  ICALENDAR_TEXT                text         not null
+);
+
+
+------------------------
+-- Push Notifications --
+------------------------
+
+create table PUSH_NOTIFICATION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  PUSH_ID                       varchar(255) not null,
+  PRIORITY                      integer      not null -- 1:low 5:medium 10:high
+);
+
+-----------------
+-- GroupCacher --
+-----------------
+
+create table GROUP_CACHER_POLLING_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+--------------------------
+-- Object Splitter Work --
+--------------------------
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade
+);
+
+create index CALENDAR_OBJECT_SPLITTER_WORK_RESOURCE_ID on
+	CALENDAR_OBJECT_SPLITTER_WORK(RESOURCE_ID);
+
+---------------------------
+-- Revision Cleaner Work --
+---------------------------
+
+create table FIND_MIN_VALID_REVISION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create table REVISION_CLEANUP_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+  NOT_BEFORE                    timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+--------------------
+-- Schema Version --
+--------------------
+
+create table CALENDARSERVER (
+  NAME                          varchar(255) primary key, -- implicit index
+  VALUE                         varchar(255)
+);
+
+insert into CALENDARSERVER values ('VERSION', '33');
+insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '5');
+insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');
+insert into CALENDARSERVER values ('MIN-VALID-REVISION', '1');

Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_32_to_33.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_32_to_33.sql	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_32_to_33.sql	2014-01-22 04:07:03 UTC (rev 12418)
@@ -15,7 +15,7 @@
 ----
 
 ---------------------------------------------------
--- Upgrade database schema from VERSION 30 to 31 --
+-- Upgrade database schema from VERSION 32 to 33 --
 ---------------------------------------------------
 
 -- Add timestamp to revision tables --
@@ -42,4 +42,4 @@
  
 -- Update version --
 
-update CALENDARSERVER set VALUE = '31' where NAME = 'VERSION';
+update CALENDARSERVER set VALUE = '33' where NAME = 'VERSION';

Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_33_to_34.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_33_to_34.sql	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_33_to_34.sql	2014-01-22 04:07:03 UTC (rev 12418)
@@ -0,0 +1,58 @@
+----
+-- Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 33 to 34 --
+---------------------------------------------------
+
+-- Add migration column to tables --
+
+alter table CALENDAR_HOME
+  drop unique ("OWNER_UID");
+alter table CALENDAR_HOME
+  add ("MIGRATION" integer default 0 not null);
+alter table CALENDAR_HOME
+  add unique(OWNER_UID, MIGRATION);
+
+alter table NOTIFICATION_HOME
+  drop unique ("OWNER_UID");
+alter table NOTIFICATION_HOME
+  add ("MIGRATION" integer default 0 not null);
+alter table NOTIFICATION_HOME
+  add unique(OWNER_UID, MIGRATION);
+
+alter table ADDRESSBOOK_HOME
+  drop unique ("OWNER_UID");
+alter table ADDRESSBOOK_HOME
+  add ("MIGRATION" integer default 0 not null);
+alter table ADDRESSBOOK_HOME
+  add unique(OWNER_UID, MIGRATION);
+
+
+create table MIGRATION_STATUS (
+    "ID" integer primary key,
+    "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('none', 0);
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('migrating', 1);
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('migrated', 2);
+
+-- Update version --
+
+update CALENDARSERVER set VALUE = '34' where NAME = 'VERSION';
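
The substantive change in this upgrade is the key change: the single-column
unique constraint on OWNER_UID is replaced by a composite (OWNER_UID,
MIGRATION) key, so the live copy of a home and an in-flight copy being
migrated across pods can coexist in one database. A small sqlite sketch of
the resulting uniqueness rule (table trimmed to the relevant columns):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("""
        create table CALENDAR_HOME (
            RESOURCE_ID integer primary key,
            OWNER_UID   varchar(255) not null,
            MIGRATION   integer default 0 not null,
            unique (OWNER_UID, MIGRATION)
        )
    """)
    # The live home (0 = 'none') and a migrating copy (1 = 'migrating') can
    # now share an owner UID...
    conn.execute("insert into CALENDAR_HOME (OWNER_UID, MIGRATION) values ('user01', 0)")
    conn.execute("insert into CALENDAR_HOME (OWNER_UID, MIGRATION) values ('user01', 1)")
    # ...but a second row with the same (owner, migration) pair is rejected.
    try:
        conn.execute("insert into CALENDAR_HOME (OWNER_UID, MIGRATION) values ('user01', 0)")
    except sqlite3.IntegrityError:
        print("duplicate (OWNER_UID, MIGRATION) pair rejected")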

Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_32_to_33.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_32_to_33.sql	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_32_to_33.sql	2014-01-22 04:07:03 UTC (rev 12418)
@@ -15,7 +15,7 @@
 ----
 
 ---------------------------------------------------
--- Upgrade database schema from VERSION 30 to 31 --
+-- Upgrade database schema from VERSION 32 to 33 --
 ---------------------------------------------------
 
 -- Add timestamp to revision tables --

Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_33_to_34.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_33_to_34.sql	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_33_to_34.sql	2014-01-22 04:07:03 UTC (rev 12418)
@@ -0,0 +1,50 @@
+----
+-- Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 33 to 34 --
+---------------------------------------------------
+
+-- Add migration column to tables --
+
+alter table CALENDAR_HOME
+  drop constraint CALENDAR_HOME_OWNER_UID_KEY,
+  add column MIGRATION        integer default 0 not null,
+  add unique(OWNER_UID, MIGRATION);
+
+alter table NOTIFICATION_HOME
+  drop constraint NOTIFICATION_HOME_OWNER_UID_KEY,
+  add column MIGRATION        integer default 0 not null,
+  add unique(OWNER_UID, MIGRATION);
+
+alter table ADDRESSBOOK_HOME
+  drop constraint ADDRESSBOOK_HOME_OWNER_UID_KEY,
+  add column MIGRATION        integer default 0 not null,
+  add unique(OWNER_UID, MIGRATION);
+
+
+create table MIGRATION_STATUS (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into MIGRATION_STATUS values (0, 'none' );
+insert into MIGRATION_STATUS values (1, 'migrating');
+insert into MIGRATION_STATUS values (2, 'migrated');
+
+-- Update version --
+
+update CALENDARSERVER set VALUE = '34' where NAME = 'VERSION';
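
Note that the two dialects insert the MIGRATION_STATUS rows differently
(Oracle names its columns, Postgres relies on position), but the resulting
lookup table is the same. The server folds such lookup tables into module
constants at schema-load time (see the sql_tables.py hunk below); a rough
standalone equivalent, assuming any DB-API cursor:

    def load_migration_status(cursor):
        # Build a DESCRIPTION -> ID map from the MIGRATION_STATUS lookup table.
        cursor.execute("select DESCRIPTION, ID from MIGRATION_STATUS")
        return dict(cursor.fetchall())

    # With the three rows inserted above, this yields:
    #   {'none': 0, 'migrating': 1, 'migrated': 2}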

Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_tables.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_tables.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_tables.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -138,10 +138,20 @@
     schema.HOME_STATUS.ID
 )
 
-
 _HOME_STATUS_NORMAL = _homeStatus('normal')
 _HOME_STATUS_EXTERNAL = _homeStatus('external')
 
+
+_migrationStatus = _schemaConstants(
+    schema.MIGRATION_STATUS.DESCRIPTION,
+    schema.MIGRATION_STATUS.ID
+)
+
+_MIGRATION_STATUS_NONE = _migrationStatus('none')
+_MIGRATION_STATUS_MIGRATING = _migrationStatus('migrating')
+_MIGRATION_STATUS_MIGRATED = _migrationStatus('migrated')
+
+
 _bindStatus = _schemaConstants(
     schema.CALENDAR_BIND_STATUS.DESCRIPTION,
     schema.CALENDAR_BIND_STATUS.ID
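
These constants are what flows through the new migration= keyword that the
test utilities below pass to calendarHomeWithUID. A sketch of fetching the
in-flight copy of a home rather than the live one (migratingCalendarHome is
a hypothetical helper; txn is any open store transaction):

    from twisted.internet.defer import inlineCallbacks, returnValue

    from txdav.common.datastore.sql_tables import _MIGRATION_STATUS_MIGRATING

    @inlineCallbacks
    def migratingCalendarHome(txn, uid):
        # Fetch (creating if absent) the migrating copy of the calendar home;
        # the default, _MIGRATION_STATUS_NONE, would return the live one.
        home = yield txn.calendarHomeWithUID(uid, create=True,
                                             migration=_MIGRATION_STATUS_MIGRATING)
        returnValue(home)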

Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/test/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/test/util.py	2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/test/util.py	2014-01-22 04:07:03 UTC (rev 12418)
@@ -60,7 +60,7 @@
 from txdav.base.propertystore.base import PropertyName
 from txdav.caldav.icalendarstore import ComponentUpdateState
 from txdav.common.datastore.sql import CommonDataStore, current_sql_schema
-from txdav.common.datastore.sql_tables import schema
+from txdav.common.datastore.sql_tables import schema, _MIGRATION_STATUS_NONE
 from txdav.common.icommondatastore import NoSuchHomeChildError
 
 from zope.interface.exceptions import BrokenMethodImplementation, \
@@ -342,10 +342,8 @@
         yield cleanupTxn.commit()
 
         # Deal with memcached items that must be cleared
-        from txdav.caldav.datastore.sql import CalendarHome
-        CalendarHome._cacher.flushAll()
-        from txdav.carddav.datastore.sql import AddressBookHome
-        AddressBookHome._cacher.flushAll()
+        if storeToClean.queryCacher:
+            storeToClean.queryCacher.flushAll()
         from txdav.base.propertystore.sql import PropertyStore
         PropertyStore._cacher.flushAll()
 
@@ -723,13 +721,13 @@
 
 
     @inlineCallbacks
-    def homeUnderTest(self, txn=None, name="home1", create=False):
+    def homeUnderTest(self, txn=None, name="home1", create=False, migration=_MIGRATION_STATUS_NONE):
         """
         Get the calendar home detailed by C{requirements['home1']}.
         """
         if txn is None:
             txn = self.transactionUnderTest()
-        returnValue((yield txn.calendarHomeWithUID(name, create=create)))
+        returnValue((yield txn.calendarHomeWithUID(name, create=create, migration=migration)))
 
 
     @inlineCallbacks
@@ -752,13 +750,13 @@
                      .calendarObjectWithName(name)))
 
 
-    def addressbookHomeUnderTest(self, txn=None, name="home1"):
+    def addressbookHomeUnderTest(self, txn=None, name="home1", create=False, migration=_MIGRATION_STATUS_NONE):
         """
         Get the addressbook home detailed by C{requirements['home1']}.
         """
         if txn is None:
             txn = self.transactionUnderTest()
-        return txn.addressbookHomeWithUID(name)
+        return txn.addressbookHomeWithUID(name, create=create, migration=migration)
 
 
     @inlineCallbacks
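
With both helpers now taking the same create/migration keywords, a test that
exercises the new column end-to-end might look like the following sketch
(the test class and assertion are hypothetical, and the store fixture setUp
is omitted; the helper signatures are the ones added above):

    from twisted.trial import unittest
    from twisted.internet.defer import inlineCallbacks

    from txdav.common.datastore.sql_tables import _MIGRATION_STATUS_MIGRATING
    from txdav.common.datastore.test.util import CommonCommonTests

    class MigrationHomeTests(CommonCommonTests, unittest.TestCase):
        @inlineCallbacks
        def test_migratingHomesCoexist(self):
            # The live copy and a migrating copy of the same UID can now both
            # exist, thanks to the (OWNER_UID, MIGRATION) unique key.
            live = yield self.homeUnderTest(name="home1", create=True)
            copy = yield self.homeUnderTest(name="home1", create=True,
                                            migration=_MIGRATION_STATUS_MIGRATING)
            self.assertNotEqual(live.id(), copy.id())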