<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[12418] CalendarServer/branches/users/cdaboo/pod-migration/txdav</title>
</head>
<body>
<style type="text/css"><!--
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; }
#msg dl a { font-weight: bold}
#msg dl a:link { color:#fc3; }
#msg dl a:active { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
#logmsg ul { text-indent: -1em; padding-left: 1em; }#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
--></style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.calendarserver.org/changeset/12418">12418</a></dd>
<dt>Author</dt> <dd>cdaboo@apple.com</dd>
<dt>Date</dt> <dd>2014-01-21 20:07:03 -0800 (Tue, 21 Jan 2014)</dd>
</dl>
<h3>Log Message</h3>
<pre>Checkpoint: cross-pod migration. Currently supports home child sync without data.</pre>
<h3>Modified Paths</h3>
<ul>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavbasedatastoreutilpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/base/datastore/util.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcaldavdatastoreschedulepy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/schedule.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcaldavdatastoresqlpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/sql.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcarddavdatastoresqlpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcarddavdatastoresql_externalpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql_external.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorefilepy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/file.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorepoddingconduitpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/conduit.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorepoddingtesttest_conduitpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_conduit.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorepoddingtestutilpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/util.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresqlpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_externalpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_external.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemacurrentoracledialectsql">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current-oracle-dialect.sql</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemacurrentsql">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current.sql</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaupgradesoracledialectupgrade_from_32_to_33sql">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_32_to_33.sql</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaupgradespostgresdialectupgrade_from_32_to_33sql">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_32_to_33.sql</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_tablespy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_tables.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoretestutilpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/test/util.py</a></li>
</ul>
<h3>Added Paths</h3>
<ul>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorepoddingmigrationpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/migration.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorepoddingtesttest_migrationpy">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_migration.py</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaoldoracledialectv33sql">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/oracle-dialect/v33.sql</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaoldpostgresdialectv33sql">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/postgres-dialect/v33.sql</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaupgradesoracledialectupgrade_from_33_to_34sql">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_33_to_34.sql</a></li>
<li><a href="#CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaupgradespostgresdialectupgrade_from_33_to_34sql">CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_33_to_34.sql</a></li>
</ul>
</div>
<div id="patch">
<h3>Diff</h3>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavbasedatastoreutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/base/datastore/util.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/base/datastore/util.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/base/datastore/util.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -104,6 +104,12 @@
</span><span class="cx"> return "objectWithExternalID:%s:%s" % (homeResourceID, externalID)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ # Home
+
+ def keyForHomeData(self, homeType, ownerUID, migration):
+ return "homeData:%s.%s.%s" % (homeType, migration, ownerUID)
+
+
</ins><span class="cx"> # Home metadata (Created/Modified)
</span><span class="cx">
</span><span class="cx"> def keyForHomeMetaData(self, homeResourceID):
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcaldavdatastoreschedulepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/schedule.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/schedule.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/schedule.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -21,6 +21,7 @@
</span><span class="cx"> from twisted.python.util import FancyEqMixin
</span><span class="cx"> from twisted.python.components import proxyForInterface
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><ins>+from txdav.common.datastore.sql_tables import _MIGRATION_STATUS_NONE
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -41,10 +42,10 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def calendarHomeWithUID(self, uid, create=False):
</del><ins>+ def calendarHomeWithUID(self, uid, create=False, migration=_MIGRATION_STATUS_NONE):
</ins><span class="cx"> # FIXME: 'create' flag
</span><span class="cx"> newHome = yield super(ImplicitTransaction, self
</span><del>- ).calendarHomeWithUID(uid, create)
</del><ins>+ ).calendarHomeWithUID(uid, create=create, migration=migration)
</ins><span class="cx"> # return ImplicitCalendarHome(newHome, self)
</span><span class="cx"> if newHome is None:
</span><span class="cx"> returnValue(None)
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcaldavdatastoresqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/sql.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/sql.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/caldav/datastore/sql.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -55,7 +55,6 @@
</span><span class="cx"> pyCalendarTodatetime, parseSQLDateToPyCalendar
</span><span class="cx"> from twistedcaldav.ical import Component, InvalidICalendarDataError, Property
</span><span class="cx"> from twistedcaldav.instance import InvalidOverriddenInstanceError
</span><del>-from twistedcaldav.memcacher import Memcacher
</del><span class="cx">
</span><span class="cx"> from txdav.base.propertystore.base import PropertyName
</span><span class="cx"> from txdav.caldav.datastore.query.builder import buildExpression
</span><span class="lines">@@ -415,8 +414,6 @@
</span><span class="cx"> _notifierPrefix = "CalDAV"
</span><span class="cx"> _dataVersionKey = "CALENDAR-DATAVERSION"
</span><span class="cx">
</span><del>- _cacher = Memcacher("SQL.calhome", pickle=True, key_normalization=False)
-
</del><span class="cx"> _componentCalendarName = {
</span><span class="cx"> "VEVENT": "calendar",
</span><span class="cx"> "VTODO": "tasks",
</span><span class="lines">@@ -517,7 +514,10 @@
</span><span class="cx"> Where=rp.RESOURCE_ID == self._resourceID
</span><span class="cx"> ).on(self._txn)
</span><span class="cx">
</span><del>- yield self._cacher.delete(str(self._ownerUID))
</del><ins>+ queryCacher = self._txn._queryCacher
+ if queryCacher:
+ cacheKey = queryCacher.keyForHomeData(self._homeType, self._ownerUID, self._migration)
+ yield queryCacher.delete(cacheKey)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcarddavdatastoresqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -41,7 +41,6 @@
</span><span class="cx">
</span><span class="cx"> from twistedcaldav import carddavxml, customxml
</span><span class="cx"> from twistedcaldav.config import config
</span><del>-from twistedcaldav.memcacher import Memcacher
</del><span class="cx"> from twistedcaldav.vcard import Component as VCard, InvalidVCardDataError, Property, \
</span><span class="cx"> vCardProductID
</span><span class="cx">
</span><span class="lines">@@ -58,7 +57,8 @@
</span><span class="cx"> from txdav.common.datastore.sql_tables import _ABO_KIND_PERSON, \
</span><span class="cx"> _ABO_KIND_GROUP, _ABO_KIND_RESOURCE, _ABO_KIND_LOCATION, schema, \
</span><span class="cx"> _BIND_MODE_OWN, _BIND_MODE_WRITE, _BIND_STATUS_ACCEPTED, \
</span><del>- _BIND_STATUS_INVITED, _BIND_MODE_INDIRECT, _BIND_STATUS_DECLINED
</del><ins>+ _BIND_STATUS_INVITED, _BIND_MODE_INDIRECT, _BIND_STATUS_DECLINED, \
+ _MIGRATION_STATUS_NONE, _MIGRATION_STATUS_MIGRATING, _HOME_STATUS_EXTERNAL
</ins><span class="cx"> from txdav.common.icommondatastore import InternalDataStoreError, \
</span><span class="cx"> InvalidUIDError, UIDExistsError, ObjectResourceTooBigError, \
</span><span class="cx"> InvalidObjectResourceError, InvalidComponentForStoreError, \
</span><span class="lines">@@ -84,12 +84,11 @@
</span><span class="cx">
</span><span class="cx"> _notifierPrefix = "CardDAV"
</span><span class="cx"> _dataVersionKey = "ADDRESSBOOK-DATAVERSION"
</span><del>- _cacher = Memcacher("SQL.adbkhome", pickle=True, key_normalization=False)
</del><span class="cx">
</span><span class="cx">
</span><del>- def __init__(self, transaction, ownerUID):
</del><ins>+ def __init__(self, transaction, ownerUID, migration=_MIGRATION_STATUS_NONE):
</ins><span class="cx">
</span><del>- super(AddressBookHome, self).__init__(transaction, ownerUID)
</del><ins>+ super(AddressBookHome, self).__init__(transaction, ownerUID, migration=migration)
</ins><span class="cx"> self._addressbookPropertyStoreID = None
</span><span class="cx"> self._addressbook = None
</span><span class="cx">
</span><span class="lines">@@ -116,6 +115,8 @@
</span><span class="cx"> return (
</span><span class="cx"> cls._homeSchema.RESOURCE_ID,
</span><span class="cx"> cls._homeSchema.OWNER_UID,
</span><ins>+ cls._homeSchema.STATUS,
+ cls._homeSchema.MIGRATION,
</ins><span class="cx"> cls._homeSchema.ADDRESSBOOK_PROPERTY_STORE_ID,
</span><span class="cx"> )
</span><span class="cx">
</span><span class="lines">@@ -131,40 +132,37 @@
</span><span class="cx"> return (
</span><span class="cx"> "_resourceID",
</span><span class="cx"> "_ownerUID",
</span><ins>+ "_status",
+ "_migration",
</ins><span class="cx"> "_addressbookPropertyStoreID",
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def initFromStore(self, no_cache=False):
</del><ins>+ def made(self):
</ins><span class="cx"> """
</span><del>- Initialize this object from the store. We read in and cache all the
- extra meta-data from the DB to avoid having to do DB queries for those
- individually later.
</del><ins>+ Create the default owned address book.
</ins><span class="cx"> """
</span><span class="cx">
</span><del>- result = yield super(AddressBookHome, self).initFromStore(no_cache)
- if result is not None:
- # Created owned address book
- addressbook = AddressBook(
- home=self,
- name="addressbook",
- resourceID=self._resourceID,
- mode=_BIND_MODE_OWN,
- status=_BIND_STATUS_ACCEPTED,
- )
- yield addressbook._loadPropertyStore()
</del><ins>+ # Created owned address book
+ c = AddressBook._externalClass if self.external() else AddressBook
+ addressbook = c(
+ home=self,
+ name="addressbook",
+ resourceID=self._resourceID,
+ mode=_BIND_MODE_OWN,
+ status=_BIND_STATUS_ACCEPTED,
+ )
+ yield addressbook._loadPropertyStore()
</ins><span class="cx">
</span><del>- # Extra check for shared
- invites = yield addressbook.sharingInvites()
- if len(invites) != 0:
- addressbook._bindMessage = "shared"
</del><ins>+ # Extra check for shared
+ invites = yield addressbook.sharingInvites()
+ if len(invites) != 0:
+ addressbook._bindMessage = "shared"
</ins><span class="cx">
</span><del>- self._addressbook = addressbook
</del><ins>+ self._addressbook = addressbook
</ins><span class="cx">
</span><del>- returnValue(result)
</del><span class="cx">
</span><del>-
</del><span class="cx"> @inlineCallbacks
</span><span class="cx"> def remove(self):
</span><span class="cx"> ah = schema.ADDRESSBOOK_HOME
</span><span class="lines">@@ -194,7 +192,10 @@
</span><span class="cx"> )
</span><span class="cx"> ).on(self._txn)
</span><span class="cx">
</span><del>- yield self._cacher.delete(str(self._ownerUID))
</del><ins>+ queryCacher = self._txn._queryCacher
+ if queryCacher:
+ cacheKey = queryCacher.keyForHomeData(self._homeType, self._ownerUID, self._migration)
+ yield queryCacher.delete(cacheKey)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -352,6 +353,49 @@
</span><span class="cx"> returnValue(result)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ #
+ # Migration related
+ #
+
+ @inlineCallbacks
+ def migrateToThisPod(self, user, final):
+ """
+ Migrate data for the specified user (on a different pod) to this pod. L{final} is
+ C{True} when this is the final incremental sync (where we need to sync everything).
+ When C{False} we only sync owned child collections (without properties).
+
+ @param user: the user to migrate
+ @type user: C{str}
+ @param final: whether or not this is the final sync
+ @type final: C{bool}
+ """
+
+ assert self._migration == _MIGRATION_STATUS_MIGRATING
+
+ # Get external home for the user (create if needed)
+ otherHome = yield self._txn.homeWithUID(self._homeType, user, create=True)
+ assert otherHome._status == _HOME_STATUS_EXTERNAL
+
+ # Force the external home to look like it is migrating. This will enable certain external API calls
+ # that are normally disabled for sharing (e.g., ability to load all child resources).
+ otherHome._migration = _MIGRATION_STATUS_MIGRATING
+
+ local_children = yield self.loadChildren()
+ local_children = dict([(child.external_id(), child) for child in local_children if child.owned()])
+
+ # Get list of owned child collections
+ remote_children = yield otherHome.loadChildren()
+ remote_children = dict([(child.external_id(), child) for child in remote_children if child.owned()])
+
+ # For address books, there is only one owned child
+ assert len(local_children) == 1
+ assert len(remote_children) == 1
+
+ local_child = local_children.values()[0]
+ remote_child = remote_children.values()[0]
+ yield local_child.migrateToThisPod(remote_child, final)
+
+
</ins><span class="cx"> AddressBookHome._register(EADDRESSBOOKTYPE)
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -500,7 +544,9 @@
</span><span class="cx"> overallBindStatus = _BIND_STATUS_INVITED
</span><span class="cx"> minBindRevision = None
</span><span class="cx"> for row in rows:
</span><del>- bindMode, homeID, resourceGroupID, externalID, name, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
</del><ins>+ resourceGroupID = row[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
+ bindStatus = row[cls.bindColumns().index(cls._bindSchema.BIND_STATUS)]
+ bindRevision = row[cls.bindColumns().index(cls._bindSchema.BIND_REVISION)]
</ins><span class="cx"> if groupID is None:
</span><span class="cx"> groupID = resourceGroupID
</span><span class="cx"> minBindRevision = min(minBindRevision, bindRevision) if minBindRevision is not None else bindRevision
</span><span class="lines">@@ -1124,7 +1170,7 @@
</span><span class="cx"> home._txn, homeID=home._resourceID
</span><span class="cx"> )
</span><span class="cx"> for groupRow in groupRows:
</span><del>- bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ resourceID = groupRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
</ins><span class="cx"> ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
</span><span class="cx"> ownerHome = yield home._txn.homeWithResourceID(home._homeType, ownerAddressBookID, create=True)
</span><span class="cx"> names |= set([ownerHome.uid()])
</span><span class="lines">@@ -1152,7 +1198,7 @@
</span><span class="cx"> )
</span><span class="cx"> # get ownerHomeIDs
</span><span class="cx"> for dataRow in dataRows:
</span><del>- bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
</del><ins>+ resourceID = dataRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
</ins><span class="cx"> ownerHome = yield home.ownerHomeWithChildID(resourceID)
</span><span class="cx"> ownerHomeToDataRowMap[ownerHome] = dataRow
</span><span class="cx">
</span><span class="lines">@@ -1161,12 +1207,15 @@
</span><span class="cx"> home._txn, homeID=home._resourceID
</span><span class="cx"> )
</span><span class="cx"> for groupBindRow in groupBindRows:
</span><del>- bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ resourceID = dataRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
</ins><span class="cx"> ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
</span><span class="cx"> ownerHome = yield home.ownerHomeWithChildID(ownerAddressBookID)
</span><span class="cx"> if ownerHome not in ownerHomeToDataRowMap:
</span><del>- groupBindRow[0] = _BIND_MODE_INDIRECT
- groupBindRow[3:7] = 4 * [None] # bindName, bindStatus, bindRevision, bindMessage
</del><ins>+ groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_MODE)] = _BIND_MODE_INDIRECT
+ groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_NAME)] = None
+ groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_STATUS)] = None
+ groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_REVISION)] = None
+ groupBindRow[cls.bindColumns().index(cls._bindSchema.MESSAGE)] = None
</ins><span class="cx"> ownerHomeToDataRowMap[ownerHome] = groupBindRow
</span><span class="cx">
</span><span class="cx"> if ownerHomeToDataRowMap:
</span><span class="lines">@@ -1404,7 +1453,8 @@
</span><span class="cx"> readWriteGroupIDs = set()
</span><span class="cx"> readOnlyGroupIDs = set()
</span><span class="cx"> for groupBindRow in groupBindRows:
</span><del>- bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ resourceID = groupBindRow[self.bindColumns().index(self._bindSchema.RESOURCE_ID)]
+ bindMode = groupBindRow[self.bindColumns().index(self._bindSchema.BIND_MODE)]
</ins><span class="cx"> if bindMode == _BIND_MODE_WRITE:
</span><span class="cx"> readWriteGroupIDs.add(resourceID)
</span><span class="cx"> else:
</span><span class="lines">@@ -1465,7 +1515,7 @@
</span><span class="cx"> readWriteGroupIDs = []
</span><span class="cx"> readOnlyGroupIDs = []
</span><span class="cx"> for groupBindRow in groupBindRows:
</span><del>- bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ #bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx"> if bindMode == _BIND_MODE_WRITE:
</span><span class="cx"> readWriteGroupIDs.append(resourceID)
</span><span class="cx"> else:
</span><span class="lines">@@ -1898,12 +1948,12 @@
</span><span class="cx"> yield child._loadPropertyStore(propstore)
</span><span class="cx">
</span><span class="cx"> if groupBindData:
</span><del>- bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupBindData[:AddressBookObject.bindColumnCount] #@UnusedVariable
- child._bindMode = bindMode
- child._bindStatus = bindStatus
- child._bindMessage = bindMessage
- child._bindName = bindName
- child._bindRevision = bindRevision
</del><ins>+ bindData = dict(zip(cls.bindColumns(), groupBindData[:AddressBookObject.bindColumnCount]))
+ child._bindMode = bindData[cls._bindSchema.BIND_MODE]
+ child._bindStatus = bindData[cls._bindSchema.BIND_STATUS]
+ child._bindMessage = bindData[cls._bindSchema.MESSAGE]
+ child._bindName = bindData[cls._bindSchema.RESOURCE_NAME]
+ child._bindRevision = bindData[cls._bindSchema.BIND_REVISION]
</ins><span class="cx"> else:
</span><span class="cx"> invites = yield child.sharingInvites()
</span><span class="cx"> if len(invites):
</span><span class="lines">@@ -2195,7 +2245,8 @@
</span><span class="cx"> )
</span><span class="cx"> if groupBindRows:
</span><span class="cx"> groupBindRow = groupBindRows[0]
</span><del>- bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ resourceID = groupBindRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
+ bindStatus = groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_STATUS)]
</ins><span class="cx">
</span><span class="cx"> if accepted is not None and (bindStatus == _BIND_STATUS_ACCEPTED) != bool(accepted):
</span><span class="cx"> returnValue(None)
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcarddavdatastoresql_externalpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql_external.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql_external.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/carddav/datastore/sql_external.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -14,6 +14,7 @@
</span><span class="cx"> # See the License for the specific language governing permissions and
</span><span class="cx"> # limitations under the License.
</span><span class="cx"> ##
</span><ins>+from txdav.common.datastore.sql_tables import _MIGRATION_STATUS_MIGRATING
</ins><span class="cx"> """
</span><span class="cx"> SQL backend for CardDAV storage when resources are external.
</span><span class="cx"> """
</span><span class="lines">@@ -62,10 +63,14 @@
</span><span class="cx"> """
</span><span class="cx"> No children.
</span><span class="cx"> """
</span><del>- raise AssertionError("CommonHomeExternal: not supported")
</del><ins>+ # Only available if migrating
+ if self._migration != _MIGRATION_STATUS_MIGRATING:
+ raise AssertionError("CommonHomeExternal: not supported")
</ins><span class="cx">
</span><ins>+ return super(CommonHomeExternal, self).addressbook()
</ins><span class="cx">
</span><span class="cx">
</span><ins>+
</ins><span class="cx"> class AddressBookExternal(CommonHomeChildExternal, AddressBook):
</span><span class="cx"> """
</span><span class="cx"> SQL-based implementation of L{IAddressBook}.
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorefilepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/file.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/file.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/file.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -304,15 +304,15 @@
</span><span class="cx"> CommonStoreTransaction._homeClass[EADDRESSBOOKTYPE] = AddressBookHome
</span><span class="cx">
</span><span class="cx">
</span><del>- def calendarHomeWithUID(self, uid, create=False):
- return self.homeWithUID(ECALENDARTYPE, uid, create=create)
</del><ins>+ def calendarHomeWithUID(self, uid, create=False, migration=None):
+ return self.homeWithUID(ECALENDARTYPE, uid, create=create, migration=migration)
</ins><span class="cx">
</span><span class="cx">
</span><del>- def addressbookHomeWithUID(self, uid, create=False):
- return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create)
</del><ins>+ def addressbookHomeWithUID(self, uid, create=False, migration=None):
+ return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create, migration=migration)
</ins><span class="cx">
</span><span class="cx">
</span><del>- def _determineMemo(self, storeType, uid, create=False):
</del><ins>+ def _determineMemo(self, storeType, uid, create=False, migration=None):
</ins><span class="cx"> """
</span><span class="cx"> Determine the memo dictionary to use for homeWithUID.
</span><span class="cx"> """
</span><span class="lines">@@ -335,7 +335,7 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @memoizedKey("uid", _determineMemo, deferredResult=False)
</span><del>- def homeWithUID(self, storeType, uid, create=False):
</del><ins>+ def homeWithUID(self, storeType, uid, create=False, migration=None):
</ins><span class="cx"> if uid.startswith("."):
</span><span class="cx"> return None
</span><span class="cx">
</span><span class="lines">@@ -346,7 +346,7 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @memoizedKey("uid", "_notificationHomes", deferredResult=False)
</span><del>- def notificationsWithUID(self, uid, home=None):
</del><ins>+ def notificationsWithUID(self, uid, home=None, migration=None):
</ins><span class="cx">
</span><span class="cx"> if home is None:
</span><span class="cx"> home = self.homeWithUID(self._notificationHomeType, uid, create=True)
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorepoddingconduitpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/conduit.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/conduit.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/conduit.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -76,34 +76,6 @@
</span><span class="cx"> self.store = store
</span><span class="cx">
</span><span class="cx">
</span><del>- def validRequst(self, source_guid, destination_guid):
- """
- Verify that the specified GUIDs are valid for the request and return the
- matching directory records.
-
- @param source_guid: GUID for the user on whose behalf the request is being made
- @type source_guid: C{str}
- @param destination_guid: GUID for the user to whom the request is being sent
- @type destination_guid: C{str}
-
- @return: C{tuple} of L{IStoreDirectoryRecord}
- """
-
- source = self.store.directoryService().recordWithUID(source_guid)
- if source is None:
- raise DirectoryRecordNotFoundError("Cross-pod source: {}".format(source_guid))
- if not source.thisServer():
- raise FailedCrossPodRequestError("Cross-pod source not on this server: {}".format(source_guid))
-
- destination = self.store.directoryService().recordWithUID(destination_guid)
- if destination is None:
- raise DirectoryRecordNotFoundError("Cross-pod destination: {}".format(destination_guid))
- if destination.thisServer():
- raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(destination_guid))
-
- return (source, destination,)
-
-
</del><span class="cx"> @inlineCallbacks
</span><span class="cx"> def sendRequest(self, txn, recipient, data, stream=None, streamType=None):
</span><span class="cx">
</span><span class="lines">@@ -156,9 +128,37 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> #
</span><del>- # Invite related apis
</del><ins>+ # Sharing related apis
</ins><span class="cx"> #
</span><span class="cx">
</span><ins>+ def validShareRequest(self, source_guid, destination_guid):
+ """
+ Verify that the specified GUIDs are valid for the request and return the
+ matching directory records.
+
+ @param source_guid: GUID for the user on whose behalf the request is being made
+ @type source_guid: C{str}
+ @param destination_guid: GUID for the user to whom the request is being sent
+ @type destination_guid: C{str}
+
+ @return: C{tuple} of L{IStoreDirectoryRecord}
+ """
+
+ source = self.store.directoryService().recordWithUID(source_guid)
+ if source is None:
+ raise DirectoryRecordNotFoundError("Cross-pod source: {}".format(source_guid))
+ if not source.thisServer():
+ raise FailedCrossPodRequestError("Cross-pod source not on this server: {}".format(source_guid))
+
+ destination = self.store.directoryService().recordWithUID(destination_guid)
+ if destination is None:
+ raise DirectoryRecordNotFoundError("Cross-pod destination: {}".format(destination_guid))
+ if destination.thisServer():
+ raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(destination_guid))
+
+ return (source, destination,)
+
+
</ins><span class="cx"> @inlineCallbacks
</span><span class="cx"> def send_shareinvite(self, txn, homeType, ownerUID, ownerID, ownerName, shareeUID, shareUID, bindMode, summary, copy_properties, supported_components):
</span><span class="cx"> """
</span><span class="lines">@@ -186,7 +186,7 @@
</span><span class="cx"> @type supported_components: C{str}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- _ignore_sender, recipient = self.validRequst(ownerUID, shareeUID)
</del><ins>+ _ignore_sender, recipient = self.validShareRequest(ownerUID, shareeUID)
</ins><span class="cx">
</span><span class="cx"> action = {
</span><span class="cx"> "action": "shareinvite",
</span><span class="lines">@@ -260,7 +260,7 @@
</span><span class="cx"> @type shareUID: C{str}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- _ignore_sender, recipient = self.validRequst(ownerUID, shareeUID)
</del><ins>+ _ignore_sender, recipient = self.validShareRequest(ownerUID, shareeUID)
</ins><span class="cx">
</span><span class="cx"> action = {
</span><span class="cx"> "action": "shareuninvite",
</span><span class="lines">@@ -290,7 +290,7 @@
</span><span class="cx"> # Create a share
</span><span class="cx"> shareeHome = yield txn.homeWithUID(message["type"], message["sharee"], create=True)
</span><span class="cx"> if shareeHome is None or shareeHome.external():
</span><del>- FailedCrossPodRequestError("Invalid sharee UID specified")
</del><ins>+ raise FailedCrossPodRequestError("Invalid sharee UID specified")
</ins><span class="cx">
</span><span class="cx"> try:
</span><span class="cx"> yield shareeHome.processExternalUninvite(
</span><span class="lines">@@ -299,7 +299,7 @@
</span><span class="cx"> message["share_id"],
</span><span class="cx"> )
</span><span class="cx"> except ExternalShareFailed as e:
</span><del>- FailedCrossPodRequestError(str(e))
</del><ins>+ raise FailedCrossPodRequestError(str(e))
</ins><span class="cx">
</span><span class="cx"> returnValue({
</span><span class="cx"> "result": "ok",
</span><span class="lines">@@ -325,7 +325,7 @@
</span><span class="cx"> @type summary: C{str}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- _ignore_sender, recipient = self.validRequst(shareeUID, ownerUID)
</del><ins>+ _ignore_sender, recipient = self.validShareRequest(shareeUID, ownerUID)
</ins><span class="cx">
</span><span class="cx"> action = {
</span><span class="cx"> "action": "sharereply",
</span><span class="lines">@@ -357,7 +357,7 @@
</span><span class="cx"> # Create a share
</span><span class="cx"> ownerHome = yield txn.homeWithUID(message["type"], message["owner"])
</span><span class="cx"> if ownerHome is None or ownerHome.external():
</span><del>- FailedCrossPodRequestError("Invalid owner UID specified")
</del><ins>+ raise FailedCrossPodRequestError("Invalid owner UID specified")
</ins><span class="cx">
</span><span class="cx"> try:
</span><span class="cx"> yield ownerHome.processExternalReply(
</span><span class="lines">@@ -368,7 +368,7 @@
</span><span class="cx"> summary=message.get("summary")
</span><span class="cx"> )
</span><span class="cx"> except ExternalShareFailed as e:
</span><del>- FailedCrossPodRequestError(str(e))
</del><ins>+ raise FailedCrossPodRequestError(str(e))
</ins><span class="cx">
</span><span class="cx"> returnValue({
</span><span class="cx"> "result": "ok",
</span><span class="lines">@@ -398,7 +398,7 @@
</span><span class="cx">
</span><span class="cx"> actionName = "add-attachment"
</span><span class="cx"> shareeView = objectResource._parentCollection
</span><del>- action, recipient = self._send(actionName, shareeView, objectResource)
</del><ins>+ action, recipient = self._share_send(actionName, shareeView, objectResource)
</ins><span class="cx"> action["rids"] = rids
</span><span class="cx"> action["filename"] = filename
</span><span class="cx"> result = yield self.sendRequest(shareeView._txn, recipient, action, stream, content_type)
</span><span class="lines">@@ -418,7 +418,7 @@
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> actionName = "add-attachment"
</span><del>- _ignore_shareeView, objectResource = yield self._recv(txn, message, actionName)
</del><ins>+ _ignore_shareeView, objectResource = yield self._share_recv(txn, message, actionName)
</ins><span class="cx"> try:
</span><span class="cx"> attachment, location = yield objectResource.addAttachment(
</span><span class="cx"> message["rids"],
</span><span class="lines">@@ -458,7 +458,7 @@
</span><span class="cx">
</span><span class="cx"> actionName = "update-attachment"
</span><span class="cx"> shareeView = objectResource._parentCollection
</span><del>- action, recipient = self._send(actionName, shareeView, objectResource)
</del><ins>+ action, recipient = self._share_send(actionName, shareeView, objectResource)
</ins><span class="cx"> action["managedID"] = managed_id
</span><span class="cx"> action["filename"] = filename
</span><span class="cx"> result = yield self.sendRequest(shareeView._txn, recipient, action, stream, content_type)
</span><span class="lines">@@ -478,7 +478,7 @@
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> actionName = "update-attachment"
</span><del>- _ignore_shareeView, objectResource = yield self._recv(txn, message, actionName)
</del><ins>+ _ignore_shareeView, objectResource = yield self._share_recv(txn, message, actionName)
</ins><span class="cx"> try:
</span><span class="cx"> attachment, location = yield objectResource.updateAttachment(
</span><span class="cx"> message["managedID"],
</span><span class="lines">@@ -514,7 +514,7 @@
</span><span class="cx">
</span><span class="cx"> actionName = "remove-attachment"
</span><span class="cx"> shareeView = objectResource._parentCollection
</span><del>- action, recipient = self._send(actionName, shareeView, objectResource)
</del><ins>+ action, recipient = self._share_send(actionName, shareeView, objectResource)
</ins><span class="cx"> action["rids"] = rids
</span><span class="cx"> action["managedID"] = managed_id
</span><span class="cx"> result = yield self.sendRequest(shareeView._txn, recipient, action)
</span><span class="lines">@@ -534,7 +534,7 @@
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> actionName = "remove-attachment"
</span><del>- _ignore_shareeView, objectResource = yield self._recv(txn, message, actionName)
</del><ins>+ _ignore_shareeView, objectResource = yield self._share_recv(txn, message, actionName)
</ins><span class="cx"> try:
</span><span class="cx"> yield objectResource.removeAttachment(
</span><span class="cx"> message["rids"],
</span><span class="lines">@@ -557,12 +557,16 @@
</span><span class="cx"> # Sharer data access related apis
</span><span class="cx"> #
</span><span class="cx">
</span><del>- def _send(self, action, parent, child=None):
</del><ins>+ def _share_send(self, action, parent, child=None):
</ins><span class="cx"> """
</span><span class="cx"> Base behavior for an operation on a L{CommonHomeChild}.
</span><span class="cx">
</span><del>- @param shareeView: sharee resource being operated on.
- @type shareeView: L{CommonHomeChildExternal}
</del><ins>+ @param action: remote API call to execute
+ @type action: C{str}
+ @param parent: home child resource
+ @type parent: L{CommonHomeChild}
+ @param child: object resource (or C{None} to operate on the parent only
+ @type child: L{CommonObjectResource}
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> homeType = parent.ownerHome()._homeType
</span><span class="lines">@@ -570,7 +574,7 @@
</span><span class="cx"> ownerID = parent.external_id()
</span><span class="cx"> shareeUID = parent.viewerHome().uid()
</span><span class="cx">
</span><del>- _ignore_sender, recipient = self.validRequst(shareeUID, ownerUID)
</del><ins>+ _ignore_sender, recipient = self.validShareRequest(shareeUID, ownerUID)
</ins><span class="cx">
</span><span class="cx"> result = {
</span><span class="cx"> "action": action,
</span><span class="lines">@@ -585,12 +589,16 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def _recv(self, txn, message, expected_action):
</del><ins>+ def _share_recv(self, txn, message, expected_action):
</ins><span class="cx"> """
</span><span class="cx"> Base behavior for sharer data access.
</span><span class="cx">
</span><del>- @param message: message arguments
</del><ins>+ @param txn: transaction
+ @type txn: L{CommonStoreTransaction}
+ @param message: JSON data to process
</ins><span class="cx"> @type message: C{dict}
</span><ins>+ @param expected_action: expected action in message
+ @type expected_action: C{str}
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> if message["action"] != expected_action:
</span><span class="lines">@@ -599,21 +607,21 @@
</span><span class="cx"> # Get a share
</span><span class="cx"> ownerHome = yield txn.homeWithUID(message["type"], message["owner"])
</span><span class="cx"> if ownerHome is None or ownerHome.external():
</span><del>- FailedCrossPodRequestError("Invalid owner UID specified")
</del><ins>+ raise FailedCrossPodRequestError("Invalid owner UID specified")
</ins><span class="cx">
</span><span class="cx"> shareeHome = yield txn.homeWithUID(message["type"], message["sharee"])
</span><span class="cx"> if shareeHome is None or not shareeHome.external():
</span><del>- FailedCrossPodRequestError("Invalid sharee UID specified")
</del><ins>+ raise FailedCrossPodRequestError("Invalid sharee UID specified")
</ins><span class="cx">
</span><span class="cx"> shareeView = yield shareeHome.childWithID(message["owner_id"])
</span><span class="cx"> if shareeView is None:
</span><del>- FailedCrossPodRequestError("Invalid shared resource specified")
</del><ins>+ raise FailedCrossPodRequestError("Invalid shared resource specified")
</ins><span class="cx">
</span><span class="cx"> resourceID = message.get("resource_id", None)
</span><span class="cx"> if resourceID is not None:
</span><span class="cx"> objectResource = yield shareeView.objectResourceWithID(resourceID)
</span><span class="cx"> if objectResource is None:
</span><del>- FailedCrossPodRequestError("Invalid owner shared object resource specified")
</del><ins>+ raise FailedCrossPodRequestError("Invalid owner shared object resource specified")
</ins><span class="cx"> else:
</span><span class="cx"> objectResource = None
</span><span class="cx">
</span><span class="lines">@@ -626,7 +634,7 @@
</span><span class="cx"> #
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def _simple_send(self, actionName, shareeView, objectResource=None, transform=None, args=None, kwargs=None):
</del><ins>+ def _simple_share_send(self, actionName, shareeView, objectResource=None, transform=None, args=None, kwargs=None):
</ins><span class="cx"> """
</span><span class="cx"> A simple send operation that returns a value.
</span><span class="cx">
</span><span class="lines">@@ -644,20 +652,20 @@
</span><span class="cx"> @type kwargs: C{dict}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- action, recipient = self._send(actionName, shareeView, objectResource)
</del><ins>+ action, recipient = self._share_send(actionName, shareeView, objectResource)
</ins><span class="cx"> if args is not None:
</span><span class="cx"> action["arguments"] = args
</span><span class="cx"> if kwargs is not None:
</span><span class="cx"> action["keywords"] = kwargs
</span><span class="cx"> result = yield self.sendRequest(shareeView._txn, recipient, action)
</span><span class="cx"> if result["result"] == "ok":
</span><del>- returnValue(result["value"] if transform is None else transform(result["value"], shareeView, objectResource))
</del><ins>+ returnValue(result["value"] if transform is None else transform(result["value"]))
</ins><span class="cx"> elif result["result"] == "exception":
</span><span class="cx"> raise namedClass(result["class"])(result["message"])
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def _simple_recv(self, txn, actionName, message, method, onHomeChild=True, transform=None):
</del><ins>+ def _simple_share_recv(self, txn, actionName, message, method, onHomeChild=True, transform=None):
</ins><span class="cx"> """
</span><span class="cx"> A simple recv operation that returns a value. We also look for an optional set of arguments/keywords
</span><span class="cx"> and include those only if present.
</span><span class="lines">@@ -672,7 +680,7 @@
</span><span class="cx"> @type transform: C{callable}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- shareeView, objectResource = yield self._recv(txn, message, actionName)
</del><ins>+ shareeView, objectResource = yield self._share_recv(txn, message, actionName)
</ins><span class="cx"> try:
</span><span class="cx"> if onHomeChild:
</span><span class="cx"> # Operate on the L{CommonHomeChild}
</span><span class="lines">@@ -693,7 +701,7 @@
</span><span class="cx">
</span><span class="cx"> returnValue({
</span><span class="cx"> "result": "ok",
</span><del>- "value": transform(value, shareeView, objectResource) if transform is not None else value,
</del><ins>+ "value": transform(value) if transform is not None else value,
</ins><span class="cx"> })
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -710,7 +718,7 @@
</span><span class="cx"> servertoserver,
</span><span class="cx"> event_details,
</span><span class="cx"> ):
</span><del>- action, recipient = self._send("freebusy", calresource)
</del><ins>+ action, recipient = self._share_send("freebusy", calresource)
</ins><span class="cx"> action["timerange"] = [timerange.start.getText(), timerange.end.getText()]
</span><span class="cx"> action["matchtotal"] = matchtotal
</span><span class="cx"> action["excludeuid"] = excludeuid
</span><span class="lines">@@ -735,7 +743,7 @@
</span><span class="cx"> @type message: C{dict}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- shareeView, _ignore_objectResource = yield self._recv(txn, message, "freebusy")
</del><ins>+ shareeView, _ignore_objectResource = yield self._share_recv(txn, message, "freebusy")
</ins><span class="cx"> try:
</span><span class="cx"> # Operate on the L{CommonHomeChild}
</span><span class="cx"> fbinfo = [[], [], []]
</span><span class="lines">@@ -771,72 +779,269 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @staticmethod
</span><del>- def _to_tuple(value, shareeView, objectResource):
</del><ins>+ def _to_tuple(value):
</ins><span class="cx"> return tuple(value)
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @staticmethod
</span><del>- def _to_string(value, shareeView, objectResource):
</del><ins>+ def _to_string(value):
</ins><span class="cx"> return str(value)
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @staticmethod
</span><del>- def _to_externalize(value, shareeView, objectResource):
- if isinstance(value, shareeView._objectResourceClass):
- value = value.externalize()
- elif value is not None:
- value = [v.externalize() for v in value]
- return value
</del><ins>+ def _to_externalize(value):
+ return value.externalize() if value is not None else None
</ins><span class="cx">
</span><span class="cx">
</span><ins>+ @staticmethod
+ def _to_externalize_list(value):
+ return [v.externalize() for v in value] if value is not None else None
+
+
</ins><span class="cx"> @classmethod
</span><del>- def _make_simple_homechild_action(cls, action, method, transform_recv=None, transform_send=None):
</del><ins>+ def _make_share_homechild_action(cls, action, method, transform_recv=None, transform_send=None):
</ins><span class="cx"> setattr(
</span><span class="cx"> cls,
</span><span class="cx"> "send_{}".format(action),
</span><span class="cx"> lambda self, shareeView, *args, **kwargs:
</span><del>- self._simple_send(action, shareeView, transform=transform_send, args=args, kwargs=kwargs)
</del><ins>+ self._simple_share_send(action, shareeView, transform=transform_send, args=args, kwargs=kwargs)
</ins><span class="cx"> )
</span><span class="cx"> setattr(
</span><span class="cx"> cls,
</span><span class="cx"> "recv_{}".format(action),
</span><span class="cx"> lambda self, txn, message:
</span><del>- self._simple_recv(txn, action, message, method, transform=transform_recv)
</del><ins>+ self._simple_share_recv(txn, action, message, method, transform=transform_recv)
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><del>- def _make_simple_object_action(cls, action, method, transform_recv=None, transform_send=None):
</del><ins>+ def _make_share_object_action(cls, action, method, transform_recv=None, transform_send=None):
</ins><span class="cx"> setattr(
</span><span class="cx"> cls,
</span><span class="cx"> "send_{}".format(action),
</span><span class="cx"> lambda self, shareeView, objectResource, *args, **kwargs:
</span><del>- self._simple_send(action, shareeView, objectResource, transform=transform_send, args=args, kwargs=kwargs)
</del><ins>+ self._simple_share_send(action, shareeView, objectResource, transform=transform_send, args=args, kwargs=kwargs)
</ins><span class="cx"> )
</span><span class="cx"> setattr(
</span><span class="cx"> cls,
</span><span class="cx"> "recv_{}".format(action),
</span><span class="cx"> lambda self, txn, message:
</span><del>- self._simple_recv(txn, action, message, method, onHomeChild=False, transform=transform_recv)
</del><ins>+ self._simple_share_recv(txn, action, message, method, onHomeChild=False, transform=transform_recv)
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><ins>+ #
+ # Migration data access related apis
+ #
+
+ def validMigrateRequest(self, destination_guid):
+ """
+ Verify that the specified GUIDs are valid for the request and return the
+ matching directory records.
+
+ @param destination_guid: GUID for the user to whom the request is being sent
+ @type destination_guid: C{str}
+
+ @return: C{tuple} of L{IStoreDirectoryRecord}
+ """
+
+ destination = self.store.directoryService().recordWithUID(destination_guid)
+ if destination is None:
+ raise DirectoryRecordNotFoundError("Cross-pod destination: {}".format(destination_guid))
+ if destination.thisServer():
+ raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(destination_guid))
+
+ return destination
+
+
+ def _migrate_send(self, action, target):
+ """
+ Base behavior for an operation on a store resource. L{target} is the store object that is
+ the target of the remote API call. It can be an L{CommonHome}, an L{CommonHomeChild},
+ or an L{CommonObjectResource}.
+
+ @param action: remote API call to execute
+ @type action: C{str}
+ @param target: store object
+ @type target: L{CommonHome} or L{CommonHomeChild} or L{CommonObjectResource}
+ """
+
+ if target._storeType == "home":
+ home = target
+ child = None
+ resource = None
+ elif target._storeType == "child":
+ home = target.viewerHome()
+ child = target
+ resource = None
+ elif target._storeType == "resource":
+ home = target.parentCollection().viewerHome()
+ child = target.parentCollection()
+ resource = target
+
+ homeType = home._homeType
+ ownerUID = home.uid()
+ childID = child.external_id() if child else None
+ objectName = resource.name() if resource else None
+
+ recipient = self.validMigrateRequest(ownerUID)
+
+ result = {
+ "action": action,
+ "type": homeType,
+ "owner": ownerUID,
+ }
+
+ if childID:
+ result["child_id"] = childID
+ if objectName:
+ result["object_name"] = objectName
+
+ return result, recipient
+
+
+ @inlineCallbacks
+ def _migrate_recv(self, txn, message, expected_action):
+ """
+ Base behavior for migrate data access.
+
+ @param txn: transaction
+ @type txn: L{CommonStoreTransaction}
+ @param message: JSON data to process
+ @type message: C{dict}
+ @param expected_action: expected action in message
+ @type expected_action: C{str}
+ """
+
+ if message["action"] != expected_action:
+ raise FailedCrossPodRequestError("Wrong action '{}' for recv_{}".format(message["action"], expected_action))
+
+ # Get a home
+ ownerHome = yield txn.homeWithUID(message["type"], message["owner"])
+ if ownerHome is None or ownerHome.external():
+ raise FailedCrossPodRequestError("Invalid owner UID specified")
+
+ # Get child resource
+ if "child_id" in message:
+ child = yield ownerHome.childWithID(message["child_id"])
+ if child is None:
+ raise FailedCrossPodRequestError("Invalid child resource specified")
+
+ if "object_name" in message:
+ resource = yield child.objectResourceWithName(message["object_name"])
+ if resource is None:
+ raise FailedCrossPodRequestError("Invalid owner shared object resource specified")
+ target = resource
+ else:
+ target = child
+
+ else:
+ target = ownerHome
+
+ returnValue(target)
+
+
+ #
+ # Simple calls are ones where there is no argument and a single return value. We can simplify
+ # code generation for these by dynamically generating the appropriate class methods.
+ #
+
+ @inlineCallbacks
+ def _simple_migrate_send(self, actionName, target, transform=None, args=None, kwargs=None):
+ """
+ A simple send operation that returns a value.
+
+ @param actionName: name of the action.
+ @type actionName: C{str}
+ @param target: store object
+ @type target: L{CommonHome} or L{CommonHomeChild} or L{CommonObjectResource}
+ @param transform: a function used to convert the JSON result into return values.
+ @type transform: C{callable}
+ @param args: list of optional arguments.
+ @type args: C{list}
+ @param kwargs: optional keyword arguments.
+ @type kwargs: C{dict}
+ """
+
+ action, recipient = self._migrate_send(actionName, target)
+ if args is not None:
+ action["arguments"] = args
+ if kwargs is not None:
+ action["keywords"] = kwargs
+ result = yield self.sendRequest(target.transaction(), recipient, action)
+ if result["result"] == "ok":
+ returnValue(result["value"] if transform is None else transform(result["value"]))
+ elif result["result"] == "exception":
+ raise namedClass(result["class"])(result["message"])
+
+
+ @inlineCallbacks
+ def _simple_migrate_recv(self, txn, actionName, message, method, transform=None):
+ """
+ A simple recv operation that returns a value. We also look for an optional set of arguments/keywords
+ and include those only if present.
+
+ @param actionName: name of the action.
+ @type actionName: C{str}
+ @param message: message arguments
+ @type message: C{dict}
+ @param method: name of the method to execute on the shared resource to get the result.
+ @type method: C{str}
+ @param transform: method to call on returned JSON value to convert it to something useful.
+ @type transform: C{callable}
+ """
+
+ target = yield self._migrate_recv(txn, message, actionName)
+ try:
+ # Operate on the L{CommonHomeChild}
+ value = yield getattr(target, method)(*message.get("arguments", ()), **message.get("keywords", {}))
+ except Exception as e:
+ returnValue({
+ "result": "exception",
+ "class": ".".join((e.__class__.__module__, e.__class__.__name__,)),
+ "message": str(e),
+ })
+
+ returnValue({
+ "result": "ok",
+ "value": transform(value) if transform is not None else value,
+ })
+
+
+ @classmethod
+ def _make_migrate_action(cls, action, method, transform_recv=None, transform_send=None):
+ setattr(
+ cls,
+ "send_{}".format(action),
+ lambda self, target, *args, **kwargs:
+ self._simple_migrate_send(action, target, transform=transform_send, args=args, kwargs=kwargs)
+ )
+ setattr(
+ cls,
+ "recv_{}".format(action),
+ lambda self, txn, message:
+ self._simple_migrate_recv(txn, action, message, method, transform=transform_recv)
+ )
+
+# Migrate calls
+PoddingConduit._make_migrate_action("loadchildren", "loadChildren", transform_recv=PoddingConduit._to_externalize_list)
+
</ins><span class="cx"> # Calls on L{CommonHomeChild} objects
</span><del>-PoddingConduit._make_simple_homechild_action("countobjects", "countObjectResources")
-PoddingConduit._make_simple_homechild_action("listobjects", "listObjectResources")
-PoddingConduit._make_simple_homechild_action("resourceuidforname", "resourceUIDForName")
-PoddingConduit._make_simple_homechild_action("resourcenameforuid", "resourceNameForUID")
-PoddingConduit._make_simple_homechild_action("movehere", "moveObjectResourceHere")
-PoddingConduit._make_simple_homechild_action("moveaway", "moveObjectResourceAway")
-PoddingConduit._make_simple_homechild_action("synctoken", "syncToken")
-PoddingConduit._make_simple_homechild_action("resourcenamessincerevision", "resourceNamesSinceRevision", transform_send=PoddingConduit._to_tuple)
-PoddingConduit._make_simple_homechild_action("search", "search")
</del><ins>+PoddingConduit._make_share_homechild_action("countobjects", "countObjectResources")
+PoddingConduit._make_share_homechild_action("listobjects", "listObjectResources")
+PoddingConduit._make_share_homechild_action("resourceuidforname", "resourceUIDForName")
+PoddingConduit._make_share_homechild_action("resourcenameforuid", "resourceNameForUID")
+PoddingConduit._make_share_homechild_action("movehere", "moveObjectResourceHere")
+PoddingConduit._make_share_homechild_action("moveaway", "moveObjectResourceAway")
+PoddingConduit._make_share_homechild_action("synctoken", "syncToken")
+PoddingConduit._make_share_homechild_action("resourcenamessincerevision", "resourceNamesSinceRevision", transform_send=PoddingConduit._to_tuple)
+PoddingConduit._make_share_homechild_action("search", "search")
</ins><span class="cx">
</span><span class="cx"> # Calls on L{CommonObjectResource} objects
</span><del>-PoddingConduit._make_simple_object_action("loadallobjects", "loadAllObjects", transform_recv=PoddingConduit._to_externalize)
-PoddingConduit._make_simple_object_action("loadallobjectswithnames", "loadAllObjectsWithNames", transform_recv=PoddingConduit._to_externalize)
-PoddingConduit._make_simple_object_action("objectwith", "objectWith", transform_recv=PoddingConduit._to_externalize)
-PoddingConduit._make_simple_object_action("create", "create", transform_recv=PoddingConduit._to_externalize)
-PoddingConduit._make_simple_object_action("setcomponent", "setComponent")
-PoddingConduit._make_simple_object_action("component", "component", transform_recv=PoddingConduit._to_string)
-PoddingConduit._make_simple_object_action("remove", "remove")
</del><ins>+PoddingConduit._make_share_object_action("loadallobjects", "loadAllObjects", transform_recv=PoddingConduit._to_externalize_list)
+PoddingConduit._make_share_object_action("loadallobjectswithnames", "loadAllObjectsWithNames", transform_recv=PoddingConduit._to_externalize_list)
+PoddingConduit._make_share_object_action("objectwith", "objectWith", transform_recv=PoddingConduit._to_externalize)
+PoddingConduit._make_share_object_action("create", "create", transform_recv=PoddingConduit._to_externalize)
+PoddingConduit._make_share_object_action("setcomponent", "setComponent")
+PoddingConduit._make_share_object_action("component", "component", transform_recv=PoddingConduit._to_string)
+PoddingConduit._make_share_object_action("remove", "remove")
</ins></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorepoddingmigrationpy"></a>
<div class="addfile"><h4>Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/migration.py (0 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/migration.py         (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/migration.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -0,0 +1,335 @@
</span><ins>+##
+# Copyright (c) 2013-2014 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+from txdav.common.icommondatastore import CommonStoreError
+from twisted.internet.defer import inlineCallbacks
+from txdav.common.datastore.sql_tables import _HOME_STATUS_NORMAL, \
+ _MIGRATION_STATUS_MIGRATING, _MIGRATION_STATUS_MIGRATED, \
+ _MIGRATION_STATUS_NONE
+
+"""
+Support for cross-pod migration of users.
+
+Migration will be divided into four steps:
+
+1. Initial data-only sync.
+ This will sync the actual CommonObjectResource data and owned collection bind information
+ from the source pod to the destination pod. This must be able to execute whilst the
+ service is running. The sync'd data will be store on the destination pod under an
+ "inactive" home - i.e., one that can never be accessed via public store apis.
+
+2. Incremental data-only sync.
+ This will update the destination's previously sync'd state to match the current source state.
+ This can be run multiple times prior to step 3 - with the last one ideally right before
+ step 3 is done. This must be able to execute whilst the service is running.
+
+3. Migration commit.
+ 1. The service is shut down.
+ 2. A final incremental data-sync is done.
+ 3. A sync/update of sharing state is done on both the source and destination (this may
+ require creating external shares on the source for sharee's who remain on that pod,
+ and converting external shares to internal ones on the destination pod for sharee's
+ that are now on the same pod as the migrated owner).
+ 4. The source home is marked as inactive.
+ 5. The destination home is marked as active.
+ 6. The service is brought back up.
+
+4. Data clean-up.
+ The inactive home and associated owned data on the source pod is removed. This must be
+ able to execute whilst the service is running.
+
+X. A step that can be used any time prior to step 3 that stops the current migration. That
+ should simply involve removing the local inactive homes. There are no changes on the old
+ pod until step 3.
+
+During migration we will create an "inactive" set of home collections for the user being
+migrated on their new pod. An "inactive" home is one where the MIGRATION value is non-zero.
+There will be two types - one for use during migration, and one for use
+after migration, with the latter being used to mark the data on the old pod as inactive
+prior to deletion (step 4).
+
+The migration process will be driven by calls on the new pod, which will then use cross-pod
+requests to sync data and trigger state changes on the old pod.
+
+"""
+
+class UserAlreadyOnThisPod(CommonStoreError):
+ pass
+
+
+
+class UserAlreadyBeingMigrated(CommonStoreError):
+ pass
+
+
+
+class UserNotAlreadyBeingMigrated(CommonStoreError):
+ pass
+
+
+
+class MigrationController(object):
+ """
+ Class that manages each of the four steps. Will assume that this is being run on the new pod,
+ pulling data for a user whose directory record still points to the old pod for the first three
+ steps.
+
+ TODO: For now this only works with calendars.
+ """
+
+ def __init__(self, store, homeTypes=None):
+ self.store = store
+ self.homeTypes = homeTypes
+
+
+ def migrationTypes(self, txn):
+ return txn.homeTypes if self.homeTypes is None else self.homeTypes
+
+
+ @inlineCallbacks
+ def step1(self, user):
+ """
+ Carry out step 1 migration: full sync of user data to this pod.
+
+ @param user: GUID of the user to migrate
+ @type user: C{str}
+ """
+
+ # Validity checks
+ self.verifyNonLocalUser(user)
+
+ txn = self.store.newTransaction()
+
+ # Can't already have a migrating home prior to step 1
+ for home_type in self.migrationTypes(txn):
+ home = yield txn.homeWithUID(home_type, user, create=False, migration=_MIGRATION_STATUS_MIGRATING)
+ if home is not None:
+ raise UserAlreadyBeingMigrated("Inactive {} home exists".format(txn.homeClass(home_type)))
+ home = yield txn.notificationsWithUID(user, create=False, migration=_MIGRATION_STATUS_MIGRATING)
+ if home is not None:
+ raise UserAlreadyBeingMigrated("Inactive notifications exists")
+
+ # Can't already have a migrated home prior to step 1 - i.e., a step 4 is
+ # still in progress on this pod. We can't migrate the user back until that is done.
+ for home_type in self.migrationTypes(txn):
+ home = yield txn.homeWithUID(home_type, user, create=False, migration=_MIGRATION_STATUS_MIGRATED)
+ if home is not None:
+ raise UserAlreadyBeingMigrated("Inactive {} home exists".format(txn.homeClass(home_type)))
+ home = yield txn.notificationsWithUID(user, create=False, migration=_MIGRATION_STATUS_MIGRATED)
+ if home is not None:
+ raise UserAlreadyBeingMigrated("Inactive notifications exists")
+
+ # Create the new invalid homes
+ for home_type in self.migrationTypes(txn):
+ home = yield txn.homeWithUID(home_type, user, create=True, migration=_MIGRATION_STATUS_MIGRATING)
+ home = yield txn.notificationsWithUID(user, create=True, migration=_MIGRATION_STATUS_MIGRATING)
+ yield txn.commit()
+
+ # Now we do a full sync - but in fact we can simply do the incremental sync,
+ # however that will get everything we need for step 1
+ yield self.syncHomes(user)
+
+
+ @inlineCallbacks
+ def step2(self, user):
+ """
+ Carry out step 2 migration: incremental sync of user data to this pod.
+
+ @param user: GUID of the user to migrate
+ @type user: C{str}
+ """
+
+ # Validity checks
+ self.verifyNonLocalUser(user)
+
+ # Must already have a migrating home prior to step 2
+ txn = self.store.newTransaction()
+ yield self.validMigratingUser(txn, user)
+ yield txn.commit()
+
+ # Now make each home sync
+ yield self.syncHomes(user)
+
+
+ @inlineCallbacks
+ def step3(self, user):
+ """
+ Carry out step 3 migration: final sync and commit of the user migration to this pod.
+
+ @param user: GUID of the user to migrate
+ @type user: C{str}
+ """
+
+ # Validity checks
+ self.verifyNonLocalUser(user)
+
+ # Must already have a migrating home prior to step 3
+ txn = self.store.newTransaction()
+ yield self.validMigratingUser(txn, user)
+ yield txn.commit()
+
+ # Step 3.2 Now make each home sync
+ yield self.syncHomes(user, final=True)
+
+ # Up to this point everything has been non-destructive in that all the existing data is intact.
+ # After this point we will start changing the existing data and we really want those changes to
+ # be consistent across the pods (i.e., transactional). There shouldn't be a lot of SQL changes
+ # that need to be done so we can do this all in one transaction on the local pod. Need to figure
+ # out how to recover the remote pod, because we can't keep transactions open across multiple
+ # cross-pod calls.
+
+ # Step 3.3 Sync the sharing state
+ yield self.syncSharingState(user)
+
+ # Step 3.4 Deactivate user on old pod
+ yield self.deactivateUserOnPod(user)
+
+ # Step 3.5 Activate user on new pod
+ yield self.activateUserOnPod(user)
+
+
+ @inlineCallbacks
+ def syncHomes(self, user, final=False):
+ """
+ Synchronize the contents of the user home from their current pod to this one. If L{final} is C{False},
+ treat this as a step 1/step 2 full/incremental sync. If L{final} is C{True}, then this is the step 3
+ sync where we need to sync additional meta-data and the notifications collection content.
+
+ @param user: GUID of the user to migrate
+ @type user: C{str}
+ @param final: if C{True}, indicates if this is the step 3 sync
+ @type final: C{bool}
+ """
+
+ txn = self.store.newTransaction()
+ for home_type in self.migrationTypes(txn):
+ home = yield txn.homeWithUID(home_type, user, migration=_MIGRATION_STATUS_MIGRATING)
+ yield home.migrateToThisPod(user, final)
+
+ if final:
+ home = yield txn.notificationsWithUID(user, migration=_MIGRATION_STATUS_MIGRATING)
+ yield home.migrateToThisPod(user, final)
+ yield txn.commit()
+
+
+ @inlineCallbacks
+ def syncSharingState(self, user):
+ """
+ Synchronize the sharing state for the user being migrated. This involves:
+
+ 1. For each item being shared by this user, make a bind entry in the new Pod
+ for each share, adjusting for whether the sharee is now local or external
+ to the pod. The old pod bind entry will be removed in step 4, no clean-up needed.
+
+ 2. For each item shared to this user, make a bind entry in the new pod, adjusting
+ for whether the sharer is local or external to the pod. If the sharer is local
+ to the old pod, adjust their bind row to point to the user on the new pod. If
+ the sharer is on some other pod (not the new or old one) then the existing bind
+ rows on that pod should still be valid - no change needed.
+
+ @param user: GUID of the user to migrate
+ @type user: C{str}
+ """
+
+ # TODO:
+ pass
+
+
+ @inlineCallbacks
+ def deactivateUserOnOtherPod(self, user):
+ """
+ Deactivate the user on their old Pod by adjusting migration state.
+ This method makes a cross-pod call to the other pod and calls its
+ L{deactivateUserOnThisPod} method.
+
+ @param user: GUID of the user to migrate
+ @type user: C{str}
+ """
+
+ # TODO: Do a cross-pod call to tell the other Pod to deactivate
+ pass
+
+
+ @inlineCallbacks
+ def deactivateUserOnThisPod(self, user):
+ """
+ Deactivate the user on their old Pod (this one). Called as a result of the cross-pod
+ call from the new pod L{deactivateUserOnOtherPod}.
+
+ @param user: GUID of the user to migrate
+ @type user: C{str}
+ """
+
+ txn = self.store.newTransaction()
+ for home_type in self.migrationTypes(txn):
+ home = yield txn.homeWithUID(home_type, user, migration=_MIGRATION_STATUS_NONE)
+ yield home.updateDetails(newMigration=_MIGRATION_STATUS_MIGRATED)
+
+ home = yield txn.notificationsWithUID(user, migration=_MIGRATION_STATUS_NONE)
+ yield home.updateDetails(newMigration=_MIGRATION_STATUS_MIGRATED)
+
+ yield txn.commit()
+
+
+ @inlineCallbacks
+ def activateUserOnThisPod(self, user):
+ """
+ Activate the user on their new Pod (this one) by adjusting migration state
+ on each home. We also need to force their state to normal to properly indicate
+ they are now hosted on this pod.
+
+ @param user: GUID of the user to migrate
+ @type user: C{str}
+ """
+
+ yield self.changeActivateState(user, _MIGRATION_STATUS_NONE, _HOME_STATUS_NORMAL)
+
+ txn = self.store.newTransaction()
+ for home_type in self.migrationTypes(txn):
+ home = yield txn.homeWithUID(home_type, user, migration=_MIGRATION_STATUS_MIGRATING)
+ yield home.updateDetails(newStatus=_HOME_STATUS_NORMAL, newMigration=_MIGRATION_STATUS_NONE)
+
+ home = yield txn.notificationsWithUID(user, migration=_MIGRATION_STATUS_MIGRATING)
+ yield home.updateDetails(newStatus=_HOME_STATUS_NORMAL, newMigration=_MIGRATION_STATUS_NONE)
+
+ yield txn.commit()
+
+
+ def verifyNonLocalUser(self, user):
+ record = self.store.directoryService().recordWithGUID(user)
+ if record.thisServer():
+ raise UserAlreadyOnThisPod("User being migrated is already hosted on this pod")
+
+
+ @inlineCallbacks
+ def validMigratingUser(self, txn, user):
+ """
+ Determine if the specified user is already setup to migrate (i.e., step 1 has been done).
+
+ @param txn: transaction to use
+ @type txn: L{txdav.common.datastore.sql.CommonStoreTransaction}
+ @param user: the user GUID to migrate
+ @type user: C{str}
+
+ @raise: L{UserNotAlreadyBeingMigrated} if invalid
+ """
+
+ for home_type in self.migrationTypes(txn):
+ home = yield txn.homeWithUID(home_type, user, create=False, migration=_MIGRATION_STATUS_MIGRATING)
+ if home is None:
+ raise UserNotAlreadyBeingMigrated("Inactive {} home does not exist".format(txn.homeClass(home_type)))
+ home = yield txn.notificationsWithUID(user, create=False, migration=_MIGRATION_STATUS_MIGRATING)
+ if home is None:
+ raise UserNotAlreadyBeingMigrated("Inactive notifications does not exist")
</ins></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorepoddingtesttest_conduitpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_conduit.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_conduit.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_conduit.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -41,7 +41,8 @@
</span><span class="cx"> from txdav.common.datastore.podding.resource import ConduitResource
</span><span class="cx"> from txdav.common.datastore.podding.test.util import MultiStoreConduitTest, \
</span><span class="cx"> FakeConduitRequest
</span><del>-from txdav.common.datastore.sql_tables import _BIND_STATUS_ACCEPTED
</del><ins>+from txdav.common.datastore.sql_tables import _BIND_STATUS_ACCEPTED, \
+ _MIGRATION_STATUS_MIGRATING
</ins><span class="cx"> from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
</span><span class="cx"> from txdav.common.icommondatastore import ObjectResourceNameAlreadyExistsError, \
</span><span class="cx"> ObjectResourceNameNotAllowedError
</span><span class="lines">@@ -123,19 +124,19 @@
</span><span class="cx"> }
</span><span class="cx">
</span><span class="cx">
</span><del>- def test_validRequst(self):
</del><ins>+ def test_validShareRequest(self):
</ins><span class="cx"> """
</span><span class="cx"> Cross-pod request fails when there is no shared secret header present.
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> conduit = PoddingConduit(self.storeUnderTest())
</span><del>- r1, r2 = conduit.validRequst("user01", "puser02")
</del><ins>+ r1, r2 = conduit.validShareRequest("user01", "puser02")
</ins><span class="cx"> self.assertTrue(r1 is not None)
</span><span class="cx"> self.assertTrue(r2 is not None)
</span><span class="cx">
</span><del>- self.assertRaises(DirectoryRecordNotFoundError, conduit.validRequst, "bogus01", "user02")
- self.assertRaises(DirectoryRecordNotFoundError, conduit.validRequst, "user01", "bogus02")
- self.assertRaises(FailedCrossPodRequestError, conduit.validRequst, "user01", "user02")
</del><ins>+ self.assertRaises(DirectoryRecordNotFoundError, conduit.validShareRequest, "bogus01", "user02")
+ self.assertRaises(DirectoryRecordNotFoundError, conduit.validShareRequest, "user01", "bogus02")
+ self.assertRaises(FailedCrossPodRequestError, conduit.validShareRequest, "user01", "user02")
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -145,7 +146,7 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def send_fake(self, txn, ownerUID, shareeUID):
</span><del>- _ignore_owner, sharee = self.validRequst(ownerUID, shareeUID)
</del><ins>+ _ignore_owner, sharee = self.validShareRequest(ownerUID, shareeUID)
</ins><span class="cx"> action = {
</span><span class="cx"> "action": "fake",
</span><span class="cx"> "echo": "bravo"
</span><span class="lines">@@ -202,9 +203,9 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><del>-class TestConduitAPI(MultiStoreConduitTest):
</del><ins>+class TestConduitAPIForSharing(MultiStoreConduitTest):
</ins><span class="cx"> """
</span><del>- Test that the conduit api works.
</del><ins>+ Test that the conduit api works for sharing.
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> nowYear = {"now": DateTime.getToday().getYear()}
</span><span class="lines">@@ -1081,3 +1082,25 @@
</span><span class="cx"> attachment = yield ManagedAttachment.load(self.transactionUnderTest(), resourceID, managedID)
</span><span class="cx"> self.assertTrue(attachment is None)
</span><span class="cx"> yield self.commit()
</span><ins>+
+
+
+class TestConduitAPIForMigration(MultiStoreConduitTest):
+ """
+ Test that the conduit api works for migration.
+ """
+
+ @inlineCallbacks
+ def test_loadallobjects(self):
+ """
+ Test that action=loadallobjects works.
+ """
+
+ yield self.homeUnderTest(txn=self.newOtherTransaction(), name="puser01", create=True)
+ yield self.otherCommit()
+
+ remote_home = yield self.homeUnderTest(name="puser01", create=True)
+ remote_home._migration = _MIGRATION_STATUS_MIGRATING
+
+ results = yield remote_home.loadChildren()
+ self.assertEqual(set([result.name() for result in results]), set(("calendar", "tasks", "inbox",)))
</ins></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorepoddingtesttest_migrationpy"></a>
<div class="addfile"><h4>Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_migration.py (0 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_migration.py         (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/test_migration.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -0,0 +1,200 @@
</span><ins>+##
+# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twisted.internet.defer import inlineCallbacks
+
+from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
+from txdav.common.datastore.sql_tables import _MIGRATION_STATUS_MIGRATING
+from txdav.common.datastore.podding.migration import MigrationController, \
+ UserAlreadyBeingMigrated
+from txdav.common.datastore.sql import ECALENDARTYPE
+
+class TestCalendarMigration(MultiStoreConduitTest):
+ """
+ Test that the migration api works for migration.
+ """
+
+ @inlineCallbacks
+ def _provision_remote(self):
+ """
+ Provision the user account on the remote pod.
+ """
+ yield self.homeUnderTest(txn=self.otherTransactionUnderTest(), name="puser01", create=True)
+ yield self.addressbookHomeUnderTest(txn=self.otherTransactionUnderTest(), name="puser01", create=True)
+ yield self.otherCommit()
+
+
+ @inlineCallbacks
+ def test_step1_ok(self):
+ """
+ Test that step1 works.
+ """
+
+ yield self._provision_remote()
+
+ migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+ yield migrator.step1("puser01")
+
+ # Verify local home is not visible to normal api calls
+ local_home = yield self.homeUnderTest(name="puser01")
+ self.assertTrue(local_home is not None)
+ self.assertTrue(local_home.external())
+ yield self.commit()
+
+ # Verify local migrating items exist
+ local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+ self.assertTrue(local_home is not None)
+ self.assertTrue(not local_home.external())
+
+ results = yield local_home.loadChildren()
+ self.assertEqual(set([result.name() for result in results]), set(("calendar", "tasks", "inbox",)))
+
+
+ @inlineCallbacks
+ def test_step1_twice(self):
+ """
+ Test that step1 fails a second time.
+ """
+
+ yield self._provision_remote()
+
+ migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+ yield migrator.step1("puser01")
+
+ # Verify local migrating items exist
+ local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+ self.assertTrue(local_home is not None)
+ self.assertTrue(not local_home.external())
+ yield self.commit()
+
+ migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+ yield self.failUnlessFailure(migrator.step1("puser01"), UserAlreadyBeingMigrated)
+
+
+ @inlineCallbacks
+ def test_step2_no_change(self):
+ """
+ Test that step2 with no changes on the old pod leaves the sync'd data unchanged.
+ """
+
+ yield self._provision_remote()
+
+ migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+ yield migrator.step1("puser01")
+
+ # Verify local migrating items exist
+ local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+ results = yield local_home.loadChildren()
+ self.assertEqual(set([result.name() for result in results]), set(("calendar", "tasks", "inbox",)))
+ rids = dict([(result.id(), result.external_id()) for result in results])
+
+ migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+ yield migrator.step2("puser01")
+
+ # Verify local migrating items exist
+ local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+ results = yield local_home.loadChildren()
+ self.assertEqual(set([result.name() for result in results]), set(("calendar", "tasks", "inbox",)))
+ rids2 = dict([(result.id(), result.external_id()) for result in results])
+ self.assertEqual(rids, rids2)
+
+
+ @inlineCallbacks
+ def test_step2_changes(self):
+ """
+ Test that step2 correctly syncs changes made on the old pod.
+ """
+
+ yield self._provision_remote()
+
+ migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+ yield migrator.step1("puser01")
+
+ local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+ results = yield local_home.loadChildren()
+ self.assertEqual(set([result.name() for result in results]), set(("calendar", "tasks", "inbox",)))
+ rids = dict([(result.id(), result.external_id()) for result in results])
+
+ # Create new calendar
+ txn = self.otherTransactionUnderTest()
+ remote_home = yield self.homeUnderTest(txn, name="puser01")
+ new_calendar = yield remote_home.createCalendarWithName("new_calendar")
+ external_id = new_calendar.id()
+ yield self.otherCommit()
+
+ migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+ yield migrator.step2("puser01")
+
+ local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+ new_calendar = yield local_home.calendarWithName("new_calendar")
+ rids[new_calendar.id()] = external_id
+
+ results = yield local_home.loadChildren()
+ self.assertEqual(set([result.name() for result in results]), set(("new_calendar", "calendar", "tasks", "inbox",)))
+ rids2 = dict([(result.id(), result.external_id()) for result in results])
+ self.assertEqual(rids, rids2)
+
+ # Remove old calendar
+ txn = self.otherTransactionUnderTest()
+ remote_home = yield self.homeUnderTest(txn, name="puser01")
+ old_calendar = yield remote_home.calendarWithName("calendar")
+ external_id = old_calendar.id()
+ yield old_calendar.remove()
+ del rids[dict([(v, k) for k, v in rids.items()])[external_id]]
+ yield self.otherCommit()
+
+ migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+ yield migrator.step2("puser01")
+
+ local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+ results = yield local_home.loadChildren()
+ self.assertEqual(set([result.name() for result in results]), set(("new_calendar", "tasks", "inbox",)))
+ rids2 = dict([(result.id(), result.external_id()) for result in results])
+ self.assertEqual(rids, rids2)
+ tasks = yield local_home.calendarWithName("tasks")
+ self.assertFalse(tasks.isUsedForFreeBusy())
+
+ # Add, remove, rename, prop change
+ txn = self.otherTransactionUnderTest()
+ remote_home = yield self.homeUnderTest(txn, name="puser01")
+
+ new_calendar = yield remote_home.createCalendarWithName("new_calendar2")
+ external_id2 = new_calendar.id()
+
+ old_calendar = yield remote_home.calendarWithName("new_calendar")
+ external_id = old_calendar.id()
+ yield old_calendar.remove()
+ del rids[dict([(v, k) for k, v in rids.items()])[external_id]]
+
+ tasks = yield remote_home.calendarWithName("tasks")
+ yield tasks.rename("todos")
+ yield tasks.setUsedForFreeBusy(True)
+
+ yield self.otherCommit()
+
+ migrator = MigrationController(self.storeUnderTest(), homeTypes=(ECALENDARTYPE,))
+ yield migrator.step2("puser01")
+
+ local_home = yield self.homeUnderTest(name="puser01", migration=_MIGRATION_STATUS_MIGRATING)
+ new_calendar = yield local_home.calendarWithName("new_calendar2")
+ rids[new_calendar.id()] = external_id2
+
+ results = yield local_home.loadChildren()
+ self.assertEqual(set([result.name() for result in results]), set(("new_calendar2", "todos", "inbox",)))
+ rids2 = dict([(result.id(), result.external_id()) for result in results])
+ self.assertEqual(rids, rids2)
+ tasks = yield local_home.calendarWithName("todos")
+ self.assertTrue(tasks.isUsedForFreeBusy())
</ins></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastorepoddingtestutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/util.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/util.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/podding/test/util.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -172,6 +172,7 @@
</span><span class="cx"> store = yield builder.buildStore(self, self.notifierFactory, directory)
</span><span class="cx"> store.queryCacher = None # Cannot use query caching
</span><span class="cx"> store.conduit = self.makeConduit(store)
</span><ins>+ store.name = "internal" if internal else "external"
</ins><span class="cx"> returnValue(store)
</span><span class="cx">
</span><span class="cx">
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -64,7 +64,9 @@
</span><span class="cx"> from txdav.common.datastore.sql_tables import _BIND_MODE_OWN, \
</span><span class="cx"> _BIND_STATUS_ACCEPTED, _BIND_STATUS_DECLINED, _BIND_STATUS_INVALID, \
</span><span class="cx"> _BIND_STATUS_INVITED, _BIND_MODE_DIRECT, _BIND_STATUS_DELETED, \
</span><del>- _BIND_MODE_INDIRECT, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL
</del><ins>+ _BIND_MODE_INDIRECT, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL, \
+ _MIGRATION_STATUS_NONE, _MIGRATION_STATUS_MIGRATING, \
+ _MIGRATION_STATUS_MIGRATED
</ins><span class="cx"> from txdav.common.datastore.sql_tables import schema, splitSQLString
</span><span class="cx"> from txdav.common.icommondatastore import ConcurrentModification, \
</span><span class="cx"> RecordNotAllowedError, ExternalShareFailed, ShareNotAllowed, \
</span><span class="lines">@@ -473,8 +475,23 @@
</span><span class="cx"> enableCalendars, enableAddressBooks,
</span><span class="cx"> notifierFactories, label, migrating=False, disableCache=False):
</span><span class="cx"> self._store = store
</span><del>- self._calendarHomes = {}
- self._addressbookHomes = {}
</del><ins>+ self._homes = {
+ _MIGRATION_STATUS_NONE: {
+ ECALENDARTYPE : {},
+ EADDRESSBOOKTYPE: {},
+ ENOTIFICATIONTYPE: {}
+ },
+ _MIGRATION_STATUS_MIGRATING: {
+ ECALENDARTYPE : {},
+ EADDRESSBOOKTYPE: {},
+ ENOTIFICATIONTYPE: {}
+ },
+ _MIGRATION_STATUS_MIGRATED: {
+ ECALENDARTYPE : {},
+ EADDRESSBOOKTYPE: {},
+ ENOTIFICATIONTYPE: {}
+ },
+ }
</ins><span class="cx"> self._notificationHomes = {}
</span><span class="cx"> self._notifierFactories = notifierFactories
</span><span class="cx"> self._notifiedAlready = set()
</span><span class="lines">@@ -583,16 +600,27 @@
</span><span class="cx"> ).on(self)
</span><span class="cx">
</span><span class="cx">
</span><del>- def _determineMemo(self, storeType, uid, create=False): #@UnusedVariable
</del><ins>+ def _determineMemo(self, storeType, uid, create=False, migration=_MIGRATION_STATUS_NONE): #@UnusedVariable
</ins><span class="cx"> """
</span><span class="cx"> Determine the memo dictionary to use for homeWithUID.
</span><span class="cx"> """
</span><del>- if storeType == ECALENDARTYPE:
- return self._calendarHomes
- else:
- return self._addressbookHomes
</del><ins>+ return self._homes[migration][storeType]
</ins><span class="cx">
</span><span class="cx">
</span><ins>+ def homeTypes(self):
+ """
+ Return list of all supported home types.
+ """
+ return self._homeClass.keys()
+
+
+ def homeClass(self, storeType):
+ """
+ Return the home class for the specified store type.
+ """
+ return self._homeClass.get(storeType)
+
+
</ins><span class="cx"> @inlineCallbacks
</span><span class="cx"> def homes(self, storeType):
</span><span class="cx"> """
</span><span class="lines">@@ -609,19 +637,19 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @memoizedKey("uid", _determineMemo)
</span><del>- def homeWithUID(self, storeType, uid, create=False):
- if storeType not in (ECALENDARTYPE, EADDRESSBOOKTYPE):
</del><ins>+ def homeWithUID(self, storeType, uid, create=False, migration=_MIGRATION_STATUS_NONE):
+ if storeType not in self._homeClass:
</ins><span class="cx"> raise RuntimeError("Unknown home type.")
</span><span class="cx">
</span><del>- return self._homeClass[storeType].homeWithUID(self, uid, create)
</del><ins>+ return self._homeClass[storeType].homeWithUID(self, uid, create, migration)
</ins><span class="cx">
</span><span class="cx">
</span><del>- def calendarHomeWithUID(self, uid, create=False):
- return self.homeWithUID(ECALENDARTYPE, uid, create=create)
</del><ins>+ def calendarHomeWithUID(self, uid, create=False, migration=_MIGRATION_STATUS_NONE):
+ return self.homeWithUID(ECALENDARTYPE, uid, create=create, migration=migration)
</ins><span class="cx">
</span><span class="cx">
</span><del>- def addressbookHomeWithUID(self, uid, create=False):
- return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create)
</del><ins>+ def addressbookHomeWithUID(self, uid, create=False, migration=_MIGRATION_STATUS_NONE):
+ return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create, migration=migration)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -629,8 +657,7 @@
</span><span class="cx"> """
</span><span class="cx"> Load a calendar or addressbook home by its integer resource ID.
</span><span class="cx"> """
</span><del>- uid = (yield self._homeClass[storeType]
- .homeUIDWithResourceID(self, rid))
</del><ins>+ uid = (yield self._homeClass[storeType].homeUIDWithResourceID(self, rid))
</ins><span class="cx"> if uid:
</span><span class="cx"> result = (yield self.homeWithUID(storeType, uid, create))
</span><span class="cx"> else:
</span><span class="lines">@@ -646,12 +673,19 @@
</span><span class="cx"> return self.homeWithResourceID(EADDRESSBOOKTYPE, rid)
</span><span class="cx">
</span><span class="cx">
</span><del>- @memoizedKey("uid", "_notificationHomes")
- def notificationsWithUID(self, uid, create=True):
</del><ins>+ def _determineNotificationMemo(self, uid, create=False, migration=_MIGRATION_STATUS_NONE): #@UnusedVariable
</ins><span class="cx"> """
</span><ins>+ Determine the memo dictionary to use for homeWithUID.
+ """
+ return self._homes[migration][ENOTIFICATIONTYPE]
+
+
+ @memoizedKey("uid", _determineNotificationMemo)
+ def notificationsWithUID(self, uid, create=True, migration=_MIGRATION_STATUS_NONE):
+ """
</ins><span class="cx"> Implement notificationsWithUID.
</span><span class="cx"> """
</span><del>- return NotificationCollection.notificationsWithUID(self, uid, create)
</del><ins>+ return NotificationCollection.notificationsWithUID(self, uid, create, migration)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @memoizedKey("rid", "_notificationHomes")
</span><span class="lines">@@ -1590,6 +1624,8 @@
</span><span class="cx"> class CommonHome(SharingHomeMixIn):
</span><span class="cx"> log = Logger()
</span><span class="cx">
</span><ins>+ _storeType = "home"
+
</ins><span class="cx"> # All these need to be initialized by derived classes for each store type
</span><span class="cx"> _homeType = None
</span><span class="cx"> _homeTable = None
</span><span class="lines">@@ -1602,11 +1638,9 @@
</span><span class="cx"> _dataVersionKey = None
</span><span class="cx"> _dataVersionValue = None
</span><span class="cx">
</span><del>- _cacher = None # Initialize in derived classes
-
</del><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def makeClass(cls, transaction, ownerUID, no_cache=False):
</del><ins>+ def makeClass(cls, transaction, homeData, metadataData):
</ins><span class="cx"> """
</span><span class="cx"> Build the actual home class taking into account the possibility that we might need to
</span><span class="cx"> switch in the external version of the class.
</span><span class="lines">@@ -1615,19 +1649,100 @@
</span><span class="cx"> @type transaction: L{CommonStoreTransaction}
</span><span class="cx"> @param ownerUID: owner UID of home to load
</span><span class="cx"> @type ownerUID: C{str}
</span><ins>+ @param migration: migration status for home to load
+ @type migration: C{int}
</ins><span class="cx"> @param no_cache: should cached query be used
</span><span class="cx"> @type no_cache: C{bool}
</span><span class="cx"> """
</span><del>- home = cls(transaction, ownerUID)
- actualHome = yield home.initFromStore(no_cache)
- returnValue(actualHome)
</del><span class="cx">
</span><ins>+ resourceID = homeData[cls.homeColumns().index(cls._homeSchema.RESOURCE_ID)]
+ ownerUID = homeData[cls.homeColumns().index(cls._homeSchema.OWNER_UID)]
+ status = homeData[cls.homeColumns().index(cls._homeSchema.STATUS)]
+ migration = homeData[cls.homeColumns().index(cls._homeSchema.MIGRATION)]
</ins><span class="cx">
</span><del>- def __init__(self, transaction, ownerUID):
</del><ins>+ # If the status is external we need to convert this object to a CommonHomeExternal class which will
+ # have the right behavior for non-hosted external users.
+ if status == _HOME_STATUS_EXTERNAL and migration == _MIGRATION_STATUS_NONE:
+ home = cls._externalClass(transaction, ownerUID, resourceID)
+ else:
+ home = cls(transaction, ownerUID, migration=migration)
+
+ for attr, value in zip(cls.homeAttributes(), homeData):
+ setattr(home, attr, value)
+
+ for attr, value in zip(cls.metadataAttributes(), metadataData):
+ setattr(home, attr, value)
+
+ yield home._loadPropertyStore()
+
+ for factory_type, factory in transaction._notifierFactories.items():
+ home.addNotifier(factory_type, factory.newNotifier(home))
+
+ yield home.made()
+
+ returnValue(home)
+
+
+ @classmethod
+ @inlineCallbacks
+ def _getDBData(cls, transaction, ownerUID, migration=_MIGRATION_STATUS_NONE, no_cache=False):
+ """
+ Given a set of identifying information, load the metadataData rows for the object.
+
+ @param transaction: transaction
+ @type transaction: L{CommonStoreTransaction}
+ @param ownerUID: owner UID of home to load
+ @type ownerUID: C{str}
+ @param migration: migration status for home to load
+ @type migration: C{int}
+ @param no_cache: should cached query be used
+ @type no_cache: C{bool}
+ """
+
+ queryCacher = transaction._queryCacher
+ homeData = None
+ if queryCacher:
+ cacheKey = queryCacher.keyForHomeData(cls._homeType, ownerUID, migration)
+ homeData = yield queryCacher.get(cacheKey)
+
+ if homeData is None:
+ homeData = yield cls._homeColumnsFromOwnerQuery.on(transaction, ownerUID=ownerUID, migration=migration)
+ if homeData:
+ homeData = homeData[0]
+ if not no_cache and queryCacher:
+ yield queryCacher.set(cacheKey, homeData)
+
+ if not homeData:
+ returnValue(None)
+
+ resourceID = homeData[cls.homeColumns().index(cls._homeSchema.RESOURCE_ID)]
+
+ metadataData = None
+ if queryCacher:
+ # Get cached copy
+ cacheKey = queryCacher.keyForHomeMetaData(resourceID)
+ metadataData = yield queryCacher.get(cacheKey)
+
+ if metadataData is None:
+ # Don't have a cached copy
+ metadataData = (yield cls._metaDataQuery.on(transaction, resourceID=resourceID))
+ if metadataData:
+ metadataData = metadataData[0]
+ else:
+ metadataData = None
+ if queryCacher:
+ # Cache the metadataData
+ yield queryCacher.setAfterCommit(transaction, cacheKey, metadataData)
+
+ returnValue((homeData, metadataData))
+
+
+ def __init__(self, transaction, ownerUID, migration=_MIGRATION_STATUS_NONE):
</ins><span class="cx"> self._txn = transaction
</span><span class="cx"> self._ownerUID = ownerUID
</span><span class="cx"> self._resourceID = None
</span><span class="cx"> self._status = _HOME_STATUS_NORMAL
</span><ins>+ self._migration = migration
</ins><span class="cx"> self._dataVersion = None
</span><span class="cx"> self._childrenLoaded = False
</span><span class="cx"> self._children = {}
</span><span class="lines">@@ -1636,8 +1751,6 @@
</span><span class="cx"> self._created = None
</span><span class="cx"> self._modified = None
</span><span class="cx"> self._syncTokenRevision = None
</span><del>- if transaction._disableCache:
- self._cacher = _EmptyCacher()
</del><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="lines">@@ -1650,6 +1763,13 @@
</span><span class="cx"> CommonStoreTransaction._homeClass[cls._homeType] = cls
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def made(self):
+ """
+ Called after class has been built. This is here to allow sub-classes to do their own initialization stuff.
+ """
+ return succeed(None)
+
+
</ins><span class="cx"> def quotaAllowedBytes(self):
</span><span class="cx"> return self._txn.store().quota
</span><span class="cx">
</span><span class="lines">@@ -1660,7 +1780,8 @@
</span><span class="cx"> return Select(
</span><span class="cx"> cls.homeColumns(),
</span><span class="cx"> From=home,
</span><del>- Where=home.OWNER_UID == Parameter("ownerUID")
</del><ins>+ Where=(home.OWNER_UID == Parameter("ownerUID")).And(
+ home.MIGRATION == Parameter("migration"))
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -1692,6 +1813,7 @@
</span><span class="cx"> cls._homeSchema.RESOURCE_ID,
</span><span class="cx"> cls._homeSchema.OWNER_UID,
</span><span class="cx"> cls._homeSchema.STATUS,
</span><ins>+ cls._homeSchema.MIGRATION,
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -1707,6 +1829,7 @@
</span><span class="cx"> "_resourceID",
</span><span class="cx"> "_ownerUID",
</span><span class="cx"> "_status",
</span><ins>+ "_migration",
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -1742,66 +1865,6 @@
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><del>- @inlineCallbacks
- def initFromStore(self, no_cache=False):
- """
- Initialize this object from the store. We read in and cache all the
- extra meta-data from the DB to avoid having to do DB queries for those
- individually later.
- """
- result = yield self._cacher.get(self._ownerUID)
- if result is None:
- result = yield self._homeColumnsFromOwnerQuery.on(self._txn, ownerUID=self._ownerUID)
- if result:
- result = result[0]
- if not no_cache:
- yield self._cacher.set(self._ownerUID, result)
-
- if result:
- for attr, value in zip(self.homeAttributes(), result):
- setattr(self, attr, value)
-
- # STOP! If the status is external we need to convert this object to a CommonHomeExternal class which will
- # have the right behavior for non-hosted external users.
- if self._status == _HOME_STATUS_EXTERNAL:
- actualHome = self._externalClass(self._txn, self._ownerUID, self._resourceID)
- else:
- actualHome = self
- yield actualHome.initMetaDataFromStore()
- yield actualHome._loadPropertyStore()
-
- for factory_type, factory in self._txn._notifierFactories.items():
- actualHome.addNotifier(factory_type, factory.newNotifier(actualHome))
-
- returnValue(actualHome)
- else:
- returnValue(None)
-
-
- @inlineCallbacks
- def initMetaDataFromStore(self):
- """
- Load up the metadata and property store
- """
-
- queryCacher = self._txn._queryCacher
- if queryCacher:
- # Get cached copy
- cacheKey = queryCacher.keyForHomeMetaData(self._resourceID)
- data = yield queryCacher.get(cacheKey)
- else:
- data = None
- if data is None:
- # Don't have a cached copy
- data = (yield self._metaDataQuery.on(self._txn, resourceID=self._resourceID))[0]
- if queryCacher:
- # Cache the data
- yield queryCacher.setAfterCommit(self._txn, cacheKey, data)
-
- for attr, value in zip(self.metadataAttributes(), data):
- setattr(self, attr, value)
-
-
</del><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def listHomes(cls, txn):
</span><span class="lines">@@ -1810,9 +1873,11 @@
</span><span class="cx">
</span><span class="cx"> @return: an iterable of C{str}s.
</span><span class="cx"> """
</span><ins>+ ch = cls._homeSchema
</ins><span class="cx"> rows = yield Select(
</span><del>- [cls._homeSchema.OWNER_UID],
</del><ins>+ [ch.OWNER_UID],
</ins><span class="cx"> From=cls._homeSchema,
</span><ins>+ Where=(ch.MIGRATION == _MIGRATION_STATUS_NONE)
</ins><span class="cx"> ).on(txn)
</span><span class="cx"> rids = [row[0] for row in rows]
</span><span class="cx"> returnValue(rids)
</span><span class="lines">@@ -1820,9 +1885,11 @@
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def homeWithUID(cls, txn, uid, create=False):
- homeObject = yield cls.makeClass(txn, uid)
- if homeObject is not None:
</del><ins>+ def homeWithUID(cls, txn, uid, create=False, migration=_MIGRATION_STATUS_NONE):
+ dbData = yield cls._getDBData(txn, uid, migration)
+ if dbData is not None:
+ homeData, metadataData = dbData
+ homeObject = yield cls.makeClass(txn, homeData, metadataData)
</ins><span class="cx"> returnValue(homeObject)
</span><span class="cx"> else:
</span><span class="cx"> if not create:
</span><span class="lines">@@ -1847,6 +1914,7 @@
</span><span class="cx"> {
</span><span class="cx"> cls._homeSchema.OWNER_UID: uid,
</span><span class="cx"> cls._homeSchema.STATUS: state,
</span><ins>+ cls._homeSchema.MIGRATION: migration,
</ins><span class="cx"> cls._homeSchema.DATAVERSION: cls._dataVersionValue,
</span><span class="cx"> },
</span><span class="cx"> Return=cls._homeSchema.RESOURCE_ID
</span><span class="lines">@@ -1856,8 +1924,10 @@
</span><span class="cx"> yield savepoint.rollback(txn)
</span><span class="cx">
</span><span class="cx"> # Retry the query - row may exist now, if not re-raise
</span><del>- homeObject = yield cls.makeClass(txn, uid)
- if homeObject:
</del><ins>+ dbData = yield cls._getDBData(txn, uid, migration)
+ if dbData:
+ homeData, metadataData = dbData
+ homeObject = yield cls.makeClass(txn, homeData, metadataData)
</ins><span class="cx"> returnValue(homeObject)
</span><span class="cx"> else:
</span><span class="cx"> raise
</span><span class="lines">@@ -1867,8 +1937,11 @@
</span><span class="cx"> # Note that we must not cache the owner_uid->resource_id
</span><span class="cx"> # mapping in _cacher when creating as we don't want that to appear
</span><span class="cx"> # until AFTER the commit
</span><del>- home = yield cls.makeClass(txn, uid, no_cache=True)
- yield home.createdHome()
</del><ins>+ dbData = yield cls._getDBData(txn, uid, migration, no_cache=True)
+ homeData, metadataData = dbData
+ home = yield cls.makeClass(txn, homeData, metadataData)
+ if migration == _MIGRATION_STATUS_NONE:
+ yield home.createdHome()
</ins><span class="cx"> returnValue(home)
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -1886,6 +1959,10 @@
</span><span class="cx"> return "<%s: %s, %s>" % (self.__class__.__name__, self._resourceID, self._ownerUID)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def cacheKey(self):
+ return "{}.{}".format(self._migration, self._ownerUID)
+
+
</ins><span class="cx"> def id(self):
</span><span class="cx"> """
</span><span class="cx"> Retrieve the store identifier for this home.
</span><span class="lines">@@ -1905,6 +1982,32 @@
</span><span class="cx"> return self._ownerUID
</span><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
+ def updateDetails(self, newUID=None, newStatus=None, newMigration=None):
+ """
+ Change any of the uid, status, or migration of this home.
+ """
+ ch = self._homeSchema
+ columns = {}
+ if newUID is not None:
+ columns[ch.OWNER_UID] = newUID
+ if newStatus is not None:
+ columns[ch.STATUS] = newStatus
+ if newMigration is not None:
+ columns[ch.MIGRATION] = newMigration
+ yield Update(
+ columns,
+ Where=(ch.OWNER_UID == self._ownerUID)
+ ).on(self._txn)
+
+ if newUID is not None:
+ self._ownerUID = newUID
+ if newStatus is not None:
+ self._status = newStatus
+ if newStatus is not None:
+ self._migration = newMigration
+
+
</ins><span class="cx"> def external(self):
</span><span class="cx"> """
</span><span class="cx"> Is this an external home.
</span><span class="lines">@@ -2497,6 +2600,10 @@
</span><span class="cx"> per transaction.
</span><span class="cx"> """
</span><span class="cx">
</span><ins>+ # Migrating resources never send notifications
+ if self._migration != _MIGRATION_STATUS_NONE:
+ returnValue(None)
+
</ins><span class="cx"> if self._txn.isNotifiedAlready(self):
</span><span class="cx"> returnValue(None)
</span><span class="cx"> self._txn.notificationAddedForObject(self)
</span><span class="lines">@@ -2588,7 +2695,55 @@
</span><span class="cx"> returnValue((ownerHome, ownerName))
</span><span class="cx">
</span><span class="cx">
</span><ins>+ #
+ # Migration related
+ #
</ins><span class="cx">
</span><ins>+ @inlineCallbacks
+ def migrateToThisPod(self, user, final):
+ """
+ Migrate data for the specified user (on a different pod) to this pod. L{final} is
+ C{True} when this is the final incremental sync (where we need to sync everything).
+ When C{False} we only sync owned child collections (without properties).
+
+ @param user: the user to migrate
+ @type user: C{str}
+ @param final: whether or not this is the final sync
+ @type final: C{bool}
+ """
+
+ assert self._migration == _MIGRATION_STATUS_MIGRATING
+
+ # Get external home for the user (create if needed)
+ otherHome = yield self._txn.homeWithUID(self._homeType, user, create=True)
+ assert otherHome._status == _HOME_STATUS_EXTERNAL
+
+ # Force the external home to look like it is migrating. This will enable certain external API calls
+ # that are normally disabled for sharing (e.g., ability to load all child resources).
+ otherHome._migration = _MIGRATION_STATUS_MIGRATING
+
+ local_children = yield self.loadChildren()
+ local_children = dict([(child.external_id(), child) for child in local_children if child.owned()])
+
+ # Get list of owned child collections
+ remote_children = yield otherHome.loadChildren()
+ remote_children = dict([(child.id(), child) for child in remote_children if child.owned()])
+
+ # Remove local ones no longer present on remote
+ for external_id in set(local_children.keys()) - set(remote_children.keys()):
+ child = local_children[external_id]
+ yield child.remove()
+ del local_children[external_id]
+
+ # Sync each one on the remote side
+ for remote_child in remote_children.values():
+ local_child = local_children.get(remote_child.id())
+ if local_child is None:
+ local_child = yield self.createChildWithName(remote_child.name(), remote_child.id())
+ yield local_child.migrateToThisPod(remote_child, final)
+
+
+
</ins><span class="cx"> class _SharedSyncLogic(object):
</span><span class="cx"> """
</span><span class="cx"> Logic for maintaining sync-token shared between notification collections and
</span><span class="lines">@@ -3187,9 +3342,9 @@
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> if not self.direct() and self.shareStatus() != _BIND_STATUS_ACCEPTED:
</span><del>- if self.external():
</del><ins>+ ownerView = yield self.ownerView()
+ if ownerView.external():
</ins><span class="cx"> yield self._replyExternalInvite(_BIND_STATUS_ACCEPTED, summary)
</span><del>- ownerView = yield self.ownerView()
</del><span class="cx"> yield ownerView.updateShare(self, status=_BIND_STATUS_ACCEPTED)
</span><span class="cx"> yield self.newShare(displayname=summary)
</span><span class="cx"> if not ownerView.external():
</span><span class="lines">@@ -3203,9 +3358,9 @@
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> if not self.direct() and self.shareStatus() != _BIND_STATUS_DECLINED:
</span><del>- if self.external():
</del><ins>+ ownerView = yield self.ownerView()
+ if ownerView.external():
</ins><span class="cx"> yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
</span><del>- ownerView = yield self.ownerView()
</del><span class="cx"> yield ownerView.updateShare(self, status=_BIND_STATUS_DECLINED)
</span><span class="cx"> if not ownerView.external():
</span><span class="cx"> yield self._sendReplyNotification(ownerView)
</span><span class="lines">@@ -3865,17 +4020,38 @@
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> return (
</span><del>- cls._bindSchema.BIND_MODE,
</del><span class="cx"> cls._bindSchema.HOME_RESOURCE_ID,
</span><span class="cx"> cls._bindSchema.RESOURCE_ID,
</span><span class="cx"> cls._bindSchema.EXTERNAL_ID,
</span><span class="cx"> cls._bindSchema.RESOURCE_NAME,
</span><ins>+ cls._bindSchema.BIND_MODE,
</ins><span class="cx"> cls._bindSchema.BIND_STATUS,
</span><span class="cx"> cls._bindSchema.BIND_REVISION,
</span><span class="cx"> cls._bindSchema.MESSAGE
</span><span class="cx"> )
</span><span class="cx">
</span><ins>+
+ @classmethod
+ def bindAttributes(cls):
+ """
+ Return a list of attribute names for retrieval of during creation. This allows
+ different child classes to have their own type specific data, but still make use of the
+ common base logic.
+ """
+
+ return (
+ "_resourceID", # This is actually ignored - it should be the home id
+ "_resourceID",
+ "_externalID",
+ "_name",
+ "_bindMode",
+ "_bindStatus",
+ "_bindRevision",
+ "_bindMessage",
+ )
+
</ins><span class="cx"> bindColumnCount = 8
</span><ins>+ bindColumnSyncStart = 3 # Starting column for bind attributes to sync
</ins><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> def additionalBindColumns(cls):
</span><span class="lines">@@ -3945,6 +4121,8 @@
</span><span class="cx"> """
</span><span class="cx"> log = Logger()
</span><span class="cx">
</span><ins>+ _storeType = "child"
+
</ins><span class="cx"> compareAttributes = (
</span><span class="cx"> "_name",
</span><span class="cx"> "_home",
</span><span class="lines">@@ -3985,8 +4163,16 @@
</span><span class="cx"> @rtype: L{CommonHomeChild}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- bindMode, _ignore_homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = bindData
</del><ins>+ bindData = dict(zip(cls.bindColumns(), bindData))
</ins><span class="cx">
</span><ins>+ resourceID = bindData[cls._bindSchema.RESOURCE_ID]
+ externalID = bindData[cls._bindSchema.EXTERNAL_ID]
+ name = bindData[cls._bindSchema.RESOURCE_NAME]
+ bindMode = bindData[cls._bindSchema.BIND_MODE]
+ bindStatus = bindData[cls._bindSchema.BIND_STATUS]
+ bindRevision = bindData[cls._bindSchema.BIND_REVISION]
+ bindMessage = bindData[cls._bindSchema.MESSAGE]
+
</ins><span class="cx"> if ownerHome is None:
</span><span class="cx"> if bindMode == _BIND_MODE_OWN:
</span><span class="cx"> ownerHome = home
</span><span class="lines">@@ -3996,7 +4182,9 @@
</span><span class="cx"> else:
</span><span class="cx"> ownerName = None
</span><span class="cx">
</span><del>- c = cls._externalClass if ownerHome.external() else cls
</del><ins>+ # The actual class depends on whether the owner home is external or not. We need to be careful here,
+ # because we need to determine the proper class from the ownerHome, not the home that called this method.
+ c = ownerHome._childClass._externalClass if ownerHome.external() else ownerHome._childClass
</ins><span class="cx"> child = c(
</span><span class="cx"> home=home,
</span><span class="cx"> name=name,
</span><span class="lines">@@ -4302,6 +4490,36 @@
</span><span class="cx"> returnValue(child)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def externalize(self):
+ """
+ Create a dictionary mapping key attributes so this object can be sent over a cross-pod call
+ and reconstituted at the other end. Note that the other end may have a different schema so
+ the attributes may not match exactly and will need to be processed accordingly.
+ """
+ serialized = {}
+ serialized["bind"] = dict([(attr[1:], getattr(self, attr, None)) for attr in self.bindAttributes()])
+ serialized["additionalBind"] = dict([(attr[1:], getattr(self, attr, None)) for attr in self.additionalBindAttributes()])
+ serialized["metadata"] = dict([(attr[1:], getattr(self, attr, None)) for attr in self.metadataAttributes()])
+ return serialized
+
+
+ @classmethod
+ @inlineCallbacks
+ def internalize(cls, parent, mapping):
+ """
+ Given a mapping generated by L{externalize}, convert the values into an array of database
+ like items that conforms to the ordering of L{_allColumns} so it can be fed into L{makeClass}.
+ Note that there may be a schema mismatch with the external data, so treat missing items as
+ C{None} and ignore extra items.
+ """
+
+ bind = [mapping["bind"].get(row[1:]) for row in cls.bindAttributes()]
+ additionalBind = [mapping["additionalBind"].get(row[1:]) for row in cls.additionalBindAttributes()]
+ metadata = [mapping["metadata"].get(row[1:]) for row in cls.metadataAttributes()]
+ child = yield cls.makeClass(parent, bind, additionalBind, metadata)
+ returnValue(child)
+
+
</ins><span class="cx"> @classproperty
</span><span class="cx"> def _metadataByIDQuery(cls): #@NoSelf
</span><span class="cx"> """
</span><span class="lines">@@ -5051,6 +5269,10 @@
</span><span class="cx"> a child resource being added, changed or removed.
</span><span class="cx"> @type property_change: C{bool}
</span><span class="cx"> """
</span><ins>+ # Migrating resources never send notifications
+ if self.ownerHome()._migration != _MIGRATION_STATUS_NONE:
+ returnValue(None)
+
</ins><span class="cx"> if self._txn.isNotifiedAlready(self):
</span><span class="cx"> returnValue(None)
</span><span class="cx"> self._txn.notificationAddedForObject(self)
</span><span class="lines">@@ -5121,13 +5343,72 @@
</span><span class="cx"> log.debug("CommonHomeChild.bumpModified failed")
</span><span class="cx">
</span><span class="cx">
</span><ins>+ #
+ # Migration related
+ #
</ins><span class="cx">
</span><ins>+ @inlineCallbacks
+ def migrateToThisPod(self, remote, final):
+ """
+ Migrate data for the specified user (on a different pod) to this pod. L{final} is
+ C{True} when this is the final incremental sync (where we need to sync everything).
+ When C{False} we only sync owned child collections (without properties).
+
+ @param remote: the remote collection to migrate
+ @type remote: L{CommonHomeChild}
+ @param final: whether or not this is the final sync
+ @type final: C{bool}
+ """
+
+ assert self.ownerHome()._migration == _MIGRATION_STATUS_MIGRATING
+ assert remote.ownerHome().external()
+
+ # Need to sync bind data if different
+ remote_items = dict([(col, getattr(remote, attr)) for col, attr in zip(remote.bindColumns(), remote.bindAttributes())[self.bindColumnSyncStart:]])
+ remote_items.update(dict([(col, getattr(remote, attr)) for col, attr in zip(remote.additionalBindColumns(), remote.additionalBindAttributes())]))
+
+ local_items = dict([(col, getattr(self, attr)) for col, attr in zip(self.bindColumns(), self.bindAttributes())[self.bindColumnSyncStart:]])
+ local_items.update(dict([(col, getattr(self, attr)) for col, attr in zip(self.additionalBindColumns(), self.additionalBindAttributes())]))
+
+ # Regular bind info first (note we ignore the first few)
+ updates = {}
+ for k, v in remote_items.items():
+ if v != local_items[k]:
+ updates[k] = v
+
+ if updates:
+ yield self.invalidateQueryCache()
+
+ cb = self._bindSchema
+ yield Update(
+ columnMap=updates,
+ Where=(cb.RESOURCE_ID == self._resourceID).And
+ (cb.HOME_RESOURCE_ID == self._home._resourceID)
+ ).on(self._txn)
+
+ # Only do name update if different
+ if self._name != remote._name:
+ # update memos
+ del self._home._children[self._name]
+ self._home._children[remote._name] = self
+ yield self._renameSyncToken()
+
+ # Sync the attributes on this object
+ for attr in remote.bindAttributes()[self.bindColumnSyncStart:]:
+ setattr(self, attr, getattr(remote, attr))
+ for attr in remote.additionalBindAttributes():
+ setattr(self, attr, getattr(remote, attr))
+
+
+
</ins><span class="cx"> class CommonObjectResource(FancyEqMixin, object):
</span><span class="cx"> """
</span><span class="cx"> Base class for object resources.
</span><span class="cx"> """
</span><span class="cx"> log = Logger()
</span><span class="cx">
</span><ins>+ _storeType = "resource"
+
</ins><span class="cx"> compareAttributes = (
</span><span class="cx"> "_name",
</span><span class="cx"> "_parentCollection",
</span><span class="lines">@@ -5770,11 +6051,12 @@
</span><span class="cx"> _homeSchema = schema.NOTIFICATION_HOME
</span><span class="cx">
</span><span class="cx">
</span><del>- def __init__(self, txn, uid, resourceID):
</del><ins>+ def __init__(self, txn, uid, resourceID, migration=_MIGRATION_STATUS_NONE):
</ins><span class="cx">
</span><span class="cx"> self._txn = txn
</span><span class="cx"> self._uid = uid
</span><span class="cx"> self._resourceID = resourceID
</span><ins>+ self._migration = migration
</ins><span class="cx"> self._dataVersion = None
</span><span class="cx"> self._notifications = {}
</span><span class="cx"> self._notificationNames = None
</span><span class="lines">@@ -5785,15 +6067,23 @@
</span><span class="cx"> self._notifiers = dict([(factory_name, factory.newNotifier(self),) for factory_name, factory in txn._notifierFactories.items()])
</span><span class="cx">
</span><span class="cx"> _resourceIDFromUIDQuery = Select(
</span><del>- [_homeSchema.RESOURCE_ID], From=_homeSchema,
- Where=_homeSchema.OWNER_UID == Parameter("uid"))
</del><ins>+ [_homeSchema.RESOURCE_ID],
+ From=_homeSchema,
+ Where=(_homeSchema.OWNER_UID == Parameter("uid")).And(
+ _homeSchema.MIGRATION == Parameter("migration"))
+ )
</ins><span class="cx">
</span><span class="cx"> _UIDFromResourceIDQuery = Select(
</span><del>- [_homeSchema.OWNER_UID], From=_homeSchema,
- Where=_homeSchema.RESOURCE_ID == Parameter("rid"))
</del><ins>+ [_homeSchema.OWNER_UID],
+ From=_homeSchema,
+ Where=_homeSchema.RESOURCE_ID == Parameter("rid")
+ )
</ins><span class="cx">
</span><span class="cx"> _provisionNewNotificationsQuery = Insert(
</span><del>- {_homeSchema.OWNER_UID: Parameter("uid")},
</del><ins>+ {
+ _homeSchema.OWNER_UID: Parameter("uid"),
+ _homeSchema.MIGRATION: Parameter("migration"),
+ },
</ins><span class="cx"> Return=_homeSchema.RESOURCE_ID
</span><span class="cx"> )
</span><span class="cx">
</span><span class="lines">@@ -5809,8 +6099,8 @@
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def notificationsWithUID(cls, txn, uid, create):
- rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
</del><ins>+ def notificationsWithUID(cls, txn, uid, create, migration=_MIGRATION_STATUS_NONE):
+ rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid, migration=migration)
</ins><span class="cx">
</span><span class="cx"> if rows:
</span><span class="cx"> resourceID = rows[0][0]
</span><span class="lines">@@ -5822,7 +6112,7 @@
</span><span class="cx"> raise DirectoryRecordNotFoundError("Cannot create home for UID since no directory record exists: {}".format(uid))
</span><span class="cx">
</span><span class="cx"> state = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
</span><del>- if state == _HOME_STATUS_EXTERNAL:
</del><ins>+ if state == _HOME_STATUS_EXTERNAL and migration == _MIGRATION_STATUS_NONE:
</ins><span class="cx"> raise RecordNotAllowedError("Cannot store notifications for external user: {}".format(uid))
</span><span class="cx">
</span><span class="cx"> # Use savepoint so we can do a partial rollback if there is a race
</span><span class="lines">@@ -5832,7 +6122,7 @@
</span><span class="cx">
</span><span class="cx"> try:
</span><span class="cx"> resourceID = str((
</span><del>- yield cls._provisionNewNotificationsQuery.on(txn, uid=uid)
</del><ins>+ yield cls._provisionNewNotificationsQuery.on(txn, uid=uid, migration=migration)
</ins><span class="cx"> )[0][0])
</span><span class="cx"> except Exception:
</span><span class="cx"> # FIXME: Really want to trap the pg.DatabaseError but in a non-
</span><span class="lines">@@ -5840,7 +6130,7 @@
</span><span class="cx"> yield savepoint.rollback(txn)
</span><span class="cx">
</span><span class="cx"> # Retry the query - row may exist now, if not re-raise
</span><del>- rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
</del><ins>+ rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid, migration=migration)
</ins><span class="cx"> if rows:
</span><span class="cx"> resourceID = rows[0][0]
</span><span class="cx"> created = False
</span><span class="lines">@@ -5851,7 +6141,7 @@
</span><span class="cx"> yield savepoint.release(txn)
</span><span class="cx"> else:
</span><span class="cx"> returnValue(None)
</span><del>- collection = cls(txn, uid, resourceID)
</del><ins>+ collection = cls(txn, uid, resourceID, migration)
</ins><span class="cx"> yield collection._loadPropertyStore()
</span><span class="cx"> if created:
</span><span class="cx"> yield collection._initSyncToken()
</span><span class="lines">@@ -5922,6 +6212,32 @@
</span><span class="cx"> return self._uid
</span><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
+ def updateDetails(self, newUID=None, newStatus=None, newMigration=None):
+ """
+ Change any of the uid, status, or migration of this home.
+ """
+ ch = self._homeSchema
+ columns = {}
+ if newUID is not None:
+ columns[ch.OWNER_UID] = newUID
+ if newStatus is not None:
+ columns[ch.STATUS] = newStatus
+ if newMigration is not None:
+ columns[ch.MIGRATION] = newMigration
+ yield Update(
+ columns,
+ Where=(ch.OWNER_UID == self._ownerUID)
+ ).on(self._txn)
+
+ if newUID is not None:
+ self._ownerUID = newUID
+ if newStatus is not None:
+ self._status = newStatus
+ if newStatus is not None:
+ self._migration = newMigration
+
+
</ins><span class="cx"> def owned(self):
</span><span class="cx"> return True
</span><span class="cx">
</span><span class="lines">@@ -6090,6 +6406,11 @@
</span><span class="cx"> the resource has changed. We ensure we only do this once per object
</span><span class="cx"> per transaction.
</span><span class="cx"> """
</span><ins>+
+ # Migrating resources never send notifications
+ if self._migration != _MIGRATION_STATUS_NONE:
+ returnValue(None)
+
</ins><span class="cx"> if self._txn.isNotifiedAlready(self):
</span><span class="cx"> returnValue(None)
</span><span class="cx"> self._txn.notificationAddedForObject(self)
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_externalpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_external.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_external.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_external.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -26,7 +26,8 @@
</span><span class="cx"> from txdav.base.propertystore.sql import PropertyStore
</span><span class="cx"> from txdav.common.datastore.sql import CommonHome, CommonHomeChild, \
</span><span class="cx"> CommonObjectResource
</span><del>-from txdav.common.datastore.sql_tables import _HOME_STATUS_EXTERNAL
</del><ins>+from txdav.common.datastore.sql_tables import _HOME_STATUS_EXTERNAL, \
+ _MIGRATION_STATUS_MIGRATING
</ins><span class="cx"> from txdav.common.icommondatastore import NonExistentExternalShare, \
</span><span class="cx"> ExternalShareFailed
</span><span class="cx">
</span><span class="lines">@@ -38,9 +39,15 @@
</span><span class="cx"> A CommonHome for a user not hosted on this system, but on another pod. This is needed to provide a
</span><span class="cx"> "reference" to the external user so we can share with them. Actual operations to list child resources, etc
</span><span class="cx"> are all stubbed out since no data for the user is actually hosted in this store.
</span><ins>+
+ For sharing, we actually load child resources that are of the "internal" class as those know how to
+ proxy sharing calls to the remote side.
+
+ For migration, we want the child resources to always be external.
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> def __init__(self, transaction, ownerUID, resourceID):
</span><ins>+ self._childClass = self._childClass._externalClass
</ins><span class="cx"> super(CommonHomeExternal, self).__init__(transaction, ownerUID)
</span><span class="cx"> self._resourceID = resourceID
</span><span class="cx"> self._status = _HOME_STATUS_EXTERNAL
</span><span class="lines">@@ -69,13 +76,20 @@
</span><span class="cx"> raise AssertionError("CommonHomeExternal: not supported")
</span><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
</ins><span class="cx"> def loadChildren(self):
</span><span class="cx"> """
</span><span class="cx"> No children.
</span><span class="cx"> """
</span><del>- raise AssertionError("CommonHomeExternal: not supported")
</del><span class="cx">
</span><ins>+ # Only available if migrating
+ if self._migration != _MIGRATION_STATUS_MIGRATING:
+ raise AssertionError("CommonHomeExternal: not supported")
</ins><span class="cx">
</span><ins>+ results = yield super(CommonHomeExternal, self).loadChildren()
+ returnValue(results)
+
+
</ins><span class="cx"> def listChildren(self):
</span><span class="cx"> """
</span><span class="cx"> No children.
</span><span class="lines">@@ -209,9 +223,22 @@
</span><span class="cx"> specific apis to the other pod using cross-pod requests.
</span><span class="cx"> """
</span><span class="cx">
</span><ins>+ @classmethod
+ @inlineCallbacks
+ def loadAllObjects(cls, home):
+ mapping_list = yield home._txn.store().conduit.send_loadchildren(home)
+
+ results = []
+ if mapping_list:
+ for mapping in mapping_list:
+ child = yield cls.internalize(home, mapping)
+ results.append(child)
+ returnValue(results)
+
+
</ins><span class="cx"> def external(self):
</span><span class="cx"> """
</span><del>- Is this an external home.
</del><ins>+ Is this an external home child.
</ins><span class="cx">
</span><span class="cx"> @return: a string.
</span><span class="cx"> """
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemacurrentoracledialectsql"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current-oracle-dialect.sql (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current-oracle-dialect.sql        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current-oracle-dialect.sql        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -17,9 +17,11 @@
</span><span class="cx">
</span><span class="cx"> create table CALENDAR_HOME (
</span><span class="cx"> "RESOURCE_ID" integer primary key,
</span><del>- "OWNER_UID" nvarchar2(255) unique,
</del><ins>+ "OWNER_UID" nvarchar2(255),
</ins><span class="cx"> "STATUS" integer default 0 not null,
</span><del>- "DATAVERSION" integer default 0 not null
</del><ins>+ "MIGRATION" integer default 0 not null,
+ "DATAVERSION" integer default 0 not null,
+ unique("OWNER_UID", "MIGRATION")
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx"> create table HOME_STATUS (
</span><span class="lines">@@ -29,6 +31,14 @@
</span><span class="cx">
</span><span class="cx"> insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
</span><span class="cx"> insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
</span><ins>+create table MIGRATION_STATUS (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('none', 0);
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('migrating', 1);
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('migrated', 2);
</ins><span class="cx"> create table CALENDAR (
</span><span class="cx"> "RESOURCE_ID" integer primary key
</span><span class="cx"> );
</span><span class="lines">@@ -57,9 +67,11 @@
</span><span class="cx">
</span><span class="cx"> create table NOTIFICATION_HOME (
</span><span class="cx"> "RESOURCE_ID" integer primary key,
</span><del>- "OWNER_UID" nvarchar2(255) unique,
</del><ins>+ "OWNER_UID" nvarchar2(255),
</ins><span class="cx"> "STATUS" integer default 0 not null,
</span><del>- "DATAVERSION" integer default 0 not null
</del><ins>+ "MIGRATION" integer default 0 not null,
+ "DATAVERSION" integer default 0 not null,
+ unique("OWNER_UID", "MIGRATION")
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx"> create table NOTIFICATION (
</span><span class="lines">@@ -219,9 +231,11 @@
</span><span class="cx"> create table ADDRESSBOOK_HOME (
</span><span class="cx"> "RESOURCE_ID" integer primary key,
</span><span class="cx"> "ADDRESSBOOK_PROPERTY_STORE_ID" integer not null,
</span><del>- "OWNER_UID" nvarchar2(255) unique,
</del><ins>+ "OWNER_UID" nvarchar2(255),
</ins><span class="cx"> "STATUS" integer default 0 not null,
</span><del>- "DATAVERSION" integer default 0 not null
</del><ins>+ "MIGRATION" integer default 0 not null,
+ "DATAVERSION" integer default 0 not null,
+ unique("OWNER_UID", "MIGRATION")
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx"> create table ADDRESSBOOK_HOME_METADATA (
</span><span class="lines">@@ -400,7 +414,7 @@
</span><span class="cx"> "VALUE" nvarchar2(255)
</span><span class="cx"> );
</span><span class="cx">
</span><del>-insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '33');
</del><ins>+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '34');
</ins><span class="cx"> insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '5');
</span><span class="cx"> insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
</span><span class="cx"> insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemacurrentsql"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current.sql (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current.sql        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/current.sql        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -53,9 +53,12 @@
</span><span class="cx">
</span><span class="cx"> create table CALENDAR_HOME (
</span><span class="cx"> RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
</span><del>- OWNER_UID varchar(255) not null unique, -- implicit index
</del><ins>+ OWNER_UID varchar(255) not null,
</ins><span class="cx"> STATUS integer default 0 not null, -- enum HOME_STATUS
</span><del>- DATAVERSION integer default 0 not null
</del><ins>+ MIGRATION integer default 0 not null, -- enum MIGRATION_STATUS
+ DATAVERSION integer default 0 not null,
+
+ unique(OWNER_UID, MIGRATION) -- implicit index
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx"> -- Enumeration of statuses
</span><span class="lines">@@ -69,6 +72,16 @@
</span><span class="cx"> insert into HOME_STATUS values (1, 'external');
</span><span class="cx">
</span><span class="cx">
</span><ins>+create table MIGRATION_STATUS (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into MIGRATION_STATUS values (0, 'none' );
+insert into MIGRATION_STATUS values (1, 'migrating');
+insert into MIGRATION_STATUS values (2, 'migrated');
+
+
</ins><span class="cx"> --------------
</span><span class="cx"> -- Calendar --
</span><span class="cx"> --------------
</span><span class="lines">@@ -123,9 +136,12 @@
</span><span class="cx">
</span><span class="cx"> create table NOTIFICATION_HOME (
</span><span class="cx"> RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
</span><del>- OWNER_UID varchar(255) not null unique, -- implicit index
</del><ins>+ OWNER_UID varchar(255) not null,
</ins><span class="cx"> STATUS integer default 0 not null, -- enum HOME_STATUS
</span><del>- DATAVERSION integer default 0 not null
</del><ins>+ MIGRATION integer default 0 not null, -- enum MIGRATION_STATUS
+ DATAVERSION integer default 0 not null,
+
+ unique(OWNER_UID, MIGRATION) -- implicit index
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx"> create table NOTIFICATION (
</span><span class="lines">@@ -398,9 +414,12 @@
</span><span class="cx"> create table ADDRESSBOOK_HOME (
</span><span class="cx"> RESOURCE_ID                                 integer                        primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
</span><span class="cx"> ADDRESSBOOK_PROPERTY_STORE_ID        integer         default nextval('RESOURCE_ID_SEQ') not null,         -- implicit index
</span><del>- OWNER_UID                                 varchar(255)         not null unique, -- implicit index
</del><ins>+ OWNER_UID                                 varchar(255)         not null,
</ins><span class="cx"> STATUS                                 integer         default 0 not null, -- enum HOME_STATUS
</span><del>- DATAVERSION                                 integer         default 0 not null
</del><ins>+ MIGRATION                                 integer          default 0 not null, -- enum MIGRATION_STATUS
+ DATAVERSION                                 integer         default 0 not null,
+
+ unique(OWNER_UID, MIGRATION) -- implicit index
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -751,7 +770,7 @@
</span><span class="cx"> VALUE varchar(255)
</span><span class="cx"> );
</span><span class="cx">
</span><del>-insert into CALENDARSERVER values ('VERSION', '33');
</del><ins>+insert into CALENDARSERVER values ('VERSION', '34');
</ins><span class="cx"> insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '5');
</span><span class="cx"> insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
</span><span class="cx"> insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaoldoracledialectv33sql"></a>
<div class="addfile"><h4>Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/oracle-dialect/v33.sql (0 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/oracle-dialect/v33.sql         (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/oracle-dialect/v33.sql        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -0,0 +1,540 @@
</span><ins>+create sequence RESOURCE_ID_SEQ;
+create sequence INSTANCE_ID_SEQ;
+create sequence ATTACHMENT_ID_SEQ;
+create sequence REVISION_SEQ;
+create sequence WORKITEM_SEQ;
+create table NODE_INFO (
+ "HOSTNAME" nvarchar2(255),
+ "PID" integer not null,
+ "PORT" integer not null,
+ "TIME" timestamp default CURRENT_TIMESTAMP at time zone 'UTC' not null,
+ primary key("HOSTNAME", "PORT")
+);
+
+create table NAMED_LOCK (
+ "LOCK_NAME" nvarchar2(255) primary key
+);
+
+create table CALENDAR_HOME (
+ "RESOURCE_ID" integer primary key,
+ "OWNER_UID" nvarchar2(255) unique,
+ "STATUS" integer default 0 not null,
+ "DATAVERSION" integer default 0 not null
+);
+
+create table HOME_STATUS (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
+create table CALENDAR (
+ "RESOURCE_ID" integer primary key
+);
+
+create table CALENDAR_HOME_METADATA (
+ "RESOURCE_ID" integer primary key references CALENDAR_HOME on delete cascade,
+ "QUOTA_USED_BYTES" integer default 0 not null,
+ "DEFAULT_EVENTS" integer default null references CALENDAR on delete set null,
+ "DEFAULT_TASKS" integer default null references CALENDAR on delete set null,
+ "DEFAULT_POLLS" integer default null references CALENDAR on delete set null,
+ "ALARM_VEVENT_TIMED" nclob default null,
+ "ALARM_VEVENT_ALLDAY" nclob default null,
+ "ALARM_VTODO_TIMED" nclob default null,
+ "ALARM_VTODO_ALLDAY" nclob default null,
+ "AVAILABILITY" nclob default null,
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_METADATA (
+ "RESOURCE_ID" integer primary key references CALENDAR on delete cascade,
+ "SUPPORTED_COMPONENTS" nvarchar2(255) default null,
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table NOTIFICATION_HOME (
+ "RESOURCE_ID" integer primary key,
+ "OWNER_UID" nvarchar2(255) unique,
+ "STATUS" integer default 0 not null,
+ "DATAVERSION" integer default 0 not null
+);
+
+create table NOTIFICATION (
+ "RESOURCE_ID" integer primary key,
+ "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME,
+ "NOTIFICATION_UID" nvarchar2(255),
+ "NOTIFICATION_TYPE" nvarchar2(255),
+ "NOTIFICATION_DATA" nclob,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique("NOTIFICATION_UID", "NOTIFICATION_HOME_RESOURCE_ID")
+);
+
+create table CALENDAR_BIND (
+ "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+ "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+ "EXTERNAL_ID" integer default null,
+ "CALENDAR_RESOURCE_NAME" nvarchar2(255),
+ "BIND_MODE" integer not null,
+ "BIND_STATUS" integer not null,
+ "BIND_REVISION" integer default 0 not null,
+ "MESSAGE" nclob,
+ "TRANSP" integer default 0 not null,
+ "ALARM_VEVENT_TIMED" nclob default null,
+ "ALARM_VEVENT_ALLDAY" nclob default null,
+ "ALARM_VTODO_TIMED" nclob default null,
+ "ALARM_VTODO_ALLDAY" nclob default null,
+ "TIMEZONE" nclob default null,
+ primary key("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_ID"),
+ unique("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_NAME")
+);
+
+create table CALENDAR_BIND_MODE (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('own', 0);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('write', 2);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('direct', 3);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('indirect', 4);
+create table CALENDAR_BIND_STATUS (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invited', 0);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('accepted', 1);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('declined', 2);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invalid', 3);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('deleted', 4);
+create table CALENDAR_TRANSP (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('opaque', 0);
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('transparent', 1);
+create table CALENDAR_OBJECT (
+ "RESOURCE_ID" integer primary key,
+ "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+ "RESOURCE_NAME" nvarchar2(255),
+ "ICALENDAR_TEXT" nclob,
+ "ICALENDAR_UID" nvarchar2(255),
+ "ICALENDAR_TYPE" nvarchar2(255),
+ "ATTACHMENTS_MODE" integer default 0 not null,
+ "DROPBOX_ID" nvarchar2(255),
+ "ORGANIZER" nvarchar2(255),
+ "RECURRANCE_MIN" date,
+ "RECURRANCE_MAX" date,
+ "ACCESS" integer default 0 not null,
+ "SCHEDULE_OBJECT" integer default 0,
+ "SCHEDULE_TAG" nvarchar2(36) default null,
+ "SCHEDULE_ETAGS" nclob default null,
+ "PRIVATE_COMMENTS" integer default 0 not null,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique("CALENDAR_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table CALENDAR_OBJECT_ATTACHMENTS_MO (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('none', 0);
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('write', 2);
+create table CALENDAR_ACCESS_TYPE (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(32) unique
+);
+
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('', 0);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('public', 1);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('private', 2);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('confidential', 3);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('restricted', 4);
+create table TIME_RANGE (
+ "INSTANCE_ID" integer primary key,
+ "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+ "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ "FLOATING" integer not null,
+ "START_DATE" timestamp not null,
+ "END_DATE" timestamp not null,
+ "FBTYPE" integer not null,
+ "TRANSPARENT" integer not null
+);
+
+create table FREE_BUSY_TYPE (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('unknown', 0);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('free', 1);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy', 2);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-unavailable', 3);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-tentative', 4);
+create table TRANSPARENCY (
+ "TIME_RANGE_INSTANCE_ID" integer not null references TIME_RANGE on delete cascade,
+ "USER_ID" nvarchar2(255),
+ "TRANSPARENT" integer not null
+);
+
+create table ATTACHMENT (
+ "ATTACHMENT_ID" integer primary key,
+ "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+ "DROPBOX_ID" nvarchar2(255),
+ "CONTENT_TYPE" nvarchar2(255),
+ "SIZE" integer not null,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "PATH" nvarchar2(1024)
+);
+
+create table ATTACHMENT_CALENDAR_OBJECT (
+ "ATTACHMENT_ID" integer not null references ATTACHMENT on delete cascade,
+ "MANAGED_ID" nvarchar2(255),
+ "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ primary key("ATTACHMENT_ID", "CALENDAR_OBJECT_RESOURCE_ID"),
+ unique("MANAGED_ID", "CALENDAR_OBJECT_RESOURCE_ID")
+);
+
+create table RESOURCE_PROPERTY (
+ "RESOURCE_ID" integer not null,
+ "NAME" nvarchar2(255),
+ "VALUE" nclob,
+ "VIEWER_UID" nvarchar2(255),
+ primary key("RESOURCE_ID", "NAME", "VIEWER_UID")
+);
+
+create table ADDRESSBOOK_HOME (
+ "RESOURCE_ID" integer primary key,
+ "ADDRESSBOOK_PROPERTY_STORE_ID" integer not null,
+ "OWNER_UID" nvarchar2(255) unique,
+ "STATUS" integer default 0 not null,
+ "DATAVERSION" integer default 0 not null
+);
+
+create table ADDRESSBOOK_HOME_METADATA (
+ "RESOURCE_ID" integer primary key references ADDRESSBOOK_HOME on delete cascade,
+ "QUOTA_USED_BYTES" integer default 0 not null,
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table SHARED_ADDRESSBOOK_BIND (
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+ "OWNER_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "EXTERNAL_ID" integer default null,
+ "ADDRESSBOOK_RESOURCE_NAME" nvarchar2(255),
+ "BIND_MODE" integer not null,
+ "BIND_STATUS" integer not null,
+ "BIND_REVISION" integer default 0 not null,
+ "MESSAGE" nclob,
+ primary key("ADDRESSBOOK_HOME_RESOURCE_ID", "OWNER_HOME_RESOURCE_ID"),
+ unique("ADDRESSBOOK_HOME_RESOURCE_ID", "ADDRESSBOOK_RESOURCE_NAME")
+);
+
+create table ADDRESSBOOK_OBJECT (
+ "RESOURCE_ID" integer primary key,
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "RESOURCE_NAME" nvarchar2(255),
+ "VCARD_TEXT" nclob,
+ "VCARD_UID" nvarchar2(255),
+ "KIND" integer not null,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique("ADDRESSBOOK_HOME_RESOURCE_ID", "RESOURCE_NAME"),
+ unique("ADDRESSBOOK_HOME_RESOURCE_ID", "VCARD_UID")
+);
+
+create table ADDRESSBOOK_OBJECT_KIND (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('person', 0);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('group', 1);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('resource', 2);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('location', 3);
+create table ABO_MEMBERS (
+ "GROUP_ID" integer not null,
+ "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "MEMBER_ID" integer not null,
+ "REVISION" integer not null,
+ "REMOVED" integer default 0 not null,
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ primary key("GROUP_ID", "MEMBER_ID", "REVISION")
+);
+
+create table ABO_FOREIGN_MEMBERS (
+ "GROUP_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "MEMBER_ADDRESS" nvarchar2(255),
+ primary key("GROUP_ID", "MEMBER_ADDRESS")
+);
+
+create table SHARED_GROUP_BIND (
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+ "GROUP_RESOURCE_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ "EXTERNAL_ID" integer default null,
+ "GROUP_ADDRESSBOOK_NAME" nvarchar2(255),
+ "BIND_MODE" integer not null,
+ "BIND_STATUS" integer not null,
+ "BIND_REVISION" integer default 0 not null,
+ "MESSAGE" nclob,
+ primary key("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_RESOURCE_ID"),
+ unique("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_ADDRESSBOOK_NAME")
+);
+
+create table CALENDAR_OBJECT_REVISIONS (
+ "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+ "CALENDAR_RESOURCE_ID" integer references CALENDAR,
+ "CALENDAR_NAME" nvarchar2(255) default null,
+ "RESOURCE_NAME" nvarchar2(255),
+ "REVISION" integer not null,
+ "DELETED" integer not null,
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+ "OWNER_HOME_RESOURCE_ID" integer references ADDRESSBOOK_HOME,
+ "ADDRESSBOOK_NAME" nvarchar2(255) default null,
+ "OBJECT_RESOURCE_ID" integer default 0,
+ "RESOURCE_NAME" nvarchar2(255),
+ "REVISION" integer not null,
+ "DELETED" integer not null,
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+ "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME on delete cascade,
+ "RESOURCE_NAME" nvarchar2(255),
+ "REVISION" integer not null,
+ "DELETED" integer not null,
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique("NOTIFICATION_HOME_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table APN_SUBSCRIPTIONS (
+ "TOKEN" nvarchar2(255),
+ "RESOURCE_KEY" nvarchar2(255),
+ "MODIFIED" integer not null,
+ "SUBSCRIBER_GUID" nvarchar2(255),
+ "USER_AGENT" nvarchar2(255) default null,
+ "IP_ADDR" nvarchar2(255) default null,
+ primary key("TOKEN", "RESOURCE_KEY")
+);
+
+create table IMIP_TOKENS (
+ "TOKEN" nvarchar2(255),
+ "ORGANIZER" nvarchar2(255),
+ "ATTENDEE" nvarchar2(255),
+ "ICALUID" nvarchar2(255),
+ "ACCESSED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ primary key("ORGANIZER", "ATTENDEE", "ICALUID")
+);
+
+create table IMIP_INVITATION_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "FROM_ADDR" nvarchar2(255),
+ "TO_ADDR" nvarchar2(255),
+ "ICALENDAR_TEXT" nclob
+);
+
+create table IMIP_POLLING_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table IMIP_REPLY_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "ORGANIZER" nvarchar2(255),
+ "ATTENDEE" nvarchar2(255),
+ "ICALENDAR_TEXT" nclob
+);
+
+create table PUSH_NOTIFICATION_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "PUSH_ID" nvarchar2(255),
+ "PRIORITY" integer not null
+);
+
+create table GROUP_CACHER_POLLING_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create table FIND_MIN_VALID_REVISION_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table REVISION_CLEANUP_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDARSERVER (
+ "NAME" nvarchar2(255) primary key,
+ "VALUE" nvarchar2(255)
+);
+
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '33');
+insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '5');
+insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
+insert into CALENDARSERVER (NAME, VALUE) values ('MIN-VALID-REVISION', '1');
+create index CALENDAR_HOME_METADAT_3cb9049e on CALENDAR_HOME_METADATA (
+ DEFAULT_EVENTS
+);
+
+create index CALENDAR_HOME_METADAT_d55e5548 on CALENDAR_HOME_METADATA (
+ DEFAULT_TASKS
+);
+
+create index CALENDAR_HOME_METADAT_910264ce on CALENDAR_HOME_METADATA (
+ DEFAULT_POLLS
+);
+
+create index NOTIFICATION_NOTIFICA_f891f5f9 on NOTIFICATION (
+ NOTIFICATION_HOME_RESOURCE_ID
+);
+
+create index CALENDAR_BIND_RESOURC_e57964d4 on CALENDAR_BIND (
+ CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_CALEN_a9a453a9 on CALENDAR_OBJECT (
+ CALENDAR_RESOURCE_ID,
+ ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_CALEN_96e83b73 on CALENDAR_OBJECT (
+ CALENDAR_RESOURCE_ID,
+ RECURRANCE_MAX
+);
+
+create index CALENDAR_OBJECT_ICALE_82e731d5 on CALENDAR_OBJECT (
+ ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_DROPB_de041d80 on CALENDAR_OBJECT (
+ DROPBOX_ID
+);
+
+create index TIME_RANGE_CALENDAR_R_beb6e7eb on TIME_RANGE (
+ CALENDAR_RESOURCE_ID
+);
+
+create index TIME_RANGE_CALENDAR_O_acf37bd1 on TIME_RANGE (
+ CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index TRANSPARENCY_TIME_RAN_5f34467f on TRANSPARENCY (
+ TIME_RANGE_INSTANCE_ID
+);
+
+create index ATTACHMENT_CALENDAR_H_0078845c on ATTACHMENT (
+ CALENDAR_HOME_RESOURCE_ID
+);
+
+create index ATTACHMENT_DROPBOX_ID_5073cf23 on ATTACHMENT (
+ DROPBOX_ID
+);
+
+create index ATTACHMENT_CALENDAR_O_81508484 on ATTACHMENT_CALENDAR_OBJECT (
+ CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index SHARED_ADDRESSBOOK_BI_e9a2e6d4 on SHARED_ADDRESSBOOK_BIND (
+ OWNER_HOME_RESOURCE_ID
+);
+
+create index ABO_MEMBERS_ADDRESSBO_4effa879 on ABO_MEMBERS (
+ ADDRESSBOOK_ID
+);
+
+create index ABO_MEMBERS_MEMBER_ID_8d66adcf on ABO_MEMBERS (
+ MEMBER_ID
+);
+
+create index ABO_FOREIGN_MEMBERS_A_1fd2c5e9 on ABO_FOREIGN_MEMBERS (
+ ADDRESSBOOK_ID
+);
+
+create index SHARED_GROUP_BIND_RES_cf52f95d on SHARED_GROUP_BIND (
+ GROUP_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_3a3956c4 on CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_HOME_RESOURCE_ID,
+ CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_6d9d929c on CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_RESOURCE_ID,
+ RESOURCE_NAME,
+ DELETED,
+ REVISION
+);
+
+create index CALENDAR_OBJECT_REVIS_265c8acf on CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_RESOURCE_ID,
+ REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_2bfcf757 on ADDRESSBOOK_OBJECT_REVISIONS (
+ ADDRESSBOOK_HOME_RESOURCE_ID,
+ OWNER_HOME_RESOURCE_ID
+);
+
+create index ADDRESSBOOK_OBJECT_RE_00fe8288 on ADDRESSBOOK_OBJECT_REVISIONS (
+ OWNER_HOME_RESOURCE_ID,
+ RESOURCE_NAME,
+ DELETED,
+ REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_45004780 on ADDRESSBOOK_OBJECT_REVISIONS (
+ OWNER_HOME_RESOURCE_ID,
+ REVISION
+);
+
+create index NOTIFICATION_OBJECT_R_036a9cee on NOTIFICATION_OBJECT_REVISIONS (
+ NOTIFICATION_HOME_RESOURCE_ID,
+ REVISION
+);
+
+create index APN_SUBSCRIPTIONS_RES_9610d78e on APN_SUBSCRIPTIONS (
+ RESOURCE_KEY
+);
+
+create index IMIP_TOKENS_TOKEN_e94b918f on IMIP_TOKENS (
+ TOKEN
+);
+
+create index CALENDAR_OBJECT_SPLIT_af71dcda on CALENDAR_OBJECT_SPLITTER_WORK (
+ RESOURCE_ID
+);
+
</ins></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaoldpostgresdialectv33sql"></a>
<div class="addfile"><h4>Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/postgres-dialect/v33.sql (0 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/postgres-dialect/v33.sql         (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/old/postgres-dialect/v33.sql        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -0,0 +1,758 @@
</span><ins>+-- -*- test-case-name: txdav.caldav.datastore.test.test_sql,txdav.carddav.datastore.test.test_sql -*-
+
+----
+-- Copyright (c) 2010-2014 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+
+-----------------
+-- Resource ID --
+-----------------
+
+create sequence RESOURCE_ID_SEQ;
+
+
+-------------------------
+-- Cluster Bookkeeping --
+-------------------------
+
+-- Information about a process connected to this database.
+
+-- Note that this must match the node info schema in twext.enterprise.queue.
+create table NODE_INFO (
+ HOSTNAME varchar(255) not null,
+ PID integer not null,
+ PORT integer not null,
+ TIME timestamp not null default timezone('UTC', CURRENT_TIMESTAMP),
+
+ primary key (HOSTNAME, PORT)
+);
+
+-- Unique named locks. This table should always be empty, but rows are
+-- temporarily created in order to prevent undesirable concurrency.
+create table NAMED_LOCK (
+ LOCK_NAME varchar(255) primary key
+);
+
+
+-------------------
+-- Calendar Home --
+-------------------
+
+create table CALENDAR_HOME (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ OWNER_UID varchar(255) not null unique, -- implicit index
+ STATUS integer default 0 not null, -- enum HOME_STATUS
+ DATAVERSION integer default 0 not null
+);
+
+-- Enumeration of statuses
+
+create table HOME_STATUS (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into HOME_STATUS values (0, 'normal' );
+insert into HOME_STATUS values (1, 'external');
+
+
+--------------
+-- Calendar --
+--------------
+
+create table CALENDAR (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ') -- implicit index
+);
+
+
+----------------------------
+-- Calendar Home Metadata --
+----------------------------
+
+create table CALENDAR_HOME_METADATA (
+ RESOURCE_ID integer primary key references CALENDAR_HOME on delete cascade, -- implicit index
+ QUOTA_USED_BYTES integer default 0 not null,
+ DEFAULT_EVENTS integer default null references CALENDAR on delete set null,
+ DEFAULT_TASKS integer default null references CALENDAR on delete set null,
+ DEFAULT_POLLS integer default null references CALENDAR on delete set null,
+ ALARM_VEVENT_TIMED text default null,
+ ALARM_VEVENT_ALLDAY text default null,
+ ALARM_VTODO_TIMED text default null,
+ ALARM_VTODO_ALLDAY text default null,
+ AVAILABILITY text default null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index CALENDAR_HOME_METADATA_DEFAULT_EVENTS on
+        CALENDAR_HOME_METADATA(DEFAULT_EVENTS);
+create index CALENDAR_HOME_METADATA_DEFAULT_TASKS on
+        CALENDAR_HOME_METADATA(DEFAULT_TASKS);
+create index CALENDAR_HOME_METADATA_DEFAULT_POLLS on
+        CALENDAR_HOME_METADATA(DEFAULT_POLLS);
+
+
+-----------------------
+-- Calendar Metadata --
+-----------------------
+
+create table CALENDAR_METADATA (
+ RESOURCE_ID integer primary key references CALENDAR on delete cascade, -- implicit index
+ SUPPORTED_COMPONENTS varchar(255) default null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------------
+-- Sharing Notifications --
+---------------------------
+
+create table NOTIFICATION_HOME (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ OWNER_UID varchar(255) not null unique, -- implicit index
+ STATUS integer default 0 not null, -- enum HOME_STATUS
+ DATAVERSION integer default 0 not null
+);
+
+create table NOTIFICATION (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ NOTIFICATION_HOME_RESOURCE_ID integer not null references NOTIFICATION_HOME,
+ NOTIFICATION_UID varchar(255) not null,
+ NOTIFICATION_TYPE varchar(255) not null,
+ NOTIFICATION_DATA text not null,
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique(NOTIFICATION_UID, NOTIFICATION_HOME_RESOURCE_ID) -- implicit index
+);
+
+create index NOTIFICATION_NOTIFICATION_HOME_RESOURCE_ID on
+        NOTIFICATION(NOTIFICATION_HOME_RESOURCE_ID);
+
+
+-------------------
+-- Calendar Bind --
+-------------------
+
+-- Joins CALENDAR_HOME and CALENDAR
+
+create table CALENDAR_BIND (
+ CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
+ CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
+ EXTERNAL_ID                         integer default null,
+ CALENDAR_RESOURCE_NAME varchar(255) not null,
+ BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
+ BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
+ BIND_REVISION                                integer default 0 not null,
+ MESSAGE text,
+ TRANSP integer default 0 not null, -- enum CALENDAR_TRANSP
+ ALARM_VEVENT_TIMED text default null,
+ ALARM_VEVENT_ALLDAY text default null,
+ ALARM_VTODO_TIMED text default null,
+ ALARM_VTODO_ALLDAY text default null,
+ TIMEZONE text default null,
+
+ primary key(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID), -- implicit index
+ unique(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_NAME) -- implicit index
+);
+
+create index CALENDAR_BIND_RESOURCE_ID on
+        CALENDAR_BIND(CALENDAR_RESOURCE_ID);
+
+-- Enumeration of calendar bind modes
+
+create table CALENDAR_BIND_MODE (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_MODE values (0, 'own' );
+insert into CALENDAR_BIND_MODE values (1, 'read' );
+insert into CALENDAR_BIND_MODE values (2, 'write');
+insert into CALENDAR_BIND_MODE values (3, 'direct');
+insert into CALENDAR_BIND_MODE values (4, 'indirect');
+
+-- Enumeration of statuses
+
+create table CALENDAR_BIND_STATUS (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_STATUS values (0, 'invited' );
+insert into CALENDAR_BIND_STATUS values (1, 'accepted');
+insert into CALENDAR_BIND_STATUS values (2, 'declined');
+insert into CALENDAR_BIND_STATUS values (3, 'invalid');
+insert into CALENDAR_BIND_STATUS values (4, 'deleted');
+
+
+-- Enumeration of transparency
+
+create table CALENDAR_TRANSP (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_TRANSP values (0, 'opaque' );
+insert into CALENDAR_TRANSP values (1, 'transparent');
+
+
+---------------------
+-- Calendar Object --
+---------------------
+
+create table CALENDAR_OBJECT (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
+ RESOURCE_NAME varchar(255) not null,
+ ICALENDAR_TEXT text not null,
+ ICALENDAR_UID varchar(255) not null,
+ ICALENDAR_TYPE varchar(255) not null,
+ ATTACHMENTS_MODE integer default 0 not null, -- enum CALENDAR_OBJECT_ATTACHMENTS_MODE
+ DROPBOX_ID varchar(255),
+ ORGANIZER varchar(255),
+ RECURRANCE_MIN date, -- minimum date that recurrences have been expanded to.
+ RECURRANCE_MAX date, -- maximum date that recurrences have been expanded to.
+ ACCESS integer default 0 not null,
+ SCHEDULE_OBJECT boolean default false,
+ SCHEDULE_TAG varchar(36) default null,
+ SCHEDULE_ETAGS text default null,
+ PRIVATE_COMMENTS boolean default false not null,
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique (CALENDAR_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+
+ -- since the 'inbox' is a 'calendar resource' for the purpose of storing
+ -- calendar objects, this constraint has to be selectively enforced by the
+ -- application layer.
+
+ -- unique(CALENDAR_RESOURCE_ID, ICALENDAR_UID)
+);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_AND_ICALENDAR_UID on
+ CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_RECURRANCE_MAX on
+ CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, RECURRANCE_MAX);
+
+create index CALENDAR_OBJECT_ICALENDAR_UID on
+ CALENDAR_OBJECT(ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_DROPBOX_ID on
+ CALENDAR_OBJECT(DROPBOX_ID);
+
+-- Enumeration of attachment modes
+
+create table CALENDAR_OBJECT_ATTACHMENTS_MODE (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (0, 'none' );
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (1, 'read' );
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (2, 'write');
+
+
+-- Enumeration of calendar access types
+
+create table CALENDAR_ACCESS_TYPE (
+ ID integer primary key,
+ DESCRIPTION varchar(32) not null unique
+);
+
+insert into CALENDAR_ACCESS_TYPE values (0, '' );
+insert into CALENDAR_ACCESS_TYPE values (1, 'public' );
+insert into CALENDAR_ACCESS_TYPE values (2, 'private' );
+insert into CALENDAR_ACCESS_TYPE values (3, 'confidential' );
+insert into CALENDAR_ACCESS_TYPE values (4, 'restricted' );
+
+
+-----------------
+-- Instance ID --
+-----------------
+
+create sequence INSTANCE_ID_SEQ;
+
+
+----------------
+-- Time Range --
+----------------
+
+create table TIME_RANGE (
+ INSTANCE_ID integer primary key default nextval('INSTANCE_ID_SEQ'), -- implicit index
+ CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
+ CALENDAR_OBJECT_RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+ FLOATING boolean not null,
+ START_DATE timestamp not null,
+ END_DATE timestamp not null,
+ FBTYPE integer not null,
+ TRANSPARENT boolean not null
+);
+
+create index TIME_RANGE_CALENDAR_RESOURCE_ID on
+ TIME_RANGE(CALENDAR_RESOURCE_ID);
+create index TIME_RANGE_CALENDAR_OBJECT_RESOURCE_ID on
+ TIME_RANGE(CALENDAR_OBJECT_RESOURCE_ID);
+
+
+-- Enumeration of free/busy types
+
+create table FREE_BUSY_TYPE (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into FREE_BUSY_TYPE values (0, 'unknown' );
+insert into FREE_BUSY_TYPE values (1, 'free' );
+insert into FREE_BUSY_TYPE values (2, 'busy' );
+insert into FREE_BUSY_TYPE values (3, 'busy-unavailable');
+insert into FREE_BUSY_TYPE values (4, 'busy-tentative' );
+
+
+------------------
+-- Transparency --
+------------------
+
+create table TRANSPARENCY (
+ TIME_RANGE_INSTANCE_ID integer not null references TIME_RANGE on delete cascade,
+ USER_ID varchar(255) not null,
+ TRANSPARENT boolean not null
+);
+
+create index TRANSPARENCY_TIME_RANGE_INSTANCE_ID on
+ TRANSPARENCY(TIME_RANGE_INSTANCE_ID);
+
+
+----------------
+-- Attachment --
+----------------
+
+create sequence ATTACHMENT_ID_SEQ;
+
+create table ATTACHMENT (
+ ATTACHMENT_ID integer primary key default nextval('ATTACHMENT_ID_SEQ'), -- implicit index
+ CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
+ DROPBOX_ID varchar(255),
+ CONTENT_TYPE varchar(255) not null,
+ SIZE integer not null,
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ PATH varchar(1024) not null
+);
+
+create index ATTACHMENT_CALENDAR_HOME_RESOURCE_ID on
+ ATTACHMENT(CALENDAR_HOME_RESOURCE_ID);
+
+create index ATTACHMENT_DROPBOX_ID on
+ ATTACHMENT(DROPBOX_ID);
+
+-- Many-to-many relationship between attachments and calendar objects
+create table ATTACHMENT_CALENDAR_OBJECT (
+ ATTACHMENT_ID integer not null references ATTACHMENT on delete cascade,
+ MANAGED_ID varchar(255) not null,
+ CALENDAR_OBJECT_RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+
+ primary key (ATTACHMENT_ID, CALENDAR_OBJECT_RESOURCE_ID), -- implicit index
+ unique (MANAGED_ID, CALENDAR_OBJECT_RESOURCE_ID) --implicit index
+);
+
+create index ATTACHMENT_CALENDAR_OBJECT_CALENDAR_OBJECT_RESOURCE_ID on
+        ATTACHMENT_CALENDAR_OBJECT(CALENDAR_OBJECT_RESOURCE_ID);
+
+-----------------------
+-- Resource Property --
+-----------------------
+
+create table RESOURCE_PROPERTY (
+ RESOURCE_ID integer not null, -- foreign key: *.RESOURCE_ID
+ NAME varchar(255) not null,
+ VALUE text not null, -- FIXME: xml?
+ VIEWER_UID varchar(255),
+
+ primary key (RESOURCE_ID, NAME, VIEWER_UID) -- implicit index
+);
+
+
+----------------------
+-- AddressBook Home --
+----------------------
+
+create table ADDRESSBOOK_HOME (
+ RESOURCE_ID                                 integer                        primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ ADDRESSBOOK_PROPERTY_STORE_ID        integer         default nextval('RESOURCE_ID_SEQ') not null,         -- implicit index
+ OWNER_UID                                 varchar(255)         not null unique, -- implicit index
+ STATUS                                 integer         default 0 not null, -- enum HOME_STATUS
+ DATAVERSION                                 integer         default 0 not null
+);
+
+
+-------------------------------
+-- AddressBook Home Metadata --
+-------------------------------
+
+create table ADDRESSBOOK_HOME_METADATA (
+ RESOURCE_ID integer primary key references ADDRESSBOOK_HOME on delete cascade, -- implicit index
+ QUOTA_USED_BYTES integer default 0 not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+-----------------------------
+-- Shared AddressBook Bind --
+-----------------------------
+
+-- Joins sharee ADDRESSBOOK_HOME and owner ADDRESSBOOK_HOME
+
+create table SHARED_ADDRESSBOOK_BIND (
+ ADDRESSBOOK_HOME_RESOURCE_ID                        integer                        not null references ADDRESSBOOK_HOME,
+ OWNER_HOME_RESOURCE_ID                         integer         not null references ADDRESSBOOK_HOME on delete cascade,
+ EXTERNAL_ID                         integer default null,
+ ADDRESSBOOK_RESOURCE_NAME                         varchar(255)         not null,
+ BIND_MODE                         integer         not null,        -- enum CALENDAR_BIND_MODE
+ BIND_STATUS                         integer         not null,        -- enum CALENDAR_BIND_STATUS
+ BIND_REVISION                                                         integer         default 0 not null,
+ MESSAGE                         text,                 -- FIXME: xml?
+
+ primary key (ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID), -- implicit index
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_NAME) -- implicit index
+);
+
+create index SHARED_ADDRESSBOOK_BIND_RESOURCE_ID on
+ SHARED_ADDRESSBOOK_BIND(OWNER_HOME_RESOURCE_ID);
+
+
+------------------------
+-- AddressBook Object --
+------------------------
+
+create table ADDRESSBOOK_OBJECT (
+ RESOURCE_ID                 integer                 primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ ADDRESSBOOK_HOME_RESOURCE_ID         integer         not null references ADDRESSBOOK_HOME on delete cascade,
+ RESOURCE_NAME                 varchar(255)         not null,
+ VCARD_TEXT                 text         not null,
+ VCARD_UID                 varchar(255)         not null,
+ KIND                                                           integer         not null, -- enum ADDRESSBOOK_OBJECT_KIND
+ MD5                 char(32)         not null,
+ CREATED                 timestamp         default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED                 timestamp         default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, RESOURCE_NAME), -- implicit index
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, VCARD_UID) -- implicit index
+);
+
+
+-----------------------------
+-- AddressBook Object kind --
+-----------------------------
+
+create table ADDRESSBOOK_OBJECT_KIND (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND values (0, 'person');
+insert into ADDRESSBOOK_OBJECT_KIND values (1, 'group' );
+insert into ADDRESSBOOK_OBJECT_KIND values (2, 'resource');
+insert into ADDRESSBOOK_OBJECT_KIND values (3, 'location');
+
+
+----------------------------------
+-- Revisions, forward reference --
+----------------------------------
+
+create sequence REVISION_SEQ;
+
+---------------------------------
+-- Address Book Object Members --
+---------------------------------
+
+create table ABO_MEMBERS (
+ GROUP_ID                 integer                not null, -- references ADDRESSBOOK_OBJECT on delete cascade,        -- AddressBook Object's (kind=='group') RESOURCE_ID
+         ADDRESSBOOK_ID        integer                not null references ADDRESSBOOK_HOME on delete cascade,
+ MEMBER_ID         integer                not null, -- references ADDRESSBOOK_OBJECT,                                                -- member AddressBook Object's RESOURCE_ID
+         REVISION integer         default nextval('REVISION_SEQ') not null,
+         REMOVED         boolean                default false not null,
+        MODIFIED timestamp        default timezone('UTC', CURRENT_TIMESTAMP),
+
+ primary key (GROUP_ID, MEMBER_ID, REVISION) -- implicit index
+);
+
+create index ABO_MEMBERS_ADDRESSBOOK_ID on
+        ABO_MEMBERS(ADDRESSBOOK_ID);
+create index ABO_MEMBERS_MEMBER_ID on
+        ABO_MEMBERS(MEMBER_ID);
+
+------------------------------------------
+-- Address Book Object Foreign Members --
+------------------------------------------
+
+create table ABO_FOREIGN_MEMBERS (
+ GROUP_ID integer not null references ADDRESSBOOK_OBJECT on delete cascade,        -- AddressBook Object's (kind=='group') RESOURCE_ID
+         ADDRESSBOOK_ID                 integer not null references ADDRESSBOOK_HOME on delete cascade,
+ MEMBER_ADDRESS          varchar(255) not null,                                                                                                         -- member AddressBook Object's 'calendar' address
+
+ primary key (GROUP_ID, MEMBER_ADDRESS) -- implicit index
+);
+
+create index ABO_FOREIGN_MEMBERS_ADDRESSBOOK_ID on
+        ABO_FOREIGN_MEMBERS(ADDRESSBOOK_ID);
+
+-----------------------
+-- Shared Group Bind --
+-----------------------
+
+-- Joins ADDRESSBOOK_HOME and ADDRESSBOOK_OBJECT (kind == group)
+
+create table SHARED_GROUP_BIND (        
+ ADDRESSBOOK_HOME_RESOURCE_ID                 integer not null references ADDRESSBOOK_HOME,
+ GROUP_RESOURCE_ID                         integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ EXTERNAL_ID                         integer default null,
+ GROUP_ADDRESSBOOK_NAME                        varchar(255) not null,
+ BIND_MODE                 integer not null, -- enum CALENDAR_BIND_MODE
+ BIND_STATUS                 integer not null, -- enum CALENDAR_BIND_STATUS
+ BIND_REVISION                                                 integer default 0 not null,
+ MESSAGE                 text, -- FIXME: xml?
+
+ primary key (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_RESOURCE_ID), -- implicit index
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_ADDRESSBOOK_NAME) -- implicit index
+);
+
+create index SHARED_GROUP_BIND_RESOURCE_ID on
+ SHARED_GROUP_BIND(GROUP_RESOURCE_ID);
+
+
+---------------
+-- Revisions --
+---------------
+
+-- create sequence REVISION_SEQ;
+
+
+-------------------------------
+-- Calendar Object Revisions --
+-------------------------------
+
+create table CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
+ CALENDAR_RESOURCE_ID integer references CALENDAR,
+ CALENDAR_NAME varchar(255) default null,
+ RESOURCE_NAME varchar(255),
+ REVISION integer default nextval('REVISION_SEQ') not null,
+ DELETED boolean not null,
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index CALENDAR_OBJECT_REVISIONS_HOME_RESOURCE_ID_CALENDAR_RESOURCE_ID
+ on CALENDAR_OBJECT_REVISIONS(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+ on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+ on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, REVISION);
+
+
+----------------------------------
+-- AddressBook Object Revisions --
+----------------------------------
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+ ADDRESSBOOK_HOME_RESOURCE_ID         integer                 not null references ADDRESSBOOK_HOME,
+ OWNER_HOME_RESOURCE_ID         integer references ADDRESSBOOK_HOME,
+ ADDRESSBOOK_NAME         varchar(255) default null,
+ OBJECT_RESOURCE_ID                        integer                 default 0,
+ RESOURCE_NAME         varchar(255),
+ REVISION         integer default nextval('REVISION_SEQ') not null,
+ DELETED         boolean          not null,
+ MODIFIED         timestamp          default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_HOME_RESOURCE_ID_OWNER_HOME_RESOURCE_ID
+ on ADDRESSBOOK_OBJECT_REVISIONS(ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+ on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_REVISION
+ on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, REVISION);
+
+
+-----------------------------------
+-- Notification Object Revisions --
+-----------------------------------
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+ NOTIFICATION_HOME_RESOURCE_ID integer not null references NOTIFICATION_HOME on delete cascade,
+ RESOURCE_NAME varchar(255),
+ REVISION integer default nextval('REVISION_SEQ') not null,
+ DELETED boolean not null,
+ MODIFIED         timestamp         default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique(NOTIFICATION_HOME_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+);
+
+create index NOTIFICATION_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+ on NOTIFICATION_OBJECT_REVISIONS(NOTIFICATION_HOME_RESOURCE_ID, REVISION);
+
+
+-------------------------------------------
+-- Apple Push Notification Subscriptions --
+-------------------------------------------
+
+create table APN_SUBSCRIPTIONS (
+ TOKEN varchar(255) not null,
+ RESOURCE_KEY varchar(255) not null,
+ MODIFIED integer not null,
+ SUBSCRIBER_GUID varchar(255) not null,
+ USER_AGENT varchar(255) default null,
+ IP_ADDR varchar(255) default null,
+
+ primary key (TOKEN, RESOURCE_KEY) -- implicit index
+);
+
+create index APN_SUBSCRIPTIONS_RESOURCE_KEY
+ on APN_SUBSCRIPTIONS(RESOURCE_KEY);
+
+
+-----------------
+-- IMIP Tokens --
+-----------------
+
+create table IMIP_TOKENS (
+ TOKEN varchar(255) not null,
+ ORGANIZER varchar(255) not null,
+ ATTENDEE varchar(255) not null,
+ ICALUID varchar(255) not null,
+ ACCESSED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ primary key (ORGANIZER, ATTENDEE, ICALUID) -- implicit index
+);
+
+create index IMIP_TOKENS_TOKEN
+ on IMIP_TOKENS(TOKEN);
+
+
+----------------
+-- Work Items --
+----------------
+
+create sequence WORKITEM_SEQ;
+
+
+---------------------------
+-- IMIP Invitation Work --
+---------------------------
+
+create table IMIP_INVITATION_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ FROM_ADDR varchar(255) not null,
+ TO_ADDR varchar(255) not null,
+ ICALENDAR_TEXT text not null
+);
+
+
+-----------------------
+-- IMIP Polling Work --
+-----------------------
+
+create table IMIP_POLLING_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------
+-- IMIP Reply Work --
+---------------------
+
+create table IMIP_REPLY_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ ORGANIZER varchar(255) not null,
+ ATTENDEE varchar(255) not null,
+ ICALENDAR_TEXT text not null
+);
+
+
+------------------------
+-- Push Notifications --
+------------------------
+
+create table PUSH_NOTIFICATION_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ PUSH_ID varchar(255) not null,
+ PRIORITY integer not null -- 1:low 5:medium 10:high
+);
+
+-----------------
+-- GroupCacher --
+-----------------
+
+create table GROUP_CACHER_POLLING_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+--------------------------
+-- Object Splitter Work --
+--------------------------
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create index CALENDAR_OBJECT_SPLITTER_WORK_RESOURCE_ID on
+        CALENDAR_OBJECT_SPLITTER_WORK(RESOURCE_ID);
+
+---------------------------
+-- Revision Cleaner Work --
+---------------------------
+
+create table FIND_MIN_VALID_REVISION_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create table REVISION_CLEANUP_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+--------------------
+-- Schema Version --
+--------------------
+
+create table CALENDARSERVER (
+ NAME varchar(255) primary key, -- implicit index
+ VALUE varchar(255)
+);
+
+insert into CALENDARSERVER values ('VERSION', '33');
+insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '5');
+insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');
+insert into CALENDARSERVER values ('MIN-VALID-REVISION', '1');
</ins></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaupgradesoracledialectupgrade_from_32_to_33sql"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_32_to_33.sql (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_32_to_33.sql        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_32_to_33.sql        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -15,7 +15,7 @@
</span><span class="cx"> ----
</span><span class="cx">
</span><span class="cx"> ---------------------------------------------------
</span><del>--- Upgrade database schema from VERSION 30 to 31 --
</del><ins>+-- Upgrade database schema from VERSION 32 to 33 --
</ins><span class="cx"> ---------------------------------------------------
</span><span class="cx">
</span><span class="cx"> -- Add timestamp to revision tables --
</span><span class="lines">@@ -42,4 +42,4 @@
</span><span class="cx">
</span><span class="cx"> -- Update version --
</span><span class="cx">
</span><del>-update CALENDARSERVER set VALUE = '31' where NAME = 'VERSION';
</del><ins>+update CALENDARSERVER set VALUE = '33' where NAME = 'VERSION';
</ins></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaupgradesoracledialectupgrade_from_33_to_34sql"></a>
<div class="addfile"><h4>Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_33_to_34.sql (0 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_33_to_34.sql         (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_33_to_34.sql        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -0,0 +1,58 @@
</span><ins>+----
+-- Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 33 to 34 --
+---------------------------------------------------
+
+-- Add timestamp to revision tables --
+
+-- Add migration column to tables --
+
+alter table CALENDAR_HOME
+ drop unique ("OWNER_UID");
+alter table CALENDAR_HOME
+ add ("MIGRATION" integer default 0 not null);
+alter table CALENDAR_HOME
+ add unique(OWNER_UID, MIGRATION);
+
+alter table NOTIFICATION_HOME
+ drop unique ("OWNER_UID");
+alter table NOTIFICATION_HOME
+ add ("MIGRATION" integer default 0 not null);
+alter table NOTIFICATION_HOME
+ add unique(OWNER_UID, MIGRATION);
+
+alter table ADDRESSBOOK_HOME
+ drop unique ("OWNER_UID");
+alter table ADDRESSBOOK_HOME
+ add ("MIGRATION" integer default 0 not null);
+alter table ADDRESSBOOK_HOME
+ add unique(OWNER_UID, MIGRATION);
+
+
+create table MIGRATION_STATUS (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('none', 0);
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('migrating', 1);
+insert into MIGRATION_STATUS (DESCRIPTION, ID) values ('migrated', 2);
+
+-- Update version --
+
+update CALENDARSERVER set VALUE = '34' where NAME = 'VERSION';
</ins></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaupgradespostgresdialectupgrade_from_32_to_33sql"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_32_to_33.sql (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_32_to_33.sql        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_32_to_33.sql        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -15,7 +15,7 @@
</span><span class="cx"> ----
</span><span class="cx">
</span><span class="cx"> ---------------------------------------------------
</span><del>--- Upgrade database schema from VERSION 30 to 31 --
</del><ins>+-- Upgrade database schema from VERSION 32 to 33 --
</ins><span class="cx"> ---------------------------------------------------
</span><span class="cx">
</span><span class="cx"> -- Add timestamp to revision tables --
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_schemaupgradespostgresdialectupgrade_from_33_to_34sql"></a>
<div class="addfile"><h4>Added: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_33_to_34.sql (0 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_33_to_34.sql         (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_33_to_34.sql        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -0,0 +1,50 @@
</span><ins>+----
+-- Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 33 to 34 --
+---------------------------------------------------
+
+-- Add migration column to tables --
+
+alter table CALENDAR_HOME
+ drop constraint CALENDAR_HOME_OWNER_UID_KEY,
+ add column MIGRATION integer default 0 not null,
+ add unique(OWNER_UID, MIGRATION);
+
+alter table NOTIFICATION_HOME
+ drop constraint NOTIFICATION_HOME_OWNER_UID_KEY,
+ add column MIGRATION integer default 0 not null,
+ add unique(OWNER_UID, MIGRATION);
+
+alter table ADDRESSBOOK_HOME
+ drop constraint ADDRESSBOOK_HOME_OWNER_UID_KEY,
+ add column MIGRATION integer default 0 not null,
+ add unique(OWNER_UID, MIGRATION);
+
+
+create table MIGRATION_STATUS (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into MIGRATION_STATUS values (0, 'none' );
+insert into MIGRATION_STATUS values (1, 'migrating');
+insert into MIGRATION_STATUS values (2, 'migrated');
+
+-- Update version --
+
+update CALENDARSERVER set VALUE = '34' where NAME = 'VERSION';
</ins></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoresql_tablespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_tables.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_tables.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/sql_tables.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -138,10 +138,20 @@
</span><span class="cx"> schema.HOME_STATUS.ID
</span><span class="cx"> )
</span><span class="cx">
</span><del>-
</del><span class="cx"> _HOME_STATUS_NORMAL = _homeStatus('normal')
</span><span class="cx"> _HOME_STATUS_EXTERNAL = _homeStatus('external')
</span><span class="cx">
</span><ins>+
+_migrationStatus = _schemaConstants(
+ schema.MIGRATION_STATUS.DESCRIPTION,
+ schema.MIGRATION_STATUS.ID
+)
+
+_MIGRATION_STATUS_NONE = _migrationStatus('none')
+_MIGRATION_STATUS_MIGRATING = _migrationStatus('migrating')
+_MIGRATION_STATUS_MIGRATED = _migrationStatus('migrated')
+
+
</ins><span class="cx"> _bindStatus = _schemaConstants(
</span><span class="cx"> schema.CALENDAR_BIND_STATUS.DESCRIPTION,
</span><span class="cx"> schema.CALENDAR_BIND_STATUS.ID
</span></span></pre></div>
<a id="CalendarServerbranchesuserscdaboopodmigrationtxdavcommondatastoretestutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/test/util.py (12417 => 12418)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/test/util.py        2014-01-21 22:34:12 UTC (rev 12417)
+++ CalendarServer/branches/users/cdaboo/pod-migration/txdav/common/datastore/test/util.py        2014-01-22 04:07:03 UTC (rev 12418)
</span><span class="lines">@@ -60,7 +60,7 @@
</span><span class="cx"> from txdav.base.propertystore.base import PropertyName
</span><span class="cx"> from txdav.caldav.icalendarstore import ComponentUpdateState
</span><span class="cx"> from txdav.common.datastore.sql import CommonDataStore, current_sql_schema
</span><del>-from txdav.common.datastore.sql_tables import schema
</del><ins>+from txdav.common.datastore.sql_tables import schema, _MIGRATION_STATUS_NONE
</ins><span class="cx"> from txdav.common.icommondatastore import NoSuchHomeChildError
</span><span class="cx">
</span><span class="cx"> from zope.interface.exceptions import BrokenMethodImplementation, \
</span><span class="lines">@@ -342,10 +342,8 @@
</span><span class="cx"> yield cleanupTxn.commit()
</span><span class="cx">
</span><span class="cx"> # Deal with memcached items that must be cleared
</span><del>- from txdav.caldav.datastore.sql import CalendarHome
- CalendarHome._cacher.flushAll()
- from txdav.carddav.datastore.sql import AddressBookHome
- AddressBookHome._cacher.flushAll()
</del><ins>+ if storeToClean.queryCacher:
+ storeToClean.queryCacher.flushAll()
</ins><span class="cx"> from txdav.base.propertystore.sql import PropertyStore
</span><span class="cx"> PropertyStore._cacher.flushAll()
</span><span class="cx">
</span><span class="lines">@@ -723,13 +721,13 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def homeUnderTest(self, txn=None, name="home1", create=False):
</del><ins>+ def homeUnderTest(self, txn=None, name="home1", create=False, migration=_MIGRATION_STATUS_NONE):
</ins><span class="cx"> """
</span><span class="cx"> Get the calendar home detailed by C{requirements['home1']}.
</span><span class="cx"> """
</span><span class="cx"> if txn is None:
</span><span class="cx"> txn = self.transactionUnderTest()
</span><del>- returnValue((yield txn.calendarHomeWithUID(name, create=create)))
</del><ins>+ returnValue((yield txn.calendarHomeWithUID(name, create=create, migration=migration)))
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -752,13 +750,13 @@
</span><span class="cx"> .calendarObjectWithName(name)))
</span><span class="cx">
</span><span class="cx">
</span><del>- def addressbookHomeUnderTest(self, txn=None, name="home1"):
</del><ins>+ def addressbookHomeUnderTest(self, txn=None, name="home1", create=False, migration=_MIGRATION_STATUS_NONE):
</ins><span class="cx"> """
</span><span class="cx"> Get the addressbook home detailed by C{requirements['home1']}.
</span><span class="cx"> """
</span><span class="cx"> if txn is None:
</span><span class="cx"> txn = self.transactionUnderTest()
</span><del>- return txn.addressbookHomeWithUID(name)
</del><ins>+ return txn.addressbookHomeWithUID(name, create=create, migration=migration)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span></span></pre>
</div>
</div>
</body>
</html>