<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[14522] CalendarServer/trunk</title>
</head>
<body>

<style type="text/css"><!--
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt;  }
#msg dl a { font-weight: bold}
#msg dl a:link    { color:#fc3; }
#msg dl a:active  { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
#logmsg ul { text-indent: -1em; padding-left: 1em; }
#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff  {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
--></style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.calendarserver.org//changeset/14522">14522</a></dd>
<dt>Author</dt> <dd>cdaboo@apple.com</dd>
<dt>Date</dt> <dd>2015-03-06 13:42:49 -0800 (Fri, 06 Mar 2015)</dd>
</dl>

<h3>Log Message</h3>
<pre>Merge pod-2-pod branch to trunk.</pre>

<h3>Modified Paths</h3>
<ul>
<li><a href="#CalendarServertrunkcalendarserverpushapplepushpy">CalendarServer/trunk/calendarserver/push/applepush.py</a></li>
<li><a href="#CalendarServertrunkcalendarserverpushtesttest_applepushpy">CalendarServer/trunk/calendarserver/push/test/test_applepush.py</a></li>
<li><a href="#CalendarServertrunkcalendarserverpushtesttest_notifierpy">CalendarServer/trunk/calendarserver/push/test/test_notifier.py</a></li>
<li><a href="#CalendarServertrunkcalendarservertoolsexportpy">CalendarServer/trunk/calendarserver/tools/export.py</a></li>
<li><a href="#CalendarServertrunkcalendarservertoolsprincipalspy">CalendarServer/trunk/calendarserver/tools/principals.py</a></li>
<li><a href="#CalendarServertrunkcalendarservertoolspurgepy">CalendarServer/trunk/calendarserver/tools/purge.py</a></li>
<li><a href="#CalendarServertrunkcalendarservertoolspushpy">CalendarServer/trunk/calendarserver/tools/push.py</a></li>
<li><a href="#CalendarServertrunkcalendarserverwebadminworkpy">CalendarServer/trunk/calendarserver/webadmin/work.py</a></li>
<li><a href="#CalendarServertrunkrequirementsstabletxt">CalendarServer/trunk/requirements-stable.txt</a></li>
<li><a href="#CalendarServertrunktwistedcaldavresourcepy">CalendarServer/trunk/twistedcaldav/resource.py</a></li>
<li><a href="#CalendarServertrunktwistedcaldavstdconfigpy">CalendarServer/trunk/twistedcaldav/stdconfig.py</a></li>
<li><a href="#CalendarServertrunktwistedcaldavtesttest_wrappingpy">CalendarServer/trunk/twistedcaldav/test/test_wrapping.py</a></li>
<li><a href="#CalendarServertrunktxdavbasedatastoreutilpy">CalendarServer/trunk/txdav/base/datastore/util.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingimipinboundpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingimipoutboundpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_inboundpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_mailgatewaypy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_mailgateway.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_outboundpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_outbound.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingischeduledeliverypy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingtesttest_workpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_work.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingworkpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoresqlpy">CalendarServer/trunk/txdav/caldav/datastore/sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoresql_externalpy">CalendarServer/trunk/txdav/caldav/datastore/sql_external.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoretestcommonpy">CalendarServer/trunk/txdav/caldav/datastore/test/common.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoretesttest_attachmentspy">CalendarServer/trunk/txdav/caldav/datastore/test/test_attachments.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoretesttest_sqlpy">CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoretesttest_sql_sharingpy">CalendarServer/trunk/txdav/caldav/datastore/test/test_sql_sharing.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreutilpy">CalendarServer/trunk/txdav/caldav/datastore/util.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavicalendarstorepy">CalendarServer/trunk/txdav/caldav/icalendarstore.py</a></li>
<li><a href="#CalendarServertrunktxdavcarddavdatastoresqlpy">CalendarServer/trunk/txdav/carddav/datastore/sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcarddavdatastoresql_externalpy">CalendarServer/trunk/txdav/carddav/datastore/sql_external.py</a></li>
<li><a href="#CalendarServertrunktxdavcarddavdatastoretesttest_sqlpy">CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcarddavdatastoretesttest_sql_sharingpy">CalendarServer/trunk/txdav/carddav/datastore/test/test_sql_sharing.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorefilepy">CalendarServer/trunk/txdav/common/datastore/file.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingattachmentspy">CalendarServer/trunk/txdav/common/datastore/podding/attachments.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingconduitpy">CalendarServer/trunk/txdav/common/datastore/podding/conduit.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingdirectorypy">CalendarServer/trunk/txdav/common/datastore/podding/directory.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingrequestpy">CalendarServer/trunk/txdav/common/datastore/podding/request.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingresourcepy">CalendarServer/trunk/txdav/common/datastore/podding/resource.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingsharing_invitespy">CalendarServer/trunk/txdav/common/datastore/podding/sharing_invites.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingstore_apipy">CalendarServer/trunk/txdav/common/datastore/podding/store_api.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingtesttest_conduitpy">CalendarServer/trunk/txdav/common/datastore/podding/test/test_conduit.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingtesttest_store_apipy">CalendarServer/trunk/txdav/common/datastore/podding/test/test_store_api.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingtestutilpy">CalendarServer/trunk/txdav/common/datastore/podding/test/util.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresqlpy">CalendarServer/trunk/txdav/common/datastore/sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_externalpy">CalendarServer/trunk/txdav/common/datastore/sql_external.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemacurrentoracledialectsql">CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemacurrentsql">CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_tablespy">CalendarServer/trunk/txdav/common/datastore/sql_tables.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoretesttest_sqlpy">CalendarServer/trunk/txdav/common/datastore/test/test_sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoretestutilpy">CalendarServer/trunk/txdav/common/datastore/test/util.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoreupgradesqlupgradescalendar_upgrade_from_2_to_3py">CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoreupgradesqlupgradestesttest_notification_upgrade_from_0_to_1py">CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/test/test_notification_upgrade_from_0_to_1.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoreworktesttest_revision_cleanuppy">CalendarServer/trunk/txdav/common/datastore/work/test/test_revision_cleanup.py</a></li>
<li><a href="#CalendarServertrunktxdavcommonicommondatastorepy">CalendarServer/trunk/txdav/common/icommondatastore.py</a></li>
<li><a href="#CalendarServertrunktxdavwhodelegatespy">CalendarServer/trunk/txdav/who/delegates.py</a></li>
<li><a href="#CalendarServertrunktxdavwhogroupspy">CalendarServer/trunk/txdav/who/groups.py</a></li>
<li><a href="#CalendarServertrunktxdavwhotesttest_delegatespy">CalendarServer/trunk/txdav/who/test/test_delegates.py</a></li>
<li><a href="#CalendarServertrunktxdavwhotesttest_group_attendeespy">CalendarServer/trunk/txdav/who/test/test_group_attendees.py</a></li>
<li><a href="#CalendarServertrunktxdavwhotesttest_group_shareespy">CalendarServer/trunk/txdav/who/test/test_group_sharees.py</a></li>
<li><a href="#CalendarServertrunktxdavwhotesttest_groupspy">CalendarServer/trunk/txdav/who/test/test_groups.py</a></li>
</ul>

<h3>Added Paths</h3>
<ul>
<li><a href="#CalendarServertrunkcalendarservertoolspod_migrationpy">CalendarServer/trunk/calendarserver/tools/pod_migration.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoresql_attachmentpy">CalendarServer/trunk/txdav/caldav/datastore/sql_attachment.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoresql_directorypy">CalendarServer/trunk/txdav/caldav/datastore/sql_directory.py</a></li>
<li>CalendarServer/trunk/txdav/common/datastore/podding/migration/</li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigration__init__py">CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationhome_syncpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationsync_metadatapy">CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py</a></li>
<li>CalendarServer/trunk/txdav/common/datastore/podding/migration/test/</li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtest__init__py">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py</a></li>
<li>CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/</li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsaugmentsxml">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsgroupAccountsxml">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_home_syncpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_migrationpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingutilpy">CalendarServer/trunk/txdav/common/datastore/podding/util.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_apnpy">CalendarServer/trunk/txdav/common/datastore/sql_apn.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_directorypy">CalendarServer/trunk/txdav/common/datastore/sql_directory.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_imippy">CalendarServer/trunk/txdav/common/datastore/sql_imip.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_notificationpy">CalendarServer/trunk/txdav/common/datastore/sql_notification.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemaoldoracledialectv51sql">CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v51.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemaoldpostgresdialectv51sql">CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v51.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemaupgradesoracledialectupgrade_from_51_to_52sql">CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_51_to_52.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemaupgradespostgresdialectupgrade_from_51_to_52sql">CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_51_to_52.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_sharingpy">CalendarServer/trunk/txdav/common/datastore/sql_sharing.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_utilpy">CalendarServer/trunk/txdav/common/datastore/sql_util.py</a></li>
</ul>

<h3>Removed Paths</h3>
<ul>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulepy">CalendarServer/trunk/txdav/caldav/datastore/schedule.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoretesttest_schedulepy">CalendarServer/trunk/txdav/caldav/datastore/test/test_schedule.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigration__init__py">CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationhome_syncpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationsync_metadatapy">CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py</a></li>
<li>CalendarServer/trunk/txdav/common/datastore/podding/migration/test/</li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtest__init__py">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py</a></li>
<li>CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/</li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsaugmentsxml">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsgroupAccountsxml">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_home_syncpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_migrationpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py</a></li>
</ul>

<h3>Property Changed</h3>
<ul>
<li><a href="#CalendarServertrunk">CalendarServer/trunk/</a></li>
</ul>

</div>
<div id="patch">
<h3>Diff</h3>
<a id="CalendarServertrunk"></a>
<div class="propset"><h4>Property changes: CalendarServer/trunk</h4>
<pre class="diff"><span>
</span></pre></div>
<a id="svnmergeinfo"></a>
<div class="modfile"><h4>Modified: svn:mergeinfo</h4></div>
<span class="cx">/CalendarServer/branches/config-separation:4379-4443
</span><span class="cx">/CalendarServer/branches/egg-info-351:4589-4625
</span><span class="cx">/CalendarServer/branches/generic-sqlstore:6167-6191
</span><span class="cx">/CalendarServer/branches/new-store:5594-5934
</span><span class="cx">/CalendarServer/branches/new-store-no-caldavfile:5911-5935
</span><span class="cx">/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-5.2-dev:11972,12357-12358,12794,12814
</span><span class="cx">/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
</span><span class="cx">/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
</span><span class="cx">/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
</span><span class="cx">/CalendarServer/branches/users/cdaboo/cross-pod-sharing:12038-12191
</span><span class="cx">/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
</span><span class="cx">/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
</span><span class="cx">/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
</span><span class="cx">/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
</span><span class="cx">/CalendarServer/branches/users/cdaboo/json:11622-11912
</span><span class="cx">/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
</span><span class="cx">/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
</span><span class="cx">/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
</span><span class="cx">/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pods:7297-7377
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pycard:7227-7237
</span><span class="cx">/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
</span><span class="cx">/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
</span><span class="cx">/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
</span><span class="cx">/CalendarServer/branches/users/cdaboo/scheduling-queue-refresh:11783-12557
</span><span class="cx">/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
</span><span class="cx">/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
</span><span class="cx">/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
</span><span class="cx">/CalendarServer/branches/users/cdaboo/timezones:7443-7699
</span><span class="cx">/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
</span><span class="cx">/CalendarServer/branches/users/gaya/cleanrevisions:12152-12334
</span><span class="cx">/CalendarServer/branches/users/gaya/groupsharee2:13669-13773
</span><span class="cx">/CalendarServer/branches/users/gaya/sharedgroupfixes:12120-12142
</span><span class="cx">/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
</span><span class="cx">/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
</span><span class="cx">/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
</span><span class="cx">/CalendarServer/branches/users/glyph/conn-limit:6574-6577
</span><span class="cx">/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
</span><span class="cx">/CalendarServer/branches/users/glyph/dalify:6932-7023
</span><span class="cx">/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
</span><span class="cx">/CalendarServer/branches/users/glyph/deploybuild:7563-7572
</span><span class="cx">/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
</span><span class="cx">/CalendarServer/branches/users/glyph/disable-quota:7718-7727
</span><span class="cx">/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
</span><span class="cx">/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
</span><span class="cx">/CalendarServer/branches/users/glyph/hang-fix:11465-11491
</span><span class="cx">/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
</span><span class="cx">/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
</span><span class="cx">/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
</span><span class="cx">/CalendarServer/branches/users/glyph/linux-tests:6893-6900
</span><span class="cx">/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
</span><span class="cx">/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
</span><span class="cx">/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
</span><span class="cx">/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
</span><span class="cx">/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
</span><span class="cx">/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
</span><span class="cx">/CalendarServer/branches/users/glyph/new-export:7444-7485
</span><span class="cx">/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
</span><span class="cx">/CalendarServer/branches/users/glyph/oracle:7106-7155
</span><span class="cx">/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
</span><span class="cx">/CalendarServer/branches/users/glyph/other-html:8062-8091
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
</span><span class="cx">/CalendarServer/branches/users/glyph/q:9560-9688
</span><span class="cx">/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
</span><span class="cx">/CalendarServer/branches/users/glyph/quota:7604-7637
</span><span class="cx">/CalendarServer/branches/users/glyph/sendfdport:5388-5424
</span><span class="cx">/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
</span><span class="cx">/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
</span><span class="cx">/CalendarServer/branches/users/glyph/sharedpool:6490-6550
</span><span class="cx">/CalendarServer/branches/users/glyph/sharing-api:9192-9205
</span><span class="cx">/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
</span><span class="cx">/CalendarServer/branches/users/glyph/sql-store:5929-6073
</span><span class="cx">/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
</span><span class="cx">/CalendarServer/branches/users/glyph/subtransactions:7248-7258
</span><span class="cx">/CalendarServer/branches/users/glyph/table-alias:8651-8664
</span><span class="cx">/CalendarServer/branches/users/glyph/uidexport:7673-7676
</span><span class="cx">/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
</span><span class="cx">/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
</span><span class="cx">/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
</span><span class="cx">/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
</span><span class="cx">/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
</span><span class="cx">/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
</span><span class="cx">/CalendarServer/branches/users/sagen/applepush:8126-8184
</span><span class="cx">/CalendarServer/branches/users/sagen/inboxitems:7380-7381
</span><span class="cx">/CalendarServer/branches/users/sagen/locations-resources:5032-5051
</span><span class="cx">/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who:12819-12860
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-2:12861-12898
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-3:12899-12913
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-4:12914-13157
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-5:13158-13163
</span><span class="cx">/CalendarServer/branches/users/sagen/newcua:13309-13327
</span><span class="cx">/CalendarServer/branches/users/sagen/newcua-1:13328-13330
</span><span class="cx">/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
</span><span class="cx">/CalendarServer/branches/users/sagen/recordtypes:13648-13656
</span><span class="cx">/CalendarServer/branches/users/sagen/recordtypes-2:13657
</span><span class="cx">/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
</span><span class="cx">/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
</span><span class="cx">/CalendarServer/branches/users/sagen/resources-2:5084-5093
</span><span class="cx">/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
</span><span class="cx">/CalendarServer/branches/users/wsanchez/psycopg2cffi:14427-14439
</span><span class="cx">/CalendarServer/branches/users/wsanchez/transations:5515-5593
</span><span class="cx">   + /CalDAVTester/trunk:11193-11198
</span><span class="cx">/CalendarServer/branches/config-separation:4379-4443
</span><span class="cx">/CalendarServer/branches/egg-info-351:4589-4625
</span><span class="cx">/CalendarServer/branches/generic-sqlstore:6167-6191
</span><span class="cx">/CalendarServer/branches/new-store:5594-5934
</span><span class="cx">/CalendarServer/branches/new-store-no-caldavfile:5911-5935
</span><span class="cx">/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-5.2-dev:11972,12357-12358,12794,12814
</span><span class="cx">/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
</span><span class="cx">/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
</span><span class="cx">/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
</span><span class="cx">/CalendarServer/branches/users/cdaboo/cross-pod-sharing:12038-12191
</span><span class="cx">/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
</span><span class="cx">/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
</span><span class="cx">/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
</span><span class="cx">/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
</span><span class="cx">/CalendarServer/branches/users/cdaboo/json:11622-11912
</span><span class="cx">/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
</span><span class="cx">/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
</span><span class="cx">/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
</span><span class="cx">/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pod2pod-migration:14338-14520
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pods:7297-7377
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pycard:7227-7237
</span><span class="cx">/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
</span><span class="cx">/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
</span><span class="cx">/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
</span><span class="cx">/CalendarServer/branches/users/cdaboo/scheduling-queue-refresh:11783-12557
</span><span class="cx">/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
</span><span class="cx">/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
</span><span class="cx">/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
</span><span class="cx">/CalendarServer/branches/users/cdaboo/timezones:7443-7699
</span><span class="cx">/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
</span><span class="cx">/CalendarServer/branches/users/gaya/cleanrevisions:12152-12334
</span><span class="cx">/CalendarServer/branches/users/gaya/groupsharee2:13669-13773
</span><span class="cx">/CalendarServer/branches/users/gaya/sharedgroupfixes:12120-12142
</span><span class="cx">/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
</span><span class="cx">/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
</span><span class="cx">/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
</span><span class="cx">/CalendarServer/branches/users/glyph/conn-limit:6574-6577
</span><span class="cx">/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
</span><span class="cx">/CalendarServer/branches/users/glyph/dalify:6932-7023
</span><span class="cx">/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
</span><span class="cx">/CalendarServer/branches/users/glyph/deploybuild:7563-7572
</span><span class="cx">/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
</span><span class="cx">/CalendarServer/branches/users/glyph/disable-quota:7718-7727
</span><span class="cx">/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
</span><span class="cx">/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
</span><span class="cx">/CalendarServer/branches/users/glyph/hang-fix:11465-11491
</span><span class="cx">/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
</span><span class="cx">/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
</span><span class="cx">/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
</span><span class="cx">/CalendarServer/branches/users/glyph/linux-tests:6893-6900
</span><span class="cx">/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
</span><span class="cx">/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
</span><span class="cx">/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
</span><span class="cx">/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
</span><span class="cx">/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
</span><span class="cx">/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
</span><span class="cx">/CalendarServer/branches/users/glyph/new-export:7444-7485
</span><span class="cx">/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
</span><span class="cx">/CalendarServer/branches/users/glyph/oracle:7106-7155
</span><span class="cx">/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
</span><span class="cx">/CalendarServer/branches/users/glyph/other-html:8062-8091
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
</span><span class="cx">/CalendarServer/branches/users/glyph/q:9560-9688
</span><span class="cx">/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
</span><span class="cx">/CalendarServer/branches/users/glyph/quota:7604-7637
</span><span class="cx">/CalendarServer/branches/users/glyph/sendfdport:5388-5424
</span><span class="cx">/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
</span><span class="cx">/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
</span><span class="cx">/CalendarServer/branches/users/glyph/sharedpool:6490-6550
</span><span class="cx">/CalendarServer/branches/users/glyph/sharing-api:9192-9205
</span><span class="cx">/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
</span><span class="cx">/CalendarServer/branches/users/glyph/sql-store:5929-6073
</span><span class="cx">/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
</span><span class="cx">/CalendarServer/branches/users/glyph/subtransactions:7248-7258
</span><span class="cx">/CalendarServer/branches/users/glyph/table-alias:8651-8664
</span><span class="cx">/CalendarServer/branches/users/glyph/uidexport:7673-7676
</span><span class="cx">/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
</span><span class="cx">/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
</span><span class="cx">/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
</span><span class="cx">/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
</span><span class="cx">/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
</span><span class="cx">/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
</span><span class="cx">/CalendarServer/branches/users/sagen/applepush:8126-8184
</span><span class="cx">/CalendarServer/branches/users/sagen/inboxitems:7380-7381
</span><span class="cx">/CalendarServer/branches/users/sagen/locations-resources:5032-5051
</span><span class="cx">/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who:12819-12860
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-2:12861-12898
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-3:12899-12913
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-4:12914-13157
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-5:13158-13163
</span><span class="cx">/CalendarServer/branches/users/sagen/newcua:13309-13327
</span><span class="cx">/CalendarServer/branches/users/sagen/newcua-1:13328-13330
</span><span class="cx">/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
</span><span class="cx">/CalendarServer/branches/users/sagen/recordtypes:13648-13656
</span><span class="cx">/CalendarServer/branches/users/sagen/recordtypes-2:13657
</span><span class="cx">/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
</span><span class="cx">/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
</span><span class="cx">/CalendarServer/branches/users/sagen/resources-2:5084-5093
</span><span class="cx">/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
</span><span class="cx">/CalendarServer/branches/users/wsanchez/psycopg2cffi:14427-14439
</span><span class="cx">/CalendarServer/branches/users/wsanchez/transations:5515-5593
</span><a id="CalendarServertrunkcalendarserverpushapplepushpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/push/applepush.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/push/applepush.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/push/applepush.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -234,10 +234,7 @@
</span><span class="cx">                 self.log.debug(
</span><span class="cx">                     &quot;Sending %d APNS notifications for %s&quot; %
</span><span class="cx">                     (numSubscriptions, pushKey))
</span><del>-                tokens = []
-                for token, uid in subscriptions:
-                    if token and uid:
-                        tokens.append(token)
</del><ins>+                tokens = [record.token for record in subscriptions if record.token and record.subscriberGUID]
</ins><span class="cx">                 if tokens:
</span><span class="cx">                     provider.scheduleNotifications(
</span><span class="cx">                         tokens, pushKey,
</span><span class="lines">@@ -349,11 +346,11 @@
</span><span class="cx">                     (token,))
</span><span class="cx">                 txn = self.factory.store.newTransaction(label=&quot;APNProviderProtocol.processError&quot;)
</span><span class="cx">                 subscriptions = (yield txn.apnSubscriptionsByToken(token))
</span><del>-                for key, _ignore_modified, _ignore_uid in subscriptions:
</del><ins>+                for record in subscriptions:
</ins><span class="cx">                     self.log.debug(
</span><span class="cx">                         &quot;Removing subscription: %s %s&quot; %
</span><del>-                        (token, key))
-                    yield txn.removeAPNSubscription(token, key)
</del><ins>+                        (token, record.resourceKey))
+                    yield txn.removeAPNSubscription(token, record.resourceKey)
</ins><span class="cx">                 yield txn.commit()
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -746,12 +743,12 @@
</span><span class="cx">         txn = self.factory.store.newTransaction(label=&quot;APNFeedbackProtocol.processFeedback&quot;)
</span><span class="cx">         subscriptions = (yield txn.apnSubscriptionsByToken(token))
</span><span class="cx"> 
</span><del>-        for key, modified, _ignore_uid in subscriptions:
-            if timestamp &gt; modified:
</del><ins>+        for record in subscriptions:
+            if timestamp &gt; record.modified:
</ins><span class="cx">                 self.log.debug(
</span><span class="cx">                     &quot;FeedbackProtocol removing subscription: %s %s&quot; %
</span><del>-                    (token, key))
-                yield txn.removeAPNSubscription(token, key)
</del><ins>+                    (token, record.resourceKey))
+                yield txn.removeAPNSubscription(token, record.resourceKey)
</ins><span class="cx">         yield txn.commit()
</span><span class="cx"> 
</span><span class="cx"> 
</span></span></pre></div>
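<p>The applepush.py change above replaces positional tuple unpacking of subscription rows with named
attribute access on record objects; the same pattern recurs in the test_applepush.py and principals.py
diffs below. A minimal sketch of the pattern, using a hypothetical <code>namedtuple</code> stand-in for
the store's record type (the diff only guarantees the attribute names <code>token</code>,
<code>resourceKey</code>, <code>modified</code> and <code>subscriberGUID</code>):</p>
<pre>
# Hypothetical stand-in for the records returned by txn.apnSubscriptionsByToken();
# only the attribute names used in the diff above are assumed.
from collections import namedtuple

APNSubscriptionRecord = namedtuple(
    "APNSubscriptionRecord",
    ["token", "resourceKey", "modified", "subscriberGUID"],
)

subscriptions = [
    APNSubscriptionRecord("token-1", "/CalDAV/calendars.example.com/user01/calendar/", 3000, "GUID-1"),
    APNSubscriptionRecord("", "/CalDAV/calendars.example.com/user02/calendar/", 3100, None),
]

# Before: tokens = [token for token, uid in subscriptions if token and uid]
# After, as in the rewritten applepush.py, filtering by named attributes:
tokens = [record.token for record in subscriptions if record.token and record.subscriberGUID]
assert tokens == ["token-1"]
</pre>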
<a id="CalendarServertrunkcalendarserverpushtesttest_applepushpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/push/test/test_applepush.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/push/test/test_applepush.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/push/test/test_applepush.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -88,6 +88,7 @@
</span><span class="cx">         yield txn.addAPNSubscription(token, key2, timestamp2, uid, userAgent, ipAddr)
</span><span class="cx"> 
</span><span class="cx">         subscriptions = (yield txn.apnSubscriptionsBySubscriber(uid))
</span><ins>+        subscriptions = [[record.token, record.resourceKey, record.modified, record.userAgent, record.ipAddr] for record in subscriptions]
</ins><span class="cx">         self.assertTrue([token, key1, timestamp1, userAgent, ipAddr] in subscriptions)
</span><span class="cx">         self.assertTrue([token, key2, timestamp2, userAgent, ipAddr] in subscriptions)
</span><span class="cx">         self.assertTrue([token2, key1, timestamp1, userAgent, ipAddr] in subscriptions)
</span><span class="lines">@@ -98,9 +99,11 @@
</span><span class="cx">         uid2 = &quot;D8FFB335-9D36-4CE8-A3B9-D1859E38C0DA&quot;
</span><span class="cx">         yield txn.addAPNSubscription(token, key2, timestamp3, uid2, userAgent, ipAddr)
</span><span class="cx">         subscriptions = (yield txn.apnSubscriptionsBySubscriber(uid))
</span><ins>+        subscriptions = [[record.token, record.resourceKey, record.modified, record.userAgent, record.ipAddr] for record in subscriptions]
</ins><span class="cx">         self.assertTrue([token, key1, timestamp1, userAgent, ipAddr] in subscriptions)
</span><span class="cx">         self.assertFalse([token, key2, timestamp3, userAgent, ipAddr] in subscriptions)
</span><span class="cx">         subscriptions = (yield txn.apnSubscriptionsBySubscriber(uid2))
</span><ins>+        subscriptions = [[record.token, record.resourceKey, record.modified, record.userAgent, record.ipAddr] for record in subscriptions]
</ins><span class="cx">         self.assertTrue([token, key2, timestamp3, userAgent, ipAddr] in subscriptions)
</span><span class="cx">         # Change it back
</span><span class="cx">         yield txn.addAPNSubscription(token, key2, timestamp2, uid, userAgent, ipAddr)
</span><span class="lines">@@ -284,10 +287,10 @@
</span><span class="cx">         txn = self._sqlCalendarStore.newTransaction()
</span><span class="cx">         subscriptions = (yield txn.apnSubscriptionsByToken(token))
</span><span class="cx">         yield txn.commit()
</span><del>-        self.assertEquals(
-            subscriptions,
-            [[&quot;/CalDAV/calendars.example.com/user02/calendar/&quot;, 3000, &quot;D2256BCC-48E2-42D1-BD89-CBA1E4CCDFFB&quot;]]
-        )
</del><ins>+        self.assertEquals(len(subscriptions), 1)
+        self.assertEqual(subscriptions[0].resourceKey, &quot;/CalDAV/calendars.example.com/user02/calendar/&quot;)
+        self.assertEqual(subscriptions[0].modified, 3000)
+        self.assertEqual(subscriptions[0].subscriberGUID, &quot;D2256BCC-48E2-42D1-BD89-CBA1E4CCDFFB&quot;)
</ins><span class="cx"> 
</span><span class="cx">         # Verify processError removes associated subscriptions and history
</span><span class="cx">         # First find the id corresponding to token2
</span><span class="lines">@@ -326,7 +329,7 @@
</span><span class="cx">         subscriptions = (yield txn.apnSubscriptionsByToken(token2))
</span><span class="cx">         yield txn.commit()
</span><span class="cx">         self.assertEquals(len(subscriptions), 1)
</span><del>-        self.assertEquals(subscriptions[0][0], key2)
</del><ins>+        self.assertEquals(subscriptions[0].resourceKey, key2)
</ins><span class="cx"> 
</span><span class="cx">         service.stopService()
</span><span class="cx"> 
</span></span></pre></div>
<a id="CalendarServertrunkcalendarserverpushtesttest_notifierpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/push/test/test_notifier.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/push/test/test_notifier.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/push/test/test_notifier.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -297,7 +297,7 @@
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def test_notificationNotifier(self):
</span><span class="cx"> 
</span><del>-        notifications = yield self.transactionUnderTest().notificationsWithUID(&quot;user01&quot;)
</del><ins>+        notifications = yield self.transactionUnderTest().notificationsWithUID(&quot;user01&quot;, create=True)
</ins><span class="cx">         yield notifications.notifyChanged(category=ChangeCategory.default)
</span><span class="cx">         self.assertEquals(
</span><span class="cx">             set(self.notifierFactory.history),
</span></span></pre></div>
<a id="CalendarServertrunkcalendarservertoolsexportpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/tools/export.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/tools/export.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/tools/export.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -203,7 +203,7 @@
</span><span class="cx">         for this calendar home.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         uid = yield self.getHomeUID(exportService)
</span><del>-        home = yield txn.calendarHomeWithUID(uid, True)
</del><ins>+        home = yield txn.calendarHomeWithUID(uid, create=True)
</ins><span class="cx">         result = []
</span><span class="cx">         if self.collections:
</span><span class="cx">             for collection in self.collections:
</span><span class="lines">@@ -303,6 +303,7 @@
</span><span class="cx">     fileobj.write(comp.getTextWithTimezones(True))
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+
</ins><span class="cx"> @inlineCallbacks
</span><span class="cx"> def exportToDirectory(calendars, dirname):
</span><span class="cx">     &quot;&quot;&quot;
</span></span></pre></div>
<a id="CalendarServertrunkcalendarservertoolspod_migrationpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationcalendarservertoolspod_migrationpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/calendarserver/tools/pod_migration.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/calendarserver/tools/pod_migration.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/tools/pod_migration.py                                (rev 0)
+++ CalendarServer/trunk/calendarserver/tools/pod_migration.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,293 @@
</span><ins>+#!/usr/bin/env python
+# -*- test-case-name: calendarserver.tools.test.test_calverify -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+from __future__ import print_function
+
+&quot;&quot;&quot;
+This tool manages an overall pod migration. Migration is done in a series of steps,
+with the system admin triggering each step individually by running this tool.
+&quot;&quot;&quot;
+
+import os
+import sys
+
+from twisted.internet.defer import inlineCallbacks
+from twisted.python.text import wordWrap
+from twisted.python.usage import Options, UsageError
+
+from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE
+from twistedcaldav.timezones import TimezoneCache
+
+from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
+
+from twext.python.log import Logger
+from twext.who.idirectory import RecordType
+
+from calendarserver.tools.cmdline import utilityMain, WorkerService
+
+
+log = Logger()
+
+VERSION = &quot;1&quot;
+
+
+
+def usage(e=None):
+    if e:
+        print(e)
+        print(&quot;&quot;)
+    try:
+        PodMigrationOptions().opt_help()
+    except SystemExit:
+        pass
+    if e:
+        sys.exit(64)
+    else:
+        sys.exit(0)
+
+
+description = ''.join(
+    wordWrap(
+        &quot;&quot;&quot;
+        Usage: calendarserver_pod_migration [options] [input specifiers]
+        &quot;&quot;&quot;,
+        int(os.environ.get('COLUMNS', '80'))
+    )
+)
+description += &quot;\nVersion: %s&quot; % (VERSION,)
+
+
+
+class ConfigError(Exception):
+    pass
+
+
+
+class PodMigrationOptions(Options):
+    &quot;&quot;&quot;
+    Command-line options for 'calendarserver_pod_migration'
+    &quot;&quot;&quot;
+
+    synopsis = description
+
+    optFlags = [
+        ['verbose', 'v', &quot;Verbose logging.&quot;],
+        ['debug', 'D', &quot;Debug logging.&quot;],
+        ['step1', '1', &quot;Run step 1 of the migration (initial sync)&quot;],
+        ['step2', '2', &quot;Run step 2 of the migration (incremental sync)&quot;],
+        ['step3', '3', &quot;Run step 3 of the migration (prepare for final sync)&quot;],
+        ['step4', '4', &quot;Run step 4 of the migration (final incremental sync)&quot;],
+        ['step5', '5', &quot;Run step 5 of the migration (final reconcile sync)&quot;],
+        ['step6', '6', &quot;Run step 6 of the migration (enable new home)&quot;],
+        ['step7', '7', &quot;Run step 7 of the migration (remove old home)&quot;],
+    ]
+
+    optParameters = [
+        ['config', 'f', DEFAULT_CONFIG_FILE, &quot;Specify caldavd.plist configuration path.&quot;],
+        ['uid', 'u', &quot;&quot;, &quot;Directory record uid of user to migrate [REQUIRED]&quot;],
+    ]
+
+    longdesc = &quot;Only one step option is allowed.&quot;
+
+    def __init__(self):
+        super(PodMigrationOptions, self).__init__()
+        self.outputName = '-'
+
+
+    def opt_output(self, filename):
+        &quot;&quot;&quot;
+        Specify output file path (default: '-', meaning stdout).
+        &quot;&quot;&quot;
+        self.outputName = filename
+
+    opt_o = opt_output
+
+
+    def openOutput(self):
+        &quot;&quot;&quot;
+        Open the appropriate output file based on the '--output' option.
+        &quot;&quot;&quot;
+        if self.outputName == '-':
+            return sys.stdout
+        else:
+            return open(self.outputName, 'wb')
+
+
+    def postOptions(self):
+        runstep = None
+        for step in range(7):
+            if self[&quot;step{}&quot;.format(step + 1)]:
+                if runstep is None:
+                    runstep = step
+                    self[&quot;runstep&quot;] = step + 1
+                else:
+                    raise UsageError(&quot;Only one step option allowed&quot;)
+        else:
+            if runstep is None:
+                raise UsageError(&quot;One step option must be present&quot;)
+        if not self[&quot;uid&quot;]:
+            raise UsageError(&quot;A uid is required&quot;)
+
+
+
+class PodMigrationService(WorkerService, object):
+    &quot;&quot;&quot;
+    Service which runs the requested migration step, then stops the reactor.
+    &quot;&quot;&quot;
+
+    def __init__(self, store, options, output, reactor, config):
+        super(PodMigrationService, self).__init__(store)
+        self.options = options
+        self.output = output
+        self.reactor = reactor
+        self.config = config
+        TimezoneCache.create()
+
+
+    @inlineCallbacks
+    def doWork(self):
+        &quot;&quot;&quot;
+        Do the work, stopping the reactor when done.
+        &quot;&quot;&quot;
+        self.output.write(&quot;\n---- Pod Migration version: %s ----\n&quot; % (VERSION,))
+
+        # Map short name to uid
+        record = yield self.store.directoryService().recordWithUID(self.options[&quot;uid&quot;])
+        if record is None:
+            record = yield self.store.directoryService().recordWithShortName(RecordType.user, self.options[&quot;uid&quot;])
+            if record is not None:
+                self.options[&quot;uid&quot;] = record.uid
+
+        try:
+            yield getattr(self, &quot;step{}&quot;.format(self.options[&quot;runstep&quot;]))()
+            self.output.close()
+        except ConfigError:
+            pass
+        except:
+            log.failure(&quot;doWork()&quot;)
+
+
+    @inlineCallbacks
+    def step1(self):
+        syncer = CrossPodHomeSync(
+            self.store,
+            self.options[&quot;uid&quot;],
+            uselog=self.output if self.options[&quot;verbose&quot;] else None
+        )
+        syncer.accounting(&quot;Pod Migration Step 1\n&quot;)
+        yield syncer.sync()
+
+
+    @inlineCallbacks
+    def step2(self):
+        syncer = CrossPodHomeSync(
+            self.store,
+            self.options[&quot;uid&quot;],
+            uselog=self.output if self.options[&quot;verbose&quot;] else None
+        )
+        syncer.accounting(&quot;Pod Migration Step 2\n&quot;)
+        yield syncer.sync()
+
+
+    @inlineCallbacks
+    def step3(self):
+        syncer = CrossPodHomeSync(
+            self.store,
+            self.options[&quot;uid&quot;],
+            uselog=self.output if self.options[&quot;verbose&quot;] else None
+        )
+        syncer.accounting(&quot;Pod Migration Step 3\n&quot;)
+        yield syncer.disableRemoteHome()
+
+
+    @inlineCallbacks
+    def step4(self):
+        syncer = CrossPodHomeSync(
+            self.store,
+            self.options[&quot;uid&quot;],
+            final=True,
+            uselog=self.output if self.options[&quot;verbose&quot;] else None
+        )
+        syncer.accounting(&quot;Pod Migration Step 4\n&quot;)
+        yield syncer.sync()
+
+
+    @inlineCallbacks
+    def step5(self):
+        syncer = CrossPodHomeSync(
+            self.store,
+            self.options[&quot;uid&quot;],
+            final=True,
+            uselog=self.output if self.options[&quot;verbose&quot;] else None
+        )
+        syncer.accounting(&quot;Pod Migration Step 5\n&quot;)
+        yield syncer.finalSync()
+
+
+    @inlineCallbacks
+    def step6(self):
+        syncer = CrossPodHomeSync(
+            self.store,
+            self.options[&quot;uid&quot;],
+            uselog=self.output if self.options[&quot;verbose&quot;] else None
+        )
+        syncer.accounting(&quot;Pod Migration Step 6\n&quot;)
+        yield syncer.enableLocalHome()
+
+
+    @inlineCallbacks
+    def step7(self):
+        syncer = CrossPodHomeSync(
+            self.store,
+            self.options[&quot;uid&quot;],
+            final=True,
+            uselog=self.output if self.options[&quot;verbose&quot;] else None
+        )
+        syncer.accounting(&quot;Pod Migration Step 7\n&quot;)
+        yield syncer.removeRemoteHome()
+
+
+
+def main(argv=sys.argv, stderr=sys.stderr, reactor=None):
+    &quot;&quot;&quot;
+    Run the pod migration.
+    &quot;&quot;&quot;
+    if reactor is None:
+        from twisted.internet import reactor
+    options = PodMigrationOptions()
+    try:
+        options.parseOptions(argv[1:])
+    except UsageError as e:
+        stderr.write(&quot;Invalid options specified: %s\n&quot; % (e,))
+        options.opt_help()
+
+    try:
+        output = options.openOutput()
+    except IOError as e:
+        stderr.write(&quot;Unable to open output file for writing: %s\n&quot; % (e,))
+        sys.exit(1)
+
+
+    def makeService(store):
+        from twistedcaldav.config import config
+        config.TransactionTimeoutSeconds = 0
+        return PodMigrationService(store, options, output, reactor, config)
+
+    utilityMain(options['config'], makeService, reactor, verbose=options[&quot;debug&quot;])
+
+if __name__ == '__main__':
+    main()
</ins></span></pre></div>
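<p>doWork() above resolves the requested step dynamically via getattr. A minimal standalone sketch of that dispatch pattern (the class and method names here are illustrative, not part of the tool):</p>
<pre>class Runner(object):
    def step1(self):
        return &quot;step 1 ran&quot;

    def step2(self):
        return &quot;step 2 ran&quot;

    def run(self, stepNumber):
        # Look the step method up by name; unknown numbers raise AttributeError
        return getattr(self, &quot;step{}&quot;.format(stepNumber))()

print(Runner().run(1))    # step 1 ran
</pre>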
<a id="CalendarServertrunkcalendarservertoolsprincipalspy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/tools/principals.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/tools/principals.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/tools/principals.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -775,15 +775,11 @@
</span><span class="cx">                 groupUIDs.append(record.uid)
</span><span class="cx"> 
</span><span class="cx">     for groupUID in groupUIDs:
</span><del>-        (
-            groupID, name, _ignore_membershipHash, modified, _ignore_extant
-        ) = yield txn.groupByUID(
-            groupUID
-        )
-        print(&quot;Group: \&quot;{name}\&quot; ({uid})&quot;.format(name=name, uid=groupUID))
</del><ins>+        group = yield txn.groupByUID(groupUID)
+        print(&quot;Group: \&quot;{name}\&quot; ({uid})&quot;.format(name=group.name, uid=group.groupUID))
</ins><span class="cx"> 
</span><span class="cx">         for txt, readWrite in ((&quot;read-only&quot;, False), (&quot;read-write&quot;, True)):
</span><del>-            delegatorUIDs = yield txn.delegatorsToGroup(groupID, readWrite)
</del><ins>+            delegatorUIDs = yield txn.delegatorsToGroup(group.groupID, readWrite)
</ins><span class="cx">             for delegatorUID in delegatorUIDs:
</span><span class="cx">                 delegator = yield directory.recordWithUID(delegatorUID)
</span><span class="cx">                 print(
</span><span class="lines">@@ -793,12 +789,12 @@
</span><span class="cx">                 )
</span><span class="cx"> 
</span><span class="cx">         print(&quot;Group members:&quot;)
</span><del>-        memberUIDs = yield txn.groupMemberUIDs(groupID)
</del><ins>+        memberUIDs = yield txn.groupMemberUIDs(group.groupID)
</ins><span class="cx">         for memberUID in memberUIDs:
</span><span class="cx">             record = yield directory.recordWithUID(memberUID)
</span><span class="cx">             print(prettyRecord(record))
</span><span class="cx"> 
</span><del>-        print(&quot;Last cached: {} GMT&quot;.format(modified))
</del><ins>+        print(&quot;Last cached: {} GMT&quot;.format(group.modified))
</ins><span class="cx">         print()
</span><span class="cx"> 
</span><span class="cx">     yield txn.commit()
</span></span></pre></div>
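<p>The principals.py change above moves txn.groupByUID() from returning a tuple to returning a single record with named attributes. A minimal sketch of a consumer, assuming an already-open store transaction and the attribute names shown in the diff (name, groupUID, groupID, modified):</p>
<pre>from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def showGroup(txn, uid):
    group = yield txn.groupByUID(uid)
    print(&quot;Group: \&quot;{name}\&quot; ({uid})&quot;.format(name=group.name, uid=group.groupUID))
    memberUIDs = yield txn.groupMemberUIDs(group.groupID)
    print(&quot;{} member(s), last cached {} GMT&quot;.format(len(memberUIDs), group.modified))
</pre>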
<a id="CalendarServertrunkcalendarservertoolspurgepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/tools/purge.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/tools/purge.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/tools/purge.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -992,7 +992,7 @@
</span><span class="cx"> 
</span><span class="cx">         if not self.dryrun:
</span><span class="cx">             yield storeCalHome.removeUnacceptedShares()
</span><del>-            notificationHome = yield txn.notificationsWithUID(storeCalHome.uid(), create=False)
</del><ins>+            notificationHome = yield txn.notificationsWithUID(storeCalHome.uid())
</ins><span class="cx">             if notificationHome is not None:
</span><span class="cx">                 yield notificationHome.remove()
</span><span class="cx"> 
</span></span></pre></div>
<a id="CalendarServertrunkcalendarservertoolspushpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/tools/push.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/tools/push.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/tools/push.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -68,9 +68,9 @@
</span><span class="cx">             (yield txn.commit())
</span><span class="cx">             if subscriptions:
</span><span class="cx">                 byKey = {}
</span><del>-                for token, key, timestamp, userAgent, ipAddr in subscriptions:
-                    byKey.setdefault(key, []).append((token, timestamp, userAgent, ipAddr))
-                for key, tokens in byKey.iteritems():
</del><ins>+                for apnrecord in subscriptions:
+                    byKey.setdefault(apnrecord.resourceKey, []).append(apnrecord)
+                for key, apnsrecords in byKey.iteritems():
</ins><span class="cx">                     print
</span><span class="cx">                     protocol, _ignore_host, path = key.strip(&quot;/&quot;).split(&quot;/&quot;, 2)
</span><span class="cx">                     resource = {
</span><span class="lines">@@ -89,13 +89,13 @@
</span><span class="cx">                     else:
</span><span class="cx">                         print(&quot;...is subscribed to %s's %s home&quot; % (user, resource),)
</span><span class="cx">                         # print(&quot;   (key: %s)\n&quot; % (key,))
</span><del>-                    print(&quot;with %d device(s):&quot; % (len(tokens),))
-                    for token, timestamp, userAgent, ipAddr in tokens:
</del><ins>+                    print(&quot;with %d device(s):&quot; % (len(apnsrecords),))
+                    for apnrecords in apnsrecords:
</ins><span class="cx">                         print(&quot; %s\n   '%s' from %s\n   %s&quot; % (
</span><del>-                            token, userAgent, ipAddr,
</del><ins>+                            apnrecords.token, apnrecords.userAgent, apnrecords.ipAddr,
</ins><span class="cx">                             time.strftime(
</span><span class="cx">                                 &quot;on %a, %d %b %Y at %H:%M:%S %z(%Z)&quot;,
</span><del>-                                time.localtime(timestamp)
</del><ins>+                                time.localtime(apnrecords.modified)
</ins><span class="cx">                             )
</span><span class="cx">                         ))
</span><span class="cx">             else:
</span></span></pre></div>
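<p>push.py now receives APN subscription records instead of tuples. A runnable sketch of the same grouping logic, with a namedtuple standing in for the store's record type (the attribute names come from the diff above; the sample values are placeholders):</p>
<pre>import time
from collections import namedtuple

# Illustrative stand-in for the store's APN subscription record
APNRecord = namedtuple(&quot;APNRecord&quot;, [&quot;token&quot;, &quot;resourceKey&quot;, &quot;modified&quot;, &quot;userAgent&quot;, &quot;ipAddr&quot;])

def summarizeSubscriptions(subscriptions):
    # Group records by resource key, as the tool now does
    byKey = {}
    for record in subscriptions:
        byKey.setdefault(record.resourceKey, []).append(record)
    for key, records in byKey.iteritems():
        print(&quot;%s: %d device(s)&quot; % (key, len(records)))
        for record in records:
            print(&quot;  %s '%s' from %s at %s&quot; % (
                record.token, record.userAgent, record.ipAddr,
                time.strftime(&quot;%H:%M:%S&quot;, time.localtime(record.modified)),
            ))

summarizeSubscriptions([
    APNRecord(&quot;abc123&quot;, &quot;/CalDAV/example.com/user01/&quot;, 1425678000, &quot;iOS/8.1&quot;, &quot;10.0.0.2&quot;),
])
</pre>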
<a id="CalendarServertrunkcalendarserverwebadminworkpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/webadmin/work.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/webadmin/work.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/webadmin/work.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -157,13 +157,13 @@
</span><span class="cx">                     if workType == PushNotificationWork:
</span><span class="cx">                         attrs += (&quot;pushID&quot;, &quot;priority&quot;)
</span><span class="cx">                     elif workType == ScheduleOrganizerWork:
</span><del>-                        attrs += (&quot;icalendarUid&quot;, &quot;attendeeCount&quot;)
</del><ins>+                        attrs += (&quot;icalendarUID&quot;, &quot;attendeeCount&quot;)
</ins><span class="cx">                     elif workType == ScheduleRefreshWork:
</span><del>-                        attrs += (&quot;icalendarUid&quot;, &quot;attendeeCount&quot;)
</del><ins>+                        attrs += (&quot;icalendarUID&quot;, &quot;attendeeCount&quot;)
</ins><span class="cx">                     elif workType == ScheduleReplyWork:
</span><del>-                        attrs += (&quot;icalendarUid&quot;,)
</del><ins>+                        attrs += (&quot;icalendarUID&quot;,)
</ins><span class="cx">                     elif workType == ScheduleAutoReplyWork:
</span><del>-                        attrs += (&quot;icalendarUid&quot;,)
</del><ins>+                        attrs += (&quot;icalendarUID&quot;,)
</ins><span class="cx">                     elif workType == GroupCacherPollingWork:
</span><span class="cx">                         attrs += ()
</span><span class="cx">                     elif workType == IMIPPollingWork:
</span></span></pre></div>
<a id="CalendarServertrunkrequirementsstabletxt"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/requirements-stable.txt (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/requirements-stable.txt        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/requirements-stable.txt        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -36,7 +36,7 @@
</span><span class="cx">             #pyOpenSSL
</span><span class="cx">         pycrypto==2.6.1
</span><span class="cx"> 
</span><del>-    --editable svn+http://svn.calendarserver.org/repository/calendarserver/twext/trunk@14404#egg=twextpy
</del><ins>+    --editable svn+http://svn.calendarserver.org/repository/calendarserver/twext/trunk@14521#egg=twextpy
</ins><span class="cx">         cffi==0.8.6
</span><span class="cx">             pycparser==2.10
</span><span class="cx">         #twisted
</span></span></pre></div>
<a id="CalendarServertrunktwistedcaldavresourcepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/twistedcaldav/resource.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/twistedcaldav/resource.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/twistedcaldav/resource.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -2139,7 +2139,7 @@
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def createNotificationsCollection(self):
</span><span class="cx">         txn = self._associatedTransaction
</span><del>-        notifications = yield txn.notificationsWithUID(self._newStoreHome.uid())
</del><ins>+        notifications = yield txn.notificationsWithUID(self._newStoreHome.uid(), create=True)
</ins><span class="cx"> 
</span><span class="cx">         from twistedcaldav.storebridge import StoreNotificationCollectionResource
</span><span class="cx">         similar = StoreNotificationCollectionResource(
</span></span></pre></div>
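<p>Together with the purge.py hunk above, this suggests txn.notificationsWithUID() no longer creates by default: read-side callers drop the flag, while creation sites pass create=True explicitly. A minimal sketch of the read-side shape, assuming an open transaction:</p>
<pre>from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def removeNotificationHome(txn, uid):
    # Lookup only; pass create=True instead when the home must exist
    home = yield txn.notificationsWithUID(uid)
    if home is not None:
        yield home.remove()
</pre>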
<a id="CalendarServertrunktwistedcaldavstdconfigpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/twistedcaldav/stdconfig.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/twistedcaldav/stdconfig.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/twistedcaldav/stdconfig.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -405,6 +405,7 @@
</span><span class="cx">         &quot;Implicit Errors&quot;: False,
</span><span class="cx">         &quot;AutoScheduling&quot;: False,
</span><span class="cx">         &quot;iSchedule&quot;: False,
</span><ins>+        &quot;migration&quot;: False,
</ins><span class="cx">     },
</span><span class="cx">     &quot;AccountingPrincipals&quot;: [],
</span><span class="cx">     &quot;AccountingLogRoot&quot;   : &quot;accounting&quot;,
</span></span></pre></div>
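<p>The new &quot;migration&quot; accounting category presumably gates the syncer.accounting(...) output from the migration tool above. A sketch of flipping it on in code; a real deployment would set this through the caldavd configuration instead, and the item-assignment style here is an assumption:</p>
<pre>from twistedcaldav.config import config

# Illustrative only: enable the new category so migration accounting is logged
config.AccountingCategories[&quot;migration&quot;] = True
</pre>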
<a id="CalendarServertrunktwistedcaldavtesttest_wrappingpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/twistedcaldav/test/test_wrapping.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/twistedcaldav/test/test_wrapping.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/twistedcaldav/test/test_wrapping.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -120,7 +120,7 @@
</span><span class="cx">         record = yield self.directory.recordWithShortName(RecordType.user, u&quot;wsanchez&quot;)
</span><span class="cx">         uid = record.uid
</span><span class="cx">         txn = self.transactionUnderTest()
</span><del>-        home = yield txn.calendarHomeWithUID(uid, True)
</del><ins>+        home = yield txn.calendarHomeWithUID(uid, create=True)
</ins><span class="cx">         cal = yield home.calendarWithName(&quot;calendar&quot;)
</span><span class="cx">         yield cal.createCalendarObjectWithName(objectName, VComponent.fromString(objectText))
</span><span class="cx">         yield self.commit()
</span><span class="lines">@@ -139,7 +139,7 @@
</span><span class="cx">         record = yield self.directory.recordWithShortName(RecordType.user, u&quot;wsanchez&quot;)
</span><span class="cx">         uid = record.uid
</span><span class="cx">         txn = self.transactionUnderTest()
</span><del>-        home = yield txn.addressbookHomeWithUID(uid, True)
</del><ins>+        home = yield txn.addressbookHomeWithUID(uid, create=True)
</ins><span class="cx">         adbk = yield home.addressbookWithName(&quot;addressbook&quot;)
</span><span class="cx">         yield adbk.createAddressBookObjectWithName(objectName, VCComponent.fromString(objectText))
</span><span class="cx">         yield self.commit()
</span></span></pre></div>
<a id="CalendarServertrunktxdavbasedatastoreutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/base/datastore/util.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/base/datastore/util.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/base/datastore/util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -86,6 +86,18 @@
</span><span class="cx">         return self.delete(key)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    # Home objects by UID
+
+    def keyForHomeWithUID(self, homeType, ownerUID, status):
+        return &quot;homeWithUID:%s:%s:%s&quot; % (homeType, status, ownerUID)
+
+
+    # Home objects by id
+
+    def keyForHomeWithID(self, homeType, homeResourceID, status):
+        return &quot;homeWithID:%s:%s:%s&quot; % (homeType, status, homeResourceID)
+
+
</ins><span class="cx">     # Home child objects by name
</span><span class="cx"> 
</span><span class="cx">     def keyForObjectWithName(self, homeResourceID, name):
</span><span class="lines">@@ -100,8 +112,8 @@
</span><span class="cx"> 
</span><span class="cx">     # Home child objects by external id
</span><span class="cx"> 
</span><del>-    def keyForObjectWithExternalID(self, homeResourceID, externalID):
-        return &quot;objectWithExternalID:%s:%s&quot; % (homeResourceID, externalID)
</del><ins>+    def keyForObjectWithBindUID(self, homeResourceID, bindUID):
+        return &quot;objectWithBindUID:%s:%s&quot; % (homeResourceID, bindUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     # Home metadata (Created/Modified)
</span></span></pre></div>
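<p>The new home-cache key helpers follow the same naming scheme as the existing ones. A runnable sketch of the strings they produce (the homeType and status values are illustrative placeholders):</p>
<pre>def keyForHomeWithUID(homeType, ownerUID, status):
    return &quot;homeWithUID:%s:%s:%s&quot; % (homeType, status, ownerUID)

def keyForHomeWithID(homeType, homeResourceID, status):
    return &quot;homeWithID:%s:%s:%s&quot; % (homeType, status, homeResourceID)

print(keyForHomeWithUID(0, &quot;user01&quot;, 0))    # homeWithUID:0:0:user01
print(keyForHomeWithID(0, 42, 0))           # homeWithID:0:0:42
</pre>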
<a id="CalendarServertrunktxdavcaldavdatastoreschedulepy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/caldav/datastore/schedule.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/schedule.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/schedule.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,214 +0,0 @@
</span><del>-# -*- test-case-name: txdav.caldav.datastore.test.test_scheduling -*-
-##
-# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-from zope.interface.declarations import implements
-from txdav.caldav.icalendarstore import ICalendarHome, ICalendar, ICalendarObject, \
-    ICalendarTransaction, ICalendarStore
-
-from twisted.python.util import FancyEqMixin
-from twisted.python.components import proxyForInterface
-from twisted.internet.defer import inlineCallbacks, returnValue
-
-
-
-class ImplicitTransaction(
-        proxyForInterface(ICalendarTransaction,
-                          originalAttribute=&quot;_transaction&quot;)):
-    &quot;&quot;&quot;
-    Wrapper around an L{ICalendarStoreTransaction}.
-    &quot;&quot;&quot;
-
-    def __init__(self, transaction):
-        &quot;&quot;&quot;
-        Initialize an L{ImplicitTransaction}.
-
-        @type transaction: L{ICalendarStoreTransaction}
-        &quot;&quot;&quot;
-        self._transaction = transaction
-
-
-    @inlineCallbacks
-    def calendarHomeWithUID(self, uid, create=False):
-        # FIXME: 'create' flag
-        newHome = yield super(ImplicitTransaction, self).calendarHomeWithUID(uid, create)
-#        return ImplicitCalendarHome(newHome, self)
-        if newHome is None:
-            returnValue(None)
-        else:
-            # FIXME: relay transaction
-            returnValue(ImplicitCalendarHome(newHome, None))
-
-
-
-class ImplicitCalendarHome(proxyForInterface(ICalendarHome, &quot;_calendarHome&quot;)):
-
-    implements(ICalendarHome)
-
-    def __init__(self, calendarHome, transaction):
-        &quot;&quot;&quot;
-        Initialize L{ImplicitCalendarHome} with an underlying
-        calendar home and L{ImplicitTransaction}.
-        &quot;&quot;&quot;
-        self._calendarHome = calendarHome
-        self._transaction = transaction
-
-
-#    def properties(self):
-#        # FIXME: wrap?
-#        return self._calendarHome.properties()
-
-    @inlineCallbacks
-    def calendars(self):
-        superCalendars = (yield super(ImplicitCalendarHome, self).calendars())
-        wrapped = []
-        for calendar in superCalendars:
-            wrapped.append(ImplicitCalendar(self, calendar))
-        returnValue(wrapped)
-
-
-    @inlineCallbacks
-    def loadCalendars(self):
-        superCalendars = (yield super(ImplicitCalendarHome, self).loadCalendars())
-        wrapped = []
-        for calendar in superCalendars:
-            wrapped.append(ImplicitCalendar(self, calendar))
-        returnValue(wrapped)
-
-
-    def createCalendarWithName(self, name):
-        self._calendarHome.createCalendarWithName(name)
-
-
-    def removeCalendarWithName(self, name):
-        self._calendarHome.removeCalendarWithName(name)
-
-
-    @inlineCallbacks
-    def calendarWithName(self, name):
-        calendar = yield self._calendarHome.calendarWithName(name)
-        if calendar is not None:
-            returnValue(ImplicitCalendar(self, calendar))
-        else:
-            returnValue(None)
-
-
-    def hasCalendarResourceUIDSomewhereElse(self, uid, ok_object, type):
-        return self._calendarHome.hasCalendarResourceUIDSomewhereElse(uid, ok_object, type)
-
-
-    def getCalendarResourcesForUID(self, uid):
-        return self._calendarHome.getCalendarResourcesForUID(uid)
-
-
-
-class ImplicitCalendarObject(object):
-    implements(ICalendarObject)
-
-    def setComponent(self, component):
-        pass
-
-
-    def component(self):
-        pass
-
-
-    def uid(self):
-        pass
-
-
-    def componentType(self):
-        pass
-
-
-    def organizer(self):
-        pass
-
-
-    def properties(self):
-        pass
-
-
-
-class ImplicitCalendar(FancyEqMixin,
-                       proxyForInterface(ICalendar, &quot;_subCalendar&quot;)):
-
-    compareAttributes = (
-        &quot;_subCalendar&quot;,
-        &quot;_parentHome&quot;,
-    )
-
-    def __init__(self, parentHome, subCalendar):
-        self._parentHome = parentHome
-        self._subCalendar = subCalendar
-        self._supportedComponents = None
-
-#    def ownerCalendarHome(self):
-#        return self._parentHome
-#    def calendarObjects(self):
-#        # FIXME: wrap
-#        return self._subCalendar.calendarObjects()
-#    def calendarObjectWithUID(self, uid): &quot;&quot;
-#    def createCalendarObjectWithName(self, name, component):
-#        # FIXME: implement most of StoreCalendarObjectResource here!
-#        self._subCalendar.createCalendarObjectWithName(name, component)
-#    def syncToken(self): &quot;&quot;
-#    def calendarObjectsInTimeRange(self, start, end, timeZone): &quot;&quot;
-#    def calendarObjectsSinceToken(self, token): &quot;&quot;
-#    def properties(self):
-#        # FIXME: probably need to wrap this as well
-#        return self._subCalendar.properties()
-#
-#    def calendarObjectWithName(self, name):
-#        #FIXME: wrap
-#        return self._subCalendar.calendarObjectWithName(name)
-
-
-    def _createCalendarObjectWithNameInternal(self, name, component, internal_state, options=None):
-        return self.createCalendarObjectWithName(name, component, options)
-
-
-    def setSupportedComponents(self, supported_components):
-        &quot;&quot;&quot;
-        Update the database column with the supported components. Technically this should only happen once
-        on collection creation, but for migration we may need to change after the fact - hence a separate api.
-        &quot;&quot;&quot;
-        self._supportedComponents = supported_components
-
-
-    def getSupportedComponents(self):
-        return self._supportedComponents
-
-
-
-class ImplicitStore(proxyForInterface(ICalendarStore, &quot;_calendarStore&quot;)):
-    &quot;&quot;&quot;
-    This is a wrapper around an L{ICalendarStore} that implements implicit
-    scheduling.
-    &quot;&quot;&quot;
-
-    def __init__(self, calendarStore):
-        &quot;&quot;&quot;
-        Create an L{ImplicitStore} wrapped around another
-        L{ICalendarStore} provider.
-        &quot;&quot;&quot;
-        self._calendarStore = calendarStore
-
-
-    def newTransaction(self, label=&quot;unlabeled&quot;):
-        &quot;&quot;&quot;
-        Wrap an underlying L{ITransaction}.
-        &quot;&quot;&quot;
-        return ImplicitTransaction(self._calendarStore.newTransaction(label))
</del></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingimipinboundpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -260,11 +260,11 @@
</span><span class="cx">             return
</span><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction(label=&quot;MailReceiver.processDSN&quot;)
</span><del>-        result = (yield txn.imipLookupByToken(token))
</del><ins>+        records = (yield txn.imipLookupByToken(token))
</ins><span class="cx">         yield txn.commit()
</span><span class="cx">         try:
</span><span class="cx">             # Note the results are returned as utf-8 encoded strings
</span><del>-            organizer, attendee, _ignore_icaluid = result[0]
</del><ins>+            record = records[0]
</ins><span class="cx">         except:
</span><span class="cx">             # This isn't a token we recognize
</span><span class="cx">             log.error(
</span><span class="lines">@@ -272,7 +272,7 @@
</span><span class="cx">                 % (token, msgId))
</span><span class="cx">             returnValue(self.UNKNOWN_TOKEN)
</span><span class="cx"> 
</span><del>-        calendar.removeAllButOneAttendee(attendee)
</del><ins>+        calendar.removeAllButOneAttendee(record.attendee)
</ins><span class="cx">         calendar.getOrganizerProperty().setValue(organizer)
</span><span class="cx">         for comp in calendar.subcomponents():
</span><span class="cx">             if comp.name() == &quot;VEVENT&quot;:
</span><span class="lines">@@ -288,8 +288,11 @@
</span><span class="cx">         log.warn(&quot;Mail gateway processing DSN %s&quot; % (msgId,))
</span><span class="cx">         txn = self.store.newTransaction(label=&quot;MailReceiver.processDSN&quot;)
</span><span class="cx">         yield txn.enqueue(
</span><del>-            IMIPReplyWork, organizer=organizer, attendee=attendee,
-            icalendarText=str(calendar))
</del><ins>+            IMIPReplyWork,
+            organizer=record.organizer,
+            attendee=record.attendee,
+            icalendarText=str(calendar)
+        )
</ins><span class="cx">         yield txn.commit()
</span><span class="cx">         returnValue(self.INJECTION_SUBMITTED)
</span><span class="cx"> 
</span><span class="lines">@@ -313,11 +316,11 @@
</span><span class="cx">             returnValue(self.MALFORMED_TO_ADDRESS)
</span><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction(label=&quot;MailReceiver.processReply&quot;)
</span><del>-        result = (yield txn.imipLookupByToken(token))
</del><ins>+        records = (yield txn.imipLookupByToken(token))
</ins><span class="cx">         yield txn.commit()
</span><span class="cx">         try:
</span><span class="cx">             # Note the results are returned as utf-8 encoded strings
</span><del>-            organizer, attendee, _ignore_icaluid = result[0]
</del><ins>+            record = records[0]
</ins><span class="cx">         except:
</span><span class="cx">             # This isn't a token we recognize
</span><span class="cx">             log.error(
</span><span class="lines">@@ -337,11 +340,11 @@
</span><span class="cx">                 &quot;in message %s&quot; % (msg['Message-ID'],))
</span><span class="cx"> 
</span><span class="cx">             toAddr = None
</span><del>-            fromAddr = attendee[7:]
-            if organizer.startswith(&quot;mailto:&quot;):
-                toAddr = organizer[7:]
-            elif organizer.startswith(&quot;urn:x-uid:&quot;):
-                uid = organizer[10:]
</del><ins>+            fromAddr = record.attendee[7:]
+            if record.organizer.startswith(&quot;mailto:&quot;):
+                toAddr = record.organizer[7:]
+            elif record.organizer.startswith(&quot;urn:x-uid:&quot;):
+                uid = record.organizer[10:]
</ins><span class="cx">                 record = yield self.directory.recordWithUID(uid)
</span><span class="cx">                 try:
</span><span class="cx">                     if record and record.emailAddresses:
</span><span class="lines">@@ -376,23 +379,23 @@
</span><span class="cx">         calendar = Component.fromString(calBody)
</span><span class="cx">         event = calendar.mainComponent()
</span><span class="cx"> 
</span><del>-        calendar.removeAllButOneAttendee(attendee)
</del><ins>+        calendar.removeAllButOneAttendee(record.attendee)
</ins><span class="cx">         organizerProperty = calendar.getOrganizerProperty()
</span><span class="cx">         if organizerProperty is None:
</span><span class="cx">             # ORGANIZER is required per rfc2446 section 3.2.3
</span><span class="cx">             log.warn(
</span><span class="cx">                 &quot;Mail gateway didn't find an ORGANIZER in REPLY %s&quot;
</span><span class="cx">                 % (msg['Message-ID'],))
</span><del>-            event.addProperty(Property(&quot;ORGANIZER&quot;, organizer))
</del><ins>+            event.addProperty(Property(&quot;ORGANIZER&quot;, record.organizer))
</ins><span class="cx">         else:
</span><del>-            organizerProperty.setValue(organizer)
</del><ins>+            organizerProperty.setValue(record.organizer)
</ins><span class="cx"> 
</span><span class="cx">         if not calendar.getAttendees():
</span><span class="cx">             # The attendee we're expecting isn't there, so add it back
</span><span class="cx">             # with a SCHEDULE-STATUS of SERVICE_UNAVAILABLE.
</span><span class="cx">             # The organizer will then see that the reply was not successful.
</span><span class="cx">             attendeeProp = Property(
</span><del>-                &quot;ATTENDEE&quot;, attendee,
</del><ins>+                &quot;ATTENDEE&quot;, record.attendee,
</ins><span class="cx">                 params={
</span><span class="cx">                     &quot;SCHEDULE-STATUS&quot;: iTIPRequestStatus.SERVICE_UNAVAILABLE,
</span><span class="cx">                 }
</span><span class="lines">@@ -406,8 +409,11 @@
</span><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction(label=&quot;MailReceiver.processReply&quot;)
</span><span class="cx">         yield txn.enqueue(
</span><del>-            IMIPReplyWork, organizer=organizer, attendee=attendee,
-            icalendarText=str(calendar))
</del><ins>+            IMIPReplyWork,
+            organizer=record.organizer,
+            attendee=record.attendee,
+            icalendarText=str(calendar)
+        )
</ins><span class="cx">         yield txn.commit()
</span><span class="cx">         returnValue(self.INJECTION_SUBMITTED)
</span><span class="cx"> 
</span></span></pre></div>
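<p>inbound.py now treats txn.imipLookupByToken() as returning a list of records rather than tuples. A minimal sketch of a lookup helper, assuming a store exposing newTransaction() and records with the attribute names used in the diff:</p>
<pre>from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def lookupIMIPToken(store, token):
    txn = store.newTransaction(label=&quot;lookupIMIPToken&quot;)
    records = yield txn.imipLookupByToken(token)
    yield txn.commit()
    if not records:
        returnValue(None)    # not a token we recognize
    record = records[0]
    returnValue((record.organizer, record.attendee, record.icaluid))
</pre>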
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingimipoutboundpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -364,28 +364,29 @@
</span><span class="cx"> 
</span><span class="cx">             # Reuse or generate a token based on originator, toAddr, and
</span><span class="cx">             # event uid
</span><del>-            token = (yield txn.imipGetToken(originator, toAddr.lower(), icaluid))
-            if token is None:
</del><ins>+            record = (yield txn.imipGetToken(originator, toAddr.lower(), icaluid))
+            if record is None:
</ins><span class="cx"> 
</span><span class="cx">                 # Because in the past the originator was sometimes in mailto:
</span><span class="cx">                 # form, lookup an existing token by mailto: as well
</span><span class="cx">                 organizerProperty = calendar.getOrganizerProperty()
</span><span class="cx">                 organizerEmailAddress = organizerProperty.parameterValue(&quot;EMAIL&quot;, None)
</span><span class="cx">                 if organizerEmailAddress is not None:
</span><del>-                    token = (yield txn.imipGetToken(&quot;mailto:%s&quot; % (organizerEmailAddress.lower(),), toAddr.lower(), icaluid))
</del><ins>+                    record = (yield txn.imipGetToken(&quot;mailto:%s&quot; % (organizerEmailAddress.lower(),), toAddr.lower(), icaluid))
</ins><span class="cx"> 
</span><del>-            if token is None:
-                token = (yield txn.imipCreateToken(originator, toAddr.lower(), icaluid))
</del><ins>+            if record is None:
+                record = (yield txn.imipCreateToken(originator, toAddr.lower(), icaluid))
</ins><span class="cx">                 self.log.debug(&quot;Mail gateway created token %s for %s &quot;
</span><span class="cx">                                &quot;(originator), %s (recipient) and %s (icaluid)&quot;
</span><del>-                               % (token, originator, toAddr, icaluid))
</del><ins>+                               % (record.token, originator, toAddr, icaluid))
</ins><span class="cx">                 inviteState = &quot;new&quot;
</span><span class="cx"> 
</span><span class="cx">             else:
</span><span class="cx">                 self.log.debug(&quot;Mail gateway reusing token %s for %s &quot;
</span><span class="cx">                                &quot;(originator), %s (recipient) and %s (icaluid)&quot;
</span><del>-                               % (token, originator, toAddr, icaluid))
</del><ins>+                               % (record.token, originator, toAddr, icaluid))
</ins><span class="cx">                 inviteState = &quot;update&quot;
</span><ins>+            token = record.token
</ins><span class="cx"> 
</span><span class="cx">             fullServerAddress = self.address
</span><span class="cx">             _ignore_name, serverAddress = email.utils.parseaddr(fullServerAddress)
</span></span></pre></div>
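<p>outbound.py makes the same switch for txn.imipGetToken() and txn.imipCreateToken(), which now return a record whose token attribute carries the token string. A minimal sketch of the get-or-create flow, assuming an open transaction:</p>
<pre>from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def getOrCreateIMIPToken(txn, originator, toAddr, icaluid):
    record = yield txn.imipGetToken(originator, toAddr.lower(), icaluid)
    if record is None:
        record = yield txn.imipCreateToken(originator, toAddr.lower(), icaluid)
    returnValue(record.token)
</pre>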
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_inboundpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -175,13 +175,13 @@
</span><span class="cx"> 
</span><span class="cx">         # Make sure a known token *is* processed
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        token = (yield txn.imipCreateToken(
</del><ins>+        record = (yield txn.imipCreateToken(
</ins><span class="cx">             &quot;urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500&quot;,
</span><span class="cx">             &quot;mailto:user02@example.com&quot;,
</span><span class="cx">             &quot;1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C&quot;
</span><span class="cx">         ))
</span><span class="cx">         yield txn.commit()
</span><del>-        calBody = template % token
</del><ins>+        calBody = template % record.token
</ins><span class="cx">         result = (yield self.receiver.processDSN(calBody, &quot;xyzzy&quot;))
</span><span class="cx">         self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
</span><span class="cx"> 
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_mailgatewaypy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_mailgateway.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_mailgateway.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_mailgateway.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -44,9 +44,8 @@
</span><span class="cx">             &quot;icaluid1&quot;, token=&quot;token1&quot;)
</span><span class="cx">         yield migrateTokensToStore(self.path, self.store)
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        results = yield (txn.imipLookupByToken(&quot;token1&quot;))
-        organizer, attendee, icaluid = results[0]
</del><ins>+        records = yield (txn.imipLookupByToken(&quot;token1&quot;))
</ins><span class="cx">         yield txn.commit()
</span><del>-        self.assertEquals(organizer, &quot;urn:uuid:user01&quot;)
-        self.assertEquals(attendee, &quot;mailto:attendee@example.com&quot;)
-        self.assertEquals(icaluid, &quot;icaluid1&quot;)
</del><ins>+        self.assertEquals(records[0].organizer, &quot;urn:uuid:user01&quot;)
+        self.assertEquals(records[0].attendee, &quot;mailto:attendee@example.com&quot;)
+        self.assertEquals(records[0].icaluid, &quot;icaluid1&quot;)
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_outboundpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_outbound.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_outbound.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_outbound.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -316,17 +316,17 @@
</span><span class="cx">         yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
</span><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        token = (yield txn.imipGetToken(
</del><ins>+        record = (yield txn.imipGetToken(
</ins><span class="cx">             ORGANIZER,
</span><span class="cx">             ATTENDEE,
</span><span class="cx">             ICALUID
</span><span class="cx">         ))
</span><del>-        self.assertTrue(token)
-        organizer, attendee, icaluid = (yield txn.imipLookupByToken(token))[0]
</del><ins>+        self.assertTrue(record is not None)
+        record = (yield txn.imipLookupByToken(record.token))[0]
</ins><span class="cx">         yield txn.commit()
</span><del>-        self.assertEquals(organizer, ORGANIZER)
-        self.assertEquals(attendee, ATTENDEE)
-        self.assertEquals(icaluid, ICALUID)
</del><ins>+        self.assertEquals(record.organizer, ORGANIZER)
+        self.assertEquals(record.attendee, ATTENDEE)
+        self.assertEquals(record.icaluid, ICALUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="lines">@@ -492,12 +492,12 @@
</span><span class="cx">             if UID: # The organizer is local, and server is sending to remote
</span><span class="cx">                     # attendee
</span><span class="cx">                 txn = self.store.newTransaction()
</span><del>-                token = (yield txn.imipGetToken(inputOriginator, inputRecipient, UID))
</del><ins>+                record = (yield txn.imipGetToken(inputOriginator, inputRecipient, UID))
</ins><span class="cx">                 yield txn.commit()
</span><del>-                self.assertNotEquals(token, None)
</del><ins>+                self.assertNotEquals(record, None)
</ins><span class="cx">                 self.assertEquals(
</span><span class="cx">                     msg[&quot;Reply-To&quot;],
</span><del>-                    &quot;server+%s@example.com&quot; % (token,))
</del><ins>+                    &quot;server+%s@example.com&quot; % (record.token,))
</ins><span class="cx"> 
</span><span class="cx">                 # Make sure attendee property for organizer exists and matches
</span><span class="cx">                 # the CUA of the organizer property
</span><span class="lines">@@ -529,31 +529,31 @@
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def test_tokens(self):
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        token = (yield txn.imipLookupByToken(&quot;xyzzy&quot;))
</del><ins>+        self.assertEquals((yield txn.imipLookupByToken(&quot;xyzzy&quot;)), [])
</ins><span class="cx">         yield txn.commit()
</span><del>-        self.assertEquals(token, [])
</del><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        token1 = (yield txn.imipCreateToken(&quot;organizer&quot;, &quot;attendee&quot;, &quot;icaluid&quot;))
</del><ins>+        record1 = (yield txn.imipCreateToken(&quot;organizer&quot;, &quot;attendee&quot;, &quot;icaluid&quot;))
</ins><span class="cx">         yield txn.commit()
</span><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        token2 = (yield txn.imipGetToken(&quot;organizer&quot;, &quot;attendee&quot;, &quot;icaluid&quot;))
</del><ins>+        record2 = (yield txn.imipGetToken(&quot;organizer&quot;, &quot;attendee&quot;, &quot;icaluid&quot;))
</ins><span class="cx">         yield txn.commit()
</span><del>-        self.assertEquals(token1, token2)
</del><ins>+        self.assertEquals(record1.token, record2.token)
</ins><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction()
</span><ins>+        record = (yield txn.imipLookupByToken(record1.token))[0]
</ins><span class="cx">         self.assertEquals(
</span><del>-            map(list, (yield txn.imipLookupByToken(token1))),
-            [[&quot;organizer&quot;, &quot;attendee&quot;, &quot;icaluid&quot;]])
</del><ins>+            [record.organizer, record.attendee, record.icaluid],
+            [&quot;organizer&quot;, &quot;attendee&quot;, &quot;icaluid&quot;])
</ins><span class="cx">         yield txn.commit()
</span><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        yield txn.imipRemoveToken(token1)
</del><ins>+        yield txn.imipRemoveToken(record1.token)
</ins><span class="cx">         yield txn.commit()
</span><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        self.assertEquals((yield txn.imipLookupByToken(token1)), [])
</del><ins>+        self.assertEquals((yield txn.imipLookupByToken(record1.token)), [])
</ins><span class="cx">         yield txn.commit()
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -568,7 +568,7 @@
</span><span class="cx">         # Explictly store a token with mailto: CUA for organizer
</span><span class="cx">         # (something that doesn't happen any more, but did in the past)
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        origToken = (yield txn.imipCreateToken(
</del><ins>+        origRecord = (yield txn.imipCreateToken(
</ins><span class="cx">             organizerEmail,
</span><span class="cx">             &quot;mailto:attendee@example.com&quot;,
</span><span class="cx">             &quot;CFDD5E46-4F74-478A-9311-B3FF905449C3&quot;
</span><span class="lines">@@ -588,15 +588,15 @@
</span><span class="cx"> 
</span><span class="cx">         # Verify we didn't create a new token...
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        token = (yield txn.imipGetToken(inputOriginator, inputRecipient, UID))
</del><ins>+        record = (yield txn.imipGetToken(inputOriginator, inputRecipient, UID))
</ins><span class="cx">         yield txn.commit()
</span><del>-        self.assertEquals(token, None)
</del><ins>+        self.assertEquals(record, None)
</ins><span class="cx"> 
</span><span class="cx">         # But instead kept the old one...
</span><span class="cx">         txn = self.store.newTransaction()
</span><del>-        token = (yield txn.imipGetToken(organizerEmail, inputRecipient, UID))
</del><ins>+        record = (yield txn.imipGetToken(organizerEmail, inputRecipient, UID))
</ins><span class="cx">         yield txn.commit()
</span><del>-        self.assertEquals(token, origToken)
</del><ins>+        self.assertEquals(record.token, origRecord.token)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     def generateSampleEmail(self, caltext=initialInviteText):
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingischeduledeliverypy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -448,15 +448,6 @@
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def _processRequest(self, ssl, host, port, path):
</span><del>-        from twisted.internet import reactor
-        f = Factory()
-        f.protocol = HTTPClientProtocol
-        if ssl:
-            ep = GAIEndpoint(reactor, host, port, _configuredClientContextFactory())
-        else:
-            ep = GAIEndpoint(reactor, host, port)
-        proto = (yield ep.connect(f))
-
</del><span class="cx">         if not self.server.podding() and config.Scheduling.iSchedule.DKIM.Enabled:
</span><span class="cx">             domain, selector, key_file, algorithm, useDNSKey, useHTTPKey, usePrivateExchangeKey, expire = DKIMUtils.getConfiguration(config)
</span><span class="cx">             request = DKIMRequest(
</span><span class="lines">@@ -481,6 +472,21 @@
</span><span class="cx">         if accountingEnabledForCategory(&quot;iSchedule&quot;):
</span><span class="cx">             self.loggedRequest = yield self.logRequest(request)
</span><span class="cx"> 
</span><ins>+        response = yield self._submitRequest(ssl, host, port, request)
+        returnValue(response)
+
+
+    @inlineCallbacks
+    def _submitRequest(self, ssl, host, port, request):
+        from twisted.internet import reactor
+        f = Factory()
+        f.protocol = HTTPClientProtocol
+        if ssl:
+            ep = GAIEndpoint(reactor, host, port, _configuredClientContextFactory())
+        else:
+            ep = GAIEndpoint(reactor, host, port)
+        proto = (yield ep.connect(f))
+
</ins><span class="cx">         response = (yield proto.submitRequest(request))
</span><span class="cx"> 
</span><span class="cx">         returnValue(response)
</span></span></pre></div>
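<p>The delivery.py change defers endpoint setup until after the request has been built, signed and logged, by moving the connection logic into a new _submitRequest() method. A self-contained sketch of that shape, with succeed() standing in for the network round trip:</p>
<pre>from twisted.internet.defer import inlineCallbacks, returnValue, succeed

@inlineCallbacks
def submitRequest(ssl, host, port, request):
    # Stand-in for the endpoint/protocol work _submitRequest() now owns;
    # the real code connects a GAIEndpoint and submits over HTTP(S)
    response = yield succeed({&quot;status&quot;: 200, &quot;echo&quot;: request})
    returnValue(response)

@inlineCallbacks
def processRequest(ssl, host, port, path):
    request = {&quot;path&quot;: path}    # build (and possibly sign/log) the request first
    response = yield submitRequest(ssl, host, port, request)
    returnValue(response)
</pre>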
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingtesttest_workpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_work.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_work.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_work.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -250,7 +250,7 @@
</span><span class="cx"> 
</span><span class="cx">         work = yield jobs[0].workItem()
</span><span class="cx">         self.assertTrue(isinstance(work, ScheduleOrganizerWork))
</span><del>-        self.assertEqual(work.icalendarUid, &quot;12345-67890&quot;)
</del><ins>+        self.assertEqual(work.icalendarUID, &quot;12345-67890&quot;)
</ins><span class="cx">         self.assertEqual(scheduleActionFromSQL[work.scheduleAction], &quot;create&quot;)
</span><span class="cx"> 
</span><span class="cx">         yield work.delete()
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingworkpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -77,7 +77,7 @@
</span><span class="cx"> 
</span><span class="cx">         baseargs = {
</span><span class="cx">             &quot;jobID&quot;: kwargs.pop(&quot;jobID&quot;),
</span><del>-            &quot;icalendarUid&quot;: kwargs.pop(&quot;icalendarUid&quot;),
</del><ins>+            &quot;icalendarUID&quot;: kwargs.pop(&quot;icalendarUID&quot;),
</ins><span class="cx">             &quot;workType&quot;: cls.workType()
</span><span class="cx">         }
</span><span class="cx"> 
</span><span class="lines">@@ -121,7 +121,7 @@
</span><span class="cx">         # cause deadlocks if done in the wrong order
</span><span class="cx"> 
</span><span class="cx">         # Row level lock on this item
</span><del>-        locked = yield self.baseWork.trylock(ScheduleWork.icalendarUid == self.icalendarUid)
</del><ins>+        locked = yield self.baseWork.trylock(ScheduleWork.icalendarUID == self.icalendarUID)
</ins><span class="cx">         if locked:
</span><span class="cx">             yield self.trylock()
</span><span class="cx">         returnValue(locked)
</span><span class="lines">@@ -136,7 +136,7 @@
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         self.__dict__[&quot;baseWork&quot;] = baseWork
</span><span class="cx">         self.__dict__[&quot;jobID&quot;] = baseWork.jobID
</span><del>-        self.__dict__[&quot;icalendarUid&quot;] = baseWork.icalendarUid
</del><ins>+        self.__dict__[&quot;icalendarUID&quot;] = baseWork.icalendarUID
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     def delete(self):
</span><span class="lines">@@ -174,7 +174,7 @@
</span><span class="cx">         if self.workType() == ScheduleOrganizerSendWork.workType():
</span><span class="cx">             all = yield self.baseWork.query(
</span><span class="cx">                 self.transaction,
</span><del>-                (ScheduleWork.icalendarUid == self.icalendarUid).And(ScheduleWork.workID != self.workID),
</del><ins>+                (ScheduleWork.icalendarUID == self.icalendarUID).And(ScheduleWork.workID != self.workID),
</ins><span class="cx">                 order=ScheduleWork.workID,
</span><span class="cx">                 limit=1,
</span><span class="cx">             )
</span><span class="lines">@@ -183,7 +183,7 @@
</span><span class="cx">                 if work.workType == self.workType():
</span><span class="cx">                     job = yield JobItem.load(self.transaction, work.jobID)
</span><span class="cx">                     yield job.update(notBefore=datetime.datetime.utcnow())
</span><del>-                    log.debug(&quot;ScheduleOrganizerSendWork - promoted job: {id}, UID: '{uid}'&quot;, id=work.workID, uid=self.icalendarUid)
</del><ins>+                    log.debug(&quot;ScheduleOrganizerSendWork - promoted job: {id}, UID: '{uid}'&quot;, id=work.workID, uid=self.icalendarUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="lines">@@ -323,7 +323,7 @@
</span><span class="cx">         proposal = (yield txn.enqueue(
</span><span class="cx">             cls,
</span><span class="cx">             notBefore=notBefore,
</span><del>-            icalendarUid=uid,
</del><ins>+            icalendarUID=uid,
</ins><span class="cx">             scheduleAction=scheduleActionToSQL[action],
</span><span class="cx">             homeResourceID=home.id(),
</span><span class="cx">             resourceID=resource.id() if resource else None,
</span><span class="lines">@@ -347,10 +347,10 @@
</span><span class="cx">             calendar_old = Component.fromString(self.icalendarTextOld) if self.icalendarTextOld else None
</span><span class="cx">             calendar_new = Component.fromString(self.icalendarTextNew) if self.icalendarTextNew else None
</span><span class="cx"> 
</span><del>-            log.debug(&quot;ScheduleOrganizerWork - running for ID: {id}, UID: {uid}, organizer: {org}&quot;, id=self.workID, uid=self.icalendarUid, org=organizer)
</del><ins>+            log.debug(&quot;ScheduleOrganizerWork - running for ID: {id}, UID: {uid}, organizer: {org}&quot;, id=self.workID, uid=self.icalendarUID, org=organizer)
</ins><span class="cx"> 
</span><span class="cx">             # We need to get the UID lock for implicit processing.
</span><del>-            yield NamedLock.acquire(self.transaction, &quot;ImplicitUIDLock:%s&quot; % (hashlib.md5(self.icalendarUid).hexdigest(),))
</del><ins>+            yield NamedLock.acquire(self.transaction, &quot;ImplicitUIDLock:%s&quot; % (hashlib.md5(self.icalendarUID).hexdigest(),))
</ins><span class="cx"> 
</span><span class="cx">             from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
</span><span class="cx">             scheduler = ImplicitScheduler()
</span><span class="lines">@@ -359,7 +359,7 @@
</span><span class="cx">                 scheduleActionFromSQL[self.scheduleAction],
</span><span class="cx">                 home,
</span><span class="cx">                 resource,
</span><del>-                self.icalendarUid,
</del><ins>+                self.icalendarUID,
</ins><span class="cx">                 calendar_old,
</span><span class="cx">                 calendar_new,
</span><span class="cx">                 self.smartMerge
</span><span class="lines">@@ -368,15 +368,15 @@
</span><span class="cx">             self._dequeued()
</span><span class="cx"> 
</span><span class="cx">         except Exception, e:
</span><del>-            log.debug(&quot;ScheduleOrganizerWork - exception ID: {id}, UID: '{uid}', {err}&quot;, id=self.workID, uid=self.icalendarUid, err=str(e))
</del><ins>+            log.debug(&quot;ScheduleOrganizerWork - exception ID: {id}, UID: '{uid}', {err}&quot;, id=self.workID, uid=self.icalendarUID, err=str(e))
</ins><span class="cx">             log.debug(traceback.format_exc())
</span><span class="cx">             raise
</span><span class="cx">         except:
</span><del>-            log.debug(&quot;ScheduleOrganizerWork - bare exception ID: {id}, UID: '{uid}'&quot;, id=self.workID, uid=self.icalendarUid)
</del><ins>+            log.debug(&quot;ScheduleOrganizerWork - bare exception ID: {id}, UID: '{uid}'&quot;, id=self.workID, uid=self.icalendarUID)
</ins><span class="cx">             log.debug(traceback.format_exc())
</span><span class="cx">             raise
</span><span class="cx"> 
</span><del>-        log.debug(&quot;ScheduleOrganizerWork - done for ID: {id}, UID: {uid}, organizer: {org}&quot;, id=self.workID, uid=self.icalendarUid, org=organizer)
</del><ins>+        log.debug(&quot;ScheduleOrganizerWork - done for ID: {id}, UID: {uid}, organizer: {org}&quot;, id=self.workID, uid=self.icalendarUID, org=organizer)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -418,7 +418,7 @@
</span><span class="cx">         proposal = (yield txn.enqueue(
</span><span class="cx">             cls,
</span><span class="cx">             notBefore=notBefore,
</span><del>-            icalendarUid=uid,
</del><ins>+            icalendarUID=uid,
</ins><span class="cx">             scheduleAction=scheduleActionToSQL[action],
</span><span class="cx">             homeResourceID=home.id(),
</span><span class="cx">             resourceID=resource.id() if resource else None,
</span><span class="lines">@@ -449,13 +449,13 @@
</span><span class="cx">             log.debug(
</span><span class="cx">                 &quot;ScheduleOrganizerSendWork - running for ID: {id}, UID: {uid}, organizer: {org}, attendee: {att}&quot;,
</span><span class="cx">                 id=self.workID,
</span><del>-                uid=self.icalendarUid,
</del><ins>+                uid=self.icalendarUID,
</ins><span class="cx">                 org=organizer,
</span><span class="cx">                 att=self.attendee
</span><span class="cx">             )
</span><span class="cx"> 
</span><span class="cx">             # We need to get the UID lock for implicit processing.
</span><del>-            yield NamedLock.acquire(self.transaction, &quot;ImplicitUIDLock:%s&quot; % (hashlib.md5(self.icalendarUid).hexdigest(),))
</del><ins>+            yield NamedLock.acquire(self.transaction, &quot;ImplicitUIDLock:%s&quot; % (hashlib.md5(self.icalendarUID).hexdigest(),))
</ins><span class="cx"> 
</span><span class="cx">             from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
</span><span class="cx">             scheduler = ImplicitScheduler()
</span><span class="lines">@@ -464,7 +464,7 @@
</span><span class="cx">                 scheduleActionFromSQL[self.scheduleAction],
</span><span class="cx">                 home,
</span><span class="cx">                 resource,
</span><del>-                self.icalendarUid,
</del><ins>+                self.icalendarUID,
</ins><span class="cx">                 organizer,
</span><span class="cx">                 self.attendee,
</span><span class="cx">                 itipmsg,
</span><span class="lines">@@ -486,18 +486,18 @@
</span><span class="cx">             self._dequeued()
</span><span class="cx"> 
</span><span class="cx">         except Exception, e:
</span><del>-            log.debug(&quot;ScheduleOrganizerSendWork - exception ID: {id}, UID: '{uid}', {err}&quot;, id=self.workID, uid=self.icalendarUid, err=str(e))
</del><ins>+            log.debug(&quot;ScheduleOrganizerSendWork - exception ID: {id}, UID: '{uid}', {err}&quot;, id=self.workID, uid=self.icalendarUID, err=str(e))
</ins><span class="cx">             log.debug(traceback.format_exc())
</span><span class="cx">             raise
</span><span class="cx">         except:
</span><del>-            log.debug(&quot;ScheduleOrganizerSendWork - bare exception ID: {id}, UID: '{uid}'&quot;, id=self.workID, uid=self.icalendarUid)
</del><ins>+            log.debug(&quot;ScheduleOrganizerSendWork - bare exception ID: {id}, UID: '{uid}'&quot;, id=self.workID, uid=self.icalendarUID)
</ins><span class="cx">             log.debug(traceback.format_exc())
</span><span class="cx">             raise
</span><span class="cx"> 
</span><span class="cx">         log.debug(
</span><span class="cx">             &quot;ScheduleOrganizerSendWork - for ID: {id}, UID: {uid}, organizer: {org}, attendee: {att}&quot;,
</span><span class="cx">             id=self.workID,
</span><del>-            uid=self.icalendarUid,
</del><ins>+            uid=self.icalendarUID,
</ins><span class="cx">             org=organizer,
</span><span class="cx">             att=self.attendee
</span><span class="cx">         )
</span><span class="lines">@@ -521,7 +521,7 @@
</span><span class="cx">         proposal = (yield txn.enqueue(
</span><span class="cx">             cls,
</span><span class="cx">             notBefore=notBefore,
</span><del>-            icalendarUid=uid,
</del><ins>+            icalendarUID=uid,
</ins><span class="cx">             homeResourceID=home.id(),
</span><span class="cx">             resourceID=resource.id() if resource else None,
</span><span class="cx">             itipMsg=itipmsg.getTextWithTimezones(includeTimezones=not config.EnableTimezonesByReference),
</span><span class="lines">@@ -649,7 +649,7 @@
</span><span class="cx">         notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AttendeeRefreshBatchDelaySeconds)
</span><span class="cx">         proposal = (yield txn.enqueue(
</span><span class="cx">             cls,
</span><del>-            icalendarUid=organizer_resource.uid(),
</del><ins>+            icalendarUID=organizer_resource.uid(),
</ins><span class="cx">             homeResourceID=organizer_resource._home.id(),
</span><span class="cx">             resourceID=organizer_resource.id(),
</span><span class="cx">             attendeeCount=len(attendees),
</span><span class="lines">@@ -676,7 +676,7 @@
</span><span class="cx">             log.debug(&quot;Schedule refresh for resource-id: {rid} - ignored&quot;, rid=self.resourceID)
</span><span class="cx">             returnValue(None)
</span><span class="cx"> 
</span><del>-        log.debug(&quot;ScheduleRefreshWork - running for ID: {id}, UID: {uid}&quot;, id=self.workID, uid=self.icalendarUid)
</del><ins>+        log.debug(&quot;ScheduleRefreshWork - running for ID: {id}, UID: {uid}&quot;, id=self.workID, uid=self.icalendarUID)
</ins><span class="cx"> 
</span><span class="cx">         # Get the unique list of pending attendees and split into batch to process
</span><span class="cx">         # TODO: do a DELETE ... and rownum &lt;= N returning attendee - but have to fix Oracle to
</span><span class="lines">@@ -707,7 +707,7 @@
</span><span class="cx">             notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AttendeeRefreshBatchIntervalSeconds)
</span><span class="cx">             yield self.transaction.enqueue(
</span><span class="cx">                 self.__class__,
</span><del>-                icalendarUid=self.icalendarUid,
</del><ins>+                icalendarUID=self.icalendarUID,
</ins><span class="cx">                 homeResourceID=self.homeResourceID,
</span><span class="cx">                 resourceID=self.resourceID,
</span><span class="cx">                 attendeeCount=len(pendingAttendees),
</span><span class="lines">@@ -721,7 +721,7 @@
</span><span class="cx"> 
</span><span class="cx">         self._dequeued()
</span><span class="cx"> 
</span><del>-        log.debug(&quot;ScheduleRefreshWork - done for ID: {id}, UID: {uid}&quot;, id=self.workID, uid=self.icalendarUid)
</del><ins>+        log.debug(&quot;ScheduleRefreshWork - done for ID: {id}, UID: {uid}&quot;, id=self.workID, uid=self.icalendarUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="lines">@@ -790,7 +790,7 @@
</span><span class="cx">         notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AutoReplyDelaySeconds)
</span><span class="cx">         proposal = (yield txn.enqueue(
</span><span class="cx">             cls,
</span><del>-            icalendarUid=resource.uid(),
</del><ins>+            icalendarUID=resource.uid(),
</ins><span class="cx">             homeResourceID=resource._home.id(),
</span><span class="cx">             resourceID=resource.id(),
</span><span class="cx">             partstat=partstat,
</span><span class="lines">@@ -803,7 +803,7 @@
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def doWork(self):
</span><span class="cx"> 
</span><del>-        log.debug(&quot;ScheduleAutoReplyWork - running for ID: {id}, UID: {uid}&quot;, id=self.workID, uid=self.icalendarUid)
</del><ins>+        log.debug(&quot;ScheduleAutoReplyWork - running for ID: {id}, UID: {uid}&quot;, id=self.workID, uid=self.icalendarUID)
</ins><span class="cx"> 
</span><span class="cx">         # Delete all other work items with the same pushID
</span><span class="cx">         yield Delete(
</span><span class="lines">@@ -816,7 +816,7 @@
</span><span class="cx"> 
</span><span class="cx">         self._dequeued()
</span><span class="cx"> 
</span><del>-        log.debug(&quot;ScheduleAutoReplyWork - done for ID: {id}, UID: {uid}&quot;, id=self.workID, uid=self.icalendarUid)
</del><ins>+        log.debug(&quot;ScheduleAutoReplyWork - done for ID: {id}, UID: {uid}&quot;, id=self.workID, uid=self.icalendarUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span></span></pre></div>
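<p>The hunks above rename the work-item attribute icalendarUid to icalendarUID, so every
txn.enqueue() keyword argument and every log format field that referenced the old spelling
has to change in lockstep. A minimal sketch of the resulting call pattern (a hedged
illustration; the values are placeholders, not taken from this changeset):</p>
<pre>
# Enqueue a scheduling work item; the keyword must match the renamed attribute.
proposal = (yield txn.enqueue(
    ScheduleOrganizerSendWork,
    notBefore=notBefore,
    icalendarUID=uid,          # formerly icalendarUid
    homeResourceID=home.id(),
    resourceID=resource.id() if resource else None,
))
</pre>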
<a id="CalendarServertrunktxdavcaldavdatastoresqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -26,19 +26,17 @@
</span><span class="cx">     &quot;CalendarObject&quot;,
</span><span class="cx"> ]
</span><span class="cx"> 
</span><del>-from twext.enterprise.dal.record import fromTable
</del><ins>+from twext.enterprise.dal.record import fromTable, SerializableRecord
</ins><span class="cx"> from twext.enterprise.dal.syntax import Count, ColumnSyntax, Delete, \
</span><span class="cx">     Insert, Len, Max, Parameter, Select, Update, utcNowSQL
</span><span class="cx"> from twext.enterprise.locking import NamedLock
</span><span class="cx"> from twext.enterprise.jobqueue import WorkItem, AggregatedWorkItem, \
</span><span class="cx">     WORK_PRIORITY_LOW, WORK_WEIGHT_5, WORK_WEIGHT_3
</span><del>-from twext.enterprise.util import parseSQLTimestamp
</del><span class="cx"> from twext.python.clsprop import classproperty
</span><del>-from twext.python.filepath import CachingFilePath
</del><span class="cx"> from twext.python.log import Logger
</span><span class="cx"> from twext.who.idirectory import RecordType
</span><span class="cx"> from twistedcaldav.ical import Component as VComponent
</span><del>-from txweb2.http_headers import MimeType, generateContentType
</del><ins>+from txweb2.http_headers import MimeType
</ins><span class="cx"> from txweb2.stream import readStream
</span><span class="cx"> 
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue, succeed
</span><span class="lines">@@ -48,11 +46,10 @@
</span><span class="cx"> from twistedcaldav import customxml, ical
</span><span class="cx"> from twistedcaldav.stdconfig import config
</span><span class="cx"> from twistedcaldav.datafilters.peruserdata import PerUserDataFilter
</span><del>-from twistedcaldav.dateops import normalizeForIndex, datetimeMktime, \
</del><ins>+from twistedcaldav.dateops import normalizeForIndex, \
</ins><span class="cx">     pyCalendarTodatetime, parseSQLDateToPyCalendar
</span><span class="cx"> from twistedcaldav.ical import Component, InvalidICalendarDataError, Property
</span><span class="cx"> from twistedcaldav.instance import InvalidOverriddenInstanceError
</span><del>-from twistedcaldav.memcacher import Memcacher
</del><span class="cx"> from twistedcaldav.timezones import TimezoneException
</span><span class="cx"> 
</span><span class="cx"> from txdav.base.propertystore.base import PropertyName
</span><span class="lines">@@ -64,14 +61,15 @@
</span><span class="cx"> from txdav.caldav.datastore.scheduling.icalsplitter import iCalSplitter
</span><span class="cx"> from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
</span><span class="cx"> from txdav.caldav.datastore.scheduling.utils import uidFromCalendarUserAddress
</span><del>-from txdav.caldav.datastore.util import AttachmentRetrievalTransport, \
-    normalizationLookup
</del><ins>+from txdav.caldav.datastore.sql_attachment import Attachment, DropBoxAttachment, \
+    AttachmentLink, ManagedAttachment
+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord, \
+    GroupShareeRecord
+from txdav.caldav.datastore.util import normalizationLookup
</ins><span class="cx"> from txdav.caldav.datastore.util import CalendarObjectBase
</span><del>-from txdav.caldav.datastore.util import StorageTransportBase
</del><span class="cx"> from txdav.caldav.datastore.util import dropboxIDFromCalendarObject
</span><span class="cx"> from txdav.caldav.icalendarstore import ICalendarHome, ICalendar, ICalendarObject, \
</span><del>-    IAttachment, AttachmentStoreFailed, AttachmentStoreValidManagedID, \
-    AttachmentMigrationFailed, AttachmentDropboxNotAllowed, \
</del><ins>+    AttachmentStoreFailed, AttachmentStoreValidManagedID, \
</ins><span class="cx">     TooManyAttendeesError, InvalidComponentTypeError, InvalidCalendarAccessError, \
</span><span class="cx">     ResourceDeletedError, \
</span><span class="cx">     AttendeeAllowedError, InvalidPerUserDataMerge, ComponentUpdateState, \
</span><span class="lines">@@ -79,15 +77,16 @@
</span><span class="cx">     InvalidDefaultCalendar, \
</span><span class="cx">     InvalidAttachmentOperation, DuplicatePrivateCommentsError, \
</span><span class="cx">     TimeRangeUpperLimit, TimeRangeLowerLimit, InvalidSplit, \
</span><del>-    AttachmentSizeTooLarge, UnknownTimezone, SetComponentOptions
-from txdav.caldav.icalendarstore import QuotaExceeded
</del><ins>+    UnknownTimezone, SetComponentOptions
</ins><span class="cx"> from txdav.common.datastore.sql import CommonHome, CommonHomeChild, \
</span><del>-    CommonObjectResource, ECALENDARTYPE, SharingInvitation
</del><ins>+    CommonObjectResource, ECALENDARTYPE
+from txdav.common.datastore.sql_directory import GroupsRecord
</ins><span class="cx"> from txdav.common.datastore.sql_tables import _ATTACHMENTS_MODE_NONE, \
</span><span class="cx">     _ATTACHMENTS_MODE_READ, _ATTACHMENTS_MODE_WRITE, _BIND_MODE_DIRECT, \
</span><span class="cx">     _BIND_MODE_GROUP, _BIND_MODE_GROUP_READ, _BIND_MODE_GROUP_WRITE, \
</span><span class="cx">     _BIND_MODE_OWN, _BIND_MODE_READ, _BIND_MODE_WRITE, _BIND_STATUS_ACCEPTED, \
</span><span class="cx">     _TRANSP_OPAQUE, _TRANSP_TRANSPARENT, schema
</span><ins>+from txdav.common.datastore.sql_sharing import SharingInvitation
</ins><span class="cx"> from txdav.common.icommondatastore import IndexedSearchException, \
</span><span class="cx">     InternalDataStoreError, HomeChildNameAlreadyExistsError, \
</span><span class="cx">     HomeChildNameNotAllowedError, ObjectResourceTooBigError, \
</span><span class="lines">@@ -111,8 +110,7 @@
</span><span class="cx"> from urlparse import urlparse, urlunparse
</span><span class="cx"> import collections
</span><span class="cx"> import datetime
</span><del>-import os
-import tempfile
</del><ins>+import itertools
</ins><span class="cx"> import urllib
</span><span class="cx"> import uuid
</span><span class="cx"> 
</span><span class="lines">@@ -142,7 +140,7 @@
</span><span class="cx">         @type txn: L{txdav.common.datastore.sql.CommonStoreTransaction}
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-        at = schema.ATTACHMENT
</del><ins>+        at = Attachment._attachmentSchema
</ins><span class="cx">         rows = (yield Select(
</span><span class="cx">             (at.DROPBOX_ID,),
</span><span class="cx">             From=at,
</span><span class="lines">@@ -174,8 +172,8 @@
</span><span class="cx">         txn = self._store.newTransaction(&quot;CalendarStoreFeatures.upgradeToManagedAttachments - preliminary work&quot;)
</span><span class="cx">         try:
</span><span class="cx">             # Clear out unused CALENDAR_OBJECT.DROPBOX_IDs
</span><del>-            co = schema.CALENDAR_OBJECT
-            at = schema.ATTACHMENT
</del><ins>+            co = CalendarObject._objectSchema
+            at = Attachment._attachmentSchema
</ins><span class="cx">             yield Update(
</span><span class="cx">                 {co.DROPBOX_ID: None},
</span><span class="cx">                 Where=co.RESOURCE_ID.In(Select(
</span><span class="lines">@@ -248,7 +246,7 @@
</span><span class="cx">         log.debug(&quot;  {0} affected calendar objects&quot;.format(len(cobjs),))
</span><span class="cx"> 
</span><span class="cx">         # Get names of each matching attachment
</span><del>-        at = schema.ATTACHMENT
</del><ins>+        at = Attachment._attachmentSchema
</ins><span class="cx">         names = (yield Select(
</span><span class="cx">             (at.PATH,),
</span><span class="cx">             From=at,
</span><span class="lines">@@ -317,8 +315,8 @@
</span><span class="cx">         @type dropbox_id: C{str}
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-        co = schema.CALENDAR_OBJECT
-        cb = schema.CALENDAR_BIND
</del><ins>+        co = CalendarObject._objectSchema
+        cb = Calendar._bindSchema
</ins><span class="cx">         rows = (yield Select(
</span><span class="cx">             (cb.CALENDAR_HOME_RESOURCE_ID, co.CALENDAR_RESOURCE_ID, co.RESOURCE_ID,),
</span><span class="cx">             From=co.join(cb, co.CALENDAR_RESOURCE_ID == cb.CALENDAR_RESOURCE_ID),
</span><span class="lines">@@ -404,6 +402,33 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+class CalendarHomeRecord(SerializableRecord, fromTable(schema.CALENDAR_HOME)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.CALENDAR_HOME}.
+    &quot;&quot;&quot;
+    pass
+
+
+
+class CalendarMetaDataRecord(SerializableRecord, fromTable(schema.CALENDAR_METADATA)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.CALENDAR_METADATA}.
+    &quot;&quot;&quot;
+    pass
+
+
+
+class CalendarBindRecord(SerializableRecord, fromTable(schema.CALENDAR_BIND)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.CALENDAR_BIND}.
+    &quot;&quot;&quot;
+    pass
+
+
+
</ins><span class="cx"> class CalendarHome(CommonHome):
</span><span class="cx"> 
</span><span class="cx">     implements(ICalendarHome)
</span><span class="lines">@@ -412,16 +437,15 @@
</span><span class="cx"> 
</span><span class="cx">     # structured tables.  (new, preferred)
</span><span class="cx">     _homeSchema = schema.CALENDAR_HOME
</span><del>-    _bindSchema = schema.CALENDAR_BIND
</del><span class="cx">     _homeMetaDataSchema = schema.CALENDAR_HOME_METADATA
</span><ins>+
+    _bindSchema = schema.CALENDAR_BIND
</ins><span class="cx">     _revisionsSchema = schema.CALENDAR_OBJECT_REVISIONS
</span><span class="cx">     _objectSchema = schema.CALENDAR_OBJECT
</span><span class="cx"> 
</span><span class="cx">     _notifierPrefix = &quot;CalDAV&quot;
</span><span class="cx">     _dataVersionKey = &quot;CALENDAR-DATAVERSION&quot;
</span><span class="cx"> 
</span><del>-    _cacher = Memcacher(&quot;SQL.calhome&quot;, pickle=True, key_normalization=False)
-
</del><span class="cx">     _componentCalendarName = {
</span><span class="cx">         &quot;VEVENT&quot;: &quot;calendar&quot;,
</span><span class="cx">         &quot;VTODO&quot;: &quot;tasks&quot;,
</span><span class="lines">@@ -494,37 +518,36 @@
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def remove(self):
</span><del>-        ch = schema.CALENDAR_HOME
-        cb = schema.CALENDAR_BIND
-        cor = schema.CALENDAR_OBJECT_REVISIONS
-        rp = schema.RESOURCE_PROPERTY
-
</del><span class="cx">         # delete attachments corresponding to this home, also removing from disk
</span><span class="cx">         yield Attachment.removedHome(self._txn, self._resourceID)
</span><span class="cx"> 
</span><del>-        yield Delete(
-            From=cb,
-            Where=cb.CALENDAR_HOME_RESOURCE_ID == self._resourceID
-        ).on(self._txn)
</del><ins>+        yield super(CalendarHome, self).remove()
</ins><span class="cx"> 
</span><del>-        yield Delete(
-            From=cor,
-            Where=cor.CALENDAR_HOME_RESOURCE_ID == self._resourceID
-        ).on(self._txn)
</del><span class="cx"> 
</span><del>-        yield Delete(
-            From=ch,
-            Where=ch.RESOURCE_ID == self._resourceID
-        ).on(self._txn)
</del><ins>+    @inlineCallbacks
+    def copyMetadata(self, other, calendarIDMap):
+        &quot;&quot;&quot;
+        Copy metadata from one L{CalendarHome} to another. This is only
+        used during a migration step.
+        &quot;&quot;&quot;
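        # Illustration only (not part of this changeset): a migration caller is
        # expected to build calendarIDMap from old to new calendar resource IDs,
        # e.g. calendarIDMap = {oldCalendar.id(): newCalendar.id(), ...}, so that
        # default-calendar references in the copied metadata are remapped to the
        # new home's calendars (see the _componentDefaultAttribute check below).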
</ins><span class="cx"> 
</span><del>-        yield Delete(
-            From=rp,
-            Where=rp.RESOURCE_ID == self._resourceID
</del><ins>+        # Simple attributes that can be copied over as-is, but the calendar id's need to be mapped
+        chm = self._homeMetaDataSchema
+        values = {}
+        for attr, col in zip(self.metadataAttributes(), self.metadataColumns()):
+            value = getattr(other, attr)
+            if attr in self._componentDefaultAttribute.values():
+                value = calendarIDMap.get(value)
+            setattr(self, attr, value)
+            values[col] = value
+
+        # Update the local data
+        yield Update(
+            values,
+            Where=chm.RESOURCE_ID == self._resourceID
</ins><span class="cx">         ).on(self._txn)
</span><span class="cx"> 
</span><del>-        yield self._cacher.delete(str(self._ownerUID))
</del><span class="cx"> 
</span><del>-
</del><span class="cx">     @inlineCallbacks
</span><span class="cx">     def hasCalendarResourceUIDSomewhereElse(self, uid, ok_object, mode):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="lines">@@ -598,8 +621,8 @@
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Implement lookup via queries.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        co = schema.CALENDAR_OBJECT
-        cb = schema.CALENDAR_BIND
</del><ins>+        co = self._objectSchema
+        cb = self._bindSchema
</ins><span class="cx">         rows = (yield Select(
</span><span class="cx">             [co.PARENT_RESOURCE_ID,
</span><span class="cx">              co.RESOURCE_ID],
</span><span class="lines">@@ -618,10 +641,34 @@
</span><span class="cx">         returnValue(None)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def getAllAttachments(self):
+        &quot;&quot;&quot;
+        Return all the L{Attachment} objects associated with this calendar home.
+        Needed during migration.
+        &quot;&quot;&quot;
+        return Attachment.loadAllAttachments(self)
+
+
+    def getAttachmentLinks(self):
+        &quot;&quot;&quot;
+        Read the attachment&lt;-&gt;calendar object mapping data associated with this calendar home.
+        Needed during migration only.
+        &quot;&quot;&quot;
+        return AttachmentLink.linksForHome(self)
+
+
+    def getAttachmentByID(self, id):
+        &quot;&quot;&quot;
+        Return a specific attachment associated with this calendar home.
+        Needed during migration only.
+        &quot;&quot;&quot;
+        return Attachment.loadAttachmentByID(self, id)
+
+
</ins><span class="cx">     @inlineCallbacks
</span><span class="cx">     def getAllDropboxIDs(self):
</span><del>-        co = schema.CALENDAR_OBJECT
-        cb = schema.CALENDAR_BIND
</del><ins>+        co = self._objectSchema
+        cb = self._bindSchema
</ins><span class="cx">         rows = (yield Select(
</span><span class="cx">             [co.DROPBOX_ID],
</span><span class="cx">             From=co.join(cb, co.PARENT_RESOURCE_ID == cb.RESOURCE_ID),
</span><span class="lines">@@ -634,7 +681,7 @@
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def getAllAttachmentNames(self):
</span><del>-        att = schema.ATTACHMENT
</del><ins>+        att = Attachment._attachmentSchema
</ins><span class="cx">         rows = (yield Select(
</span><span class="cx">             [att.DROPBOX_ID],
</span><span class="cx">             From=att,
</span><span class="lines">@@ -646,8 +693,8 @@
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def getAllManagedIDs(self):
</span><del>-        at = schema.ATTACHMENT
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
</del><ins>+        at = Attachment._attachmentSchema
+        attco = Attachment._attachmentLinkSchema
</ins><span class="cx">         rows = (yield Select(
</span><span class="cx">             [attco.MANAGED_ID, ],
</span><span class="cx">             From=attco.join(at, attco.ATTACHMENT_ID == at.ATTACHMENT_ID),
</span><span class="lines">@@ -658,6 +705,27 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><ins>+    def getAllGroupAttendees(self):
+        &quot;&quot;&quot;
+        Return a list of L{GroupAttendeeRecord}, L{GroupRecord} pairs, one for each group attendee referenced in calendar data
+        owned by this home.
+        &quot;&quot;&quot;
+
+        results = []
+        calendars = yield self.loadChildren()
+        for calendar in calendars:
+            if not calendar.owned():
+                continue
+            children = yield calendar.objectResources()
+            cobjs = [child.id() for child in children]
+            if cobjs:
+                result = yield GroupAttendeeRecord.groupAttendeesForObjects(self._txn, cobjs)
+                results.extend(result)
+
+        returnValue(results)
+
+
+    @inlineCallbacks
</ins><span class="cx">     def createdHome(self):
</span><span class="cx"> 
</span><span class="cx">         # Check whether components type must be separate
</span><span class="lines">@@ -960,6 +1028,12 @@
</span><span class="cx">     _objectSchema = schema.CALENDAR_OBJECT
</span><span class="cx">     _timeRangeSchema = schema.TIME_RANGE
</span><span class="cx"> 
</span><ins>+    _homeRecordClass = CalendarHomeRecord
+    _metadataRecordClass = CalendarMetaDataRecord
+    _bindRecordClass = CalendarBindRecord
+    _bindHomeIDAttributeName = &quot;calendarHomeResourceID&quot;
+    _bindResourceIDAttributeName = &quot;calendarResourceID&quot;
+
</ins><span class="cx">     # Mapping of iCalendar property name to DB column name
</span><span class="cx">     _queryFields = {
</span><span class="cx">         &quot;UID&quot;: _objectSchema.UID,
</span><span class="lines">@@ -1054,6 +1128,46 @@
</span><span class="cx">     def _calendarHome(self):
</span><span class="cx">         return self._home
</span><span class="cx"> 
</span><ins>+
+    @inlineCallbacks
+    def copyMetadata(self, other):
+        &quot;&quot;&quot;
+        Copy metadata from one L{Calendar} to another. This is only
+        used during a migration step.
+        &quot;&quot;&quot;
+
+        # Copy over list of attributes and the name
+        self._name = other._name
+        for attr in itertools.chain(self.metadataAttributes(), self.additionalBindAttributes()):
+            if attr in (&quot;_created&quot;, &quot;_modified&quot;):
+                continue
+            if hasattr(other, attr):
+                setattr(self, attr, getattr(other, attr))
+
+        # Update the metadata table
+        cm = self._homeChildMetaDataSchema
+        values = {}
+        for attr, column in itertools.izip(self.metadataAttributes(), self.metadataColumns()):
+            if attr in (&quot;_created&quot;, &quot;_modified&quot;):
+                continue
+            values[column] = getattr(self, attr)
+        yield Update(
+            values,
+            Where=(cm.RESOURCE_ID == self._resourceID)
+        ).on(self._txn)
+
+        # Update the bind table
+        cb = self._bindSchema
+        values = {
+            cb.RESOURCE_NAME: self._name
+        }
+        for attr, column in itertools.izip(self.additionalBindAttributes(), self.additionalBindColumns()):
+            values[column] = getattr(self, attr)
+        yield Update(
+            values,
+            Where=(cb.CALENDAR_HOME_RESOURCE_ID == self.viewerHome()._resourceID).And(cb.CALENDAR_RESOURCE_ID == self._resourceID)
+        ).on(self._txn)
+
</ins><span class="cx">     ownerCalendarHome = CommonHomeChild.ownerHome
</span><span class="cx">     viewerCalendarHome = CommonHomeChild.viewerHome
</span><span class="cx">     calendarObjects = CommonHomeChild.objectResources
</span><span class="lines">@@ -1488,7 +1602,7 @@
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Query to find resources that need to be re-expanded
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        co = schema.CALENDAR_OBJECT
</del><ins>+        co = cls._objectSchema
</ins><span class="cx">         return Select(
</span><span class="cx">             [co.RESOURCE_NAME],
</span><span class="cx">             From=co,
</span><span class="lines">@@ -1795,8 +1909,8 @@
</span><span class="cx"> 
</span><span class="cx">         # First check that the actual group membership has changed
</span><span class="cx">         if (yield self.updateShareeGroupLink(groupUID)):
</span><del>-            groupID = (yield self._txn.groupByUID(groupUID))[0]
-            memberUIDs = yield self._txn.groupMemberUIDs(groupID)
</del><ins>+            group = yield self._txn.groupByUID(groupUID)
+            memberUIDs = yield self._txn.groupMemberUIDs(group.groupID)
</ins><span class="cx">             boundUIDs = set()
</span><span class="cx"> 
</span><span class="cx">             home = self._homeSchema
</span><span class="lines">@@ -1946,39 +2060,36 @@
</span><span class="cx">         update schema.GROUP_SHAREE
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         changed = False
</span><del>-        (
-            groupID, _ignore_name, membershipHash, _ignore_modDate,
-            _ignore_extant
-        ) = yield self._txn.groupByUID(groupUID)
</del><ins>+        group = yield self._txn.groupByUID(groupUID)
</ins><span class="cx"> 
</span><span class="cx">         gs = schema.GROUP_SHAREE
</span><span class="cx">         rows = yield Select(
</span><span class="cx">             [gs.MEMBERSHIP_HASH, gs.GROUP_BIND_MODE],
</span><span class="cx">             From=gs,
</span><span class="cx">             Where=(gs.CALENDAR_ID == self._resourceID).And(
</span><del>-                gs.GROUP_ID == groupID)
</del><ins>+                gs.GROUP_ID == group.groupID)
</ins><span class="cx">         ).on(self._txn)
</span><span class="cx">         if rows:
</span><span class="cx">             [[gsMembershipHash, gsMode]] = rows
</span><span class="cx">             updateMap = {}
</span><del>-            if gsMembershipHash != membershipHash:
-                updateMap[gs.MEMBERSHIP_HASH] = membershipHash
</del><ins>+            if gsMembershipHash != group.membershipHash:
+                updateMap[gs.MEMBERSHIP_HASH] = group.membershipHash
</ins><span class="cx">             if mode is not None and gsMode != mode:
</span><span class="cx">                 updateMap[gs.GROUP_BIND_MODE] = mode
</span><span class="cx">             if updateMap:
</span><span class="cx">                 yield Update(
</span><span class="cx">                     updateMap,
</span><span class="cx">                     Where=(gs.CALENDAR_ID == self._resourceID).And(
</span><del>-                        gs.GROUP_ID == groupID
</del><ins>+                        gs.GROUP_ID == group.groupID
</ins><span class="cx">                     )
</span><span class="cx">                 ).on(self._txn)
</span><span class="cx">                 changed = True
</span><span class="cx">         else:
</span><span class="cx">             yield Insert({
</span><del>-                gs.MEMBERSHIP_HASH: membershipHash,
</del><ins>+                gs.MEMBERSHIP_HASH: group.membershipHash,
</ins><span class="cx">                 gs.GROUP_BIND_MODE: mode,
</span><span class="cx">                 gs.CALENDAR_ID: self._resourceID,
</span><del>-                gs.GROUP_ID: groupID,
</del><ins>+                gs.GROUP_ID: group.groupID,
</ins><span class="cx">             }).on(self._txn)
</span><span class="cx">             changed = True
</span><span class="cx"> 
</span><span class="lines">@@ -2065,8 +2176,8 @@
</span><span class="cx"> 
</span><span class="cx">         # invite every member of group
</span><span class="cx">         shareeViews = []
</span><del>-        groupID = (yield self._txn.groupByUID(shareeUID))[0]
-        memberUIDs = yield self._txn.groupMemberUIDs(groupID)
</del><ins>+        group = yield self._txn.groupByUID(shareeUID)
+        memberUIDs = yield self._txn.groupMemberUIDs(group.groupID)
</ins><span class="cx">         for memberUID in memberUIDs:
</span><span class="cx">             if memberUID != self._home.uid():
</span><span class="cx">                 shareeView = yield self.shareeView(memberUID)
</span><span class="lines">@@ -2206,6 +2317,14 @@
</span><span class="cx">         returnValue(invitations)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    @inlineCallbacks
+    def groupSharees(self):
+        sharees = yield GroupShareeRecord.querysimple(self._txn, calendarID=self.id())
+        groups = set([sharee.groupID for sharee in sharees])
+        groups = (yield GroupsRecord.query(self._txn, GroupsRecord.groupID.In(groups))) if groups else []
+        returnValue({&quot;groups&quot;: groups, &quot;sharees&quot;: sharees})
+
+
</ins><span class="cx"> icalfbtype_to_indexfbtype = {
</span><span class="cx">     &quot;UNKNOWN&quot;         : 0,
</span><span class="cx">     &quot;FREE&quot;            : 1,
</span><span class="lines">@@ -2413,9 +2532,9 @@
</span><span class="cx">             groupRecord = yield self.directoryService().recordWithCalendarUserAddress(groupCUA)
</span><span class="cx">             if groupRecord:
</span><span class="cx">                 # get members
</span><del>-                groupID = (yield self._txn.groupByUID(groupRecord.uid))[0]
-                if groupID is not None:
-                    members = yield self._txn.groupMembers(groupID)
</del><ins>+                group = yield self._txn.groupByUID(groupRecord.uid)
+                if group is not None:
+                    members = yield self._txn.groupMembers(group.groupID)
</ins><span class="cx">                     groupCUAToAttendeeMemberPropMap[groupRecord.canonicalCalendarUserAddress()] = tuple(
</span><span class="cx">                         [member.attendeeProperty(params={&quot;MEMBER&quot;: groupCUA}) for member in sorted(members, key=lambda x: x.uid)]
</span><span class="cx">                     )
</span><span class="lines">@@ -2439,19 +2558,14 @@
</span><span class="cx">         @return: a L{dict} with group ids as the key and membership hash as the value
</span><span class="cx">         @rtype: L{dict}
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        ga = schema.GROUP_ATTENDEE
-        rows = yield Select(
-            [ga.GROUP_ID, ga.MEMBERSHIP_HASH],
-            From=ga,
-            Where=ga.RESOURCE_ID == self._resourceID,
-        ).on(self._txn)
-        returnValue(dict(rows))
</del><ins>+        records = yield GroupAttendeeRecord.querysimple(self._txn, resourceID=self._resourceID)
+        returnValue(dict([(record.groupID, record,) for record in records]))
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def updateEventGroupLink(self, groupCUAToAttendeeMemberPropMap=None):
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        update schema.GROUP_ATTENDEE
</del><ins>+        update group event links
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         if groupCUAToAttendeeMemberPropMap is None:
</span><span class="cx">             if hasattr(self, &quot;_groupCUAToAttendeeMemberPropMap&quot;):
</span><span class="lines">@@ -2468,42 +2582,27 @@
</span><span class="cx">                 groupUID = groupRecord.uid
</span><span class="cx">             else:
</span><span class="cx">                 groupUID = uidFromCalendarUserAddress(groupCUA)
</span><del>-            (
-                groupID, _ignore_name, membershipHash, _ignore_modDate,
-                _ignore_extant
-            ) = yield self._txn.groupByUID(groupUID)
</del><ins>+            group = yield self._txn.groupByUID(groupUID)
</ins><span class="cx"> 
</span><del>-            ga = schema.GROUP_ATTENDEE
-            if groupID in groupIDToMembershipHashMap:
-                if groupIDToMembershipHashMap[groupID] != membershipHash:
-                    yield Update(
-                        {ga.MEMBERSHIP_HASH: membershipHash, },
-                        Where=(ga.RESOURCE_ID == self._resourceID).And(
-                            ga.GROUP_ID == groupID)
-                    ).on(self._txn)
</del><ins>+            if group.groupID in groupIDToMembershipHashMap:
+                if groupIDToMembershipHashMap[group.groupID].membershipHash != group.membershipHash:
+                    yield groupIDToMembershipHashMap[group.groupID].update(membershipHash=group.membershipHash)
</ins><span class="cx">                     changed = True
</span><del>-                del groupIDToMembershipHashMap[groupID]
</del><ins>+                del groupIDToMembershipHashMap[group.groupID]
</ins><span class="cx">             else:
</span><del>-                yield Insert({
-                    ga.RESOURCE_ID: self._resourceID,
-                    ga.GROUP_ID: groupID,
-                    ga.MEMBERSHIP_HASH: membershipHash,
-                }).on(self._txn)
</del><ins>+                yield GroupAttendeeRecord.create(
+                    self._txn,
+                    resourceID=self._resourceID,
+                    groupID=group.groupID,
+                    membershipHash=group.membershipHash,
+                )
</ins><span class="cx">                 changed = True
</span><span class="cx"> 
</span><span class="cx">         if groupIDToMembershipHashMap:
</span><del>-            groupIDsToRemove = groupIDToMembershipHashMap.keys()
-            yield Delete(
-                From=ga,
-                Where=(ga.RESOURCE_ID == self._resourceID).And(
-                    ga.GROUP_ID.In(
-                        Parameter(
-                            &quot;groupIDsToRemove&quot;,
-                            len(groupIDsToRemove)
-                        )
-                    )
-                )
-            ).on(self._txn, groupIDsToRemove=groupIDsToRemove)
</del><ins>+            yield GroupAttendeeRecord.deletesome(
+                self._txn,
+                GroupAttendeeRecord.groupID.In(groupIDToMembershipHashMap.keys()),
+            )
</ins><span class="cx">             changed = True
</span><span class="cx"> 
</span><span class="cx">         returnValue(changed)
</span><span class="lines">@@ -2564,11 +2663,7 @@
</span><span class="cx">                     del self._groupCUAToAttendeeMemberPropMap
</span><span class="cx">                 else:
</span><span class="cx">                     # delete existing group rows
</span><del>-                    ga = schema.GROUP_ATTENDEE
-                    yield Delete(
-                        From=ga,
-                        Where=ga.RESOURCE_ID == self._resourceID,
-                    ).on(txn)
</del><ins>+                    yield GroupAttendeeRecord.deletesimple(self._txn, resourceID=self._resourceID)
</ins><span class="cx"> 
</span><span class="cx">         returnValue(isOldEventWithGroupAttendees)
</span><span class="cx"> 
</span><span class="lines">@@ -2614,13 +2709,11 @@
</span><span class="cx">                     # remove group link to ensure update (update to unknown hash would work too)
</span><span class="cx">                     # FIXME: its possible that more than one group id gets updated during this single work item, so we
</span><span class="cx">                     # need to make sure that ALL the group_id's are removed by this query.
</span><del>-                    ga = schema.GROUP_ATTENDEE
-                    yield Delete(
-                        From=ga,
-                        Where=(ga.RESOURCE_ID == self._resourceID).And(
-                            ga.GROUP_ID == groupID
-                        )
-                    ).on(self._txn)
</del><ins>+                    yield GroupAttendeeRecord.deletesimple(
+                        self._txn,
+                        resourceID=self._resourceID,
+                        groupID=groupID,
+                    )
</ins><span class="cx"> 
</span><span class="cx">                     # update group attendee in remaining component
</span><span class="cx">                     component = yield self.componentForUser()
</span><span class="lines">@@ -3521,7 +3614,7 @@
</span><span class="cx">                 recurrenceLowerLimit = None
</span><span class="cx">                 recurrenceLimit = DateTime(1900, 1, 1, 0, 0, 0, tzid=Timezone(utc=True))
</span><span class="cx"> 
</span><del>-        co = schema.CALENDAR_OBJECT
</del><ins>+        co = self._objectSchema
</ins><span class="cx">         tr = schema.TIME_RANGE
</span><span class="cx"> 
</span><span class="cx">         # Do not update if reCreate (re-indexing - we don't want to re-write data
</span><span class="lines">@@ -3725,6 +3818,29 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><ins>+    def copyMetadata(self, other):
+        &quot;&quot;&quot;
+        Copy metadata from one L{CalendarObjectResource} to another. This is only
+        used during a migration step.
+        &quot;&quot;&quot;
+        co = self._objectSchema
+        values = {
+            co.ATTACHMENTS_MODE                : other._attachment,
+            co.DROPBOX_ID                      : other._dropboxID,
+            co.ACCESS                          : other._access,
+            co.SCHEDULE_OBJECT                 : other._schedule_object,
+            co.SCHEDULE_TAG                    : other._schedule_tag,
+            co.SCHEDULE_ETAGS                  : other._schedule_etags,
+            co.PRIVATE_COMMENTS                : other._private_comments,
+        }
+
+        yield Update(
+            values,
+            Where=co.RESOURCE_ID == self._resourceID
+        ).on(self._txn)
+
+
+    @inlineCallbacks
</ins><span class="cx">     def component(self, doUpdate=False):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Read calendar data and validate/fix it. Do not raise a store error here
</span><span class="lines">@@ -3890,6 +4006,15 @@
</span><span class="cx">         )
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def purge(self):
+        &quot;&quot;&quot;
+        Do a &quot;silent&quot; removal of this object resource.
+        &quot;&quot;&quot;
+        return self._removeInternal(
+            ComponentRemoveState.NORMAL_NO_IMPLICIT
+        )
+
+
</ins><span class="cx">     @inlineCallbacks
</span><span class="cx">     def _removeInternal(self, internal_state=ComponentRemoveState.NORMAL):
</span><span class="cx"> 
</span><span class="lines">@@ -3947,7 +4072,7 @@
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         DAL query to load RECURRANCE_MIN, RECURRANCE_MAX via an object's resource ID.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        co = schema.CALENDAR_OBJECT
</del><ins>+        co = cls._objectSchema
</ins><span class="cx">         return Select(
</span><span class="cx">             [co.RECURRANCE_MIN, co.RECURRANCE_MAX, ],
</span><span class="cx">             From=co,
</span><span class="lines">@@ -4482,8 +4607,8 @@
</span><span class="cx">         Get a list of managed attachments where the names returned are for the last path segment
</span><span class="cx">         of the attachment URI.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        at = schema.ATTACHMENT
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
</del><ins>+        at = Attachment._attachmentSchema
+        attco = Attachment._attachmentLinkSchema
</ins><span class="cx">         rows = (yield Select(
</span><span class="cx">             [attco.MANAGED_ID, at.PATH, ],
</span><span class="cx">             From=attco.join(at, attco.ATTACHMENT_ID == at.ATTACHMENT_ID),
</span><span class="lines">@@ -4499,8 +4624,8 @@
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">         # Scan all the associated attachments for the one that matches
</span><del>-        at = schema.ATTACHMENT
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
</del><ins>+        at = Attachment._attachmentSchema
+        attco = Attachment._attachmentLinkSchema
</ins><span class="cx">         rows = (yield Select(
</span><span class="cx">             [attco.MANAGED_ID, at.PATH, ],
</span><span class="cx">             From=attco.join(at, attco.ATTACHMENT_ID == at.ATTACHMENT_ID),
</span><span class="lines">@@ -4553,8 +4678,10 @@
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def attachments(self):
</span><span class="cx">         if self._dropboxID:
</span><del>-            rows = yield self._attachmentsQuery.on(self._txn,
-                                                   dropboxID=self._dropboxID)
</del><ins>+            rows = yield self._attachmentsQuery.on(
+                self._txn,
+                dropboxID=self._dropboxID,
+            )
</ins><span class="cx">             result = []
</span><span class="cx">             for row in rows:
</span><span class="cx">                 result.append((yield self.attachmentWithName(row[0])))
</span><span class="lines">@@ -4823,900 +4950,6 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-class AttachmentStorageTransport(StorageTransportBase):
-
-    _TEMPORARY_UPLOADS_DIRECTORY = &quot;Temporary&quot;
-
-    def __init__(self, attachment, contentType, dispositionName, creating=False):
-        super(AttachmentStorageTransport, self).__init__(
-            attachment, contentType, dispositionName)
-
-        fileDescriptor, fileName = self._temporaryFile()
-        # Wrap the file descriptor in a file object we can write to
-        self._file = os.fdopen(fileDescriptor, &quot;w&quot;)
-        self._path = CachingFilePath(fileName)
-        self._hash = hashlib.md5()
-        self._creating = creating
-
-        self._txn.postAbort(self.aborted)
-
-
-    def _temporaryFile(self):
-        &quot;&quot;&quot;
-        Returns a (file descriptor, absolute path) tuple for a temporary file within
-        the Attachments/Temporary directory (creating the Temporary subdirectory
-        if it doesn't exist).  It is the caller's responsibility to remove the
-        file.
-        &quot;&quot;&quot;
-        attachmentRoot = self._txn._store.attachmentsPath
-        tempUploadsPath = attachmentRoot.child(self._TEMPORARY_UPLOADS_DIRECTORY)
-        if not tempUploadsPath.exists():
-            tempUploadsPath.createDirectory()
-        return tempfile.mkstemp(dir=tempUploadsPath.path)
-
-
-    @property
-    def _txn(self):
-        return self._attachment._txn
-
-
-    def aborted(self):
-        &quot;&quot;&quot;
-        Transaction aborted - clean up temp files.
-        &quot;&quot;&quot;
-        if self._path.exists():
-            self._path.remove()
-
-
-    def write(self, data):
-        if isinstance(data, buffer):
-            data = str(data)
-        self._file.write(data)
-        self._hash.update(data)
-
-
-    @inlineCallbacks
-    def loseConnection(self):
-
-        # FIXME: this should be synchronously accessible; IAttachment should
-        # have a method for getting its parent just as CalendarObject/Calendar
-        # do.
-
-        # FIXME: If this method isn't called, the transaction should be
-        # prevented from committing successfully.  It's not valid to have an
-        # attachment that doesn't point to a real file.
-
-        home = (yield self._txn.calendarHomeWithResourceID(self._attachment._ownerHomeID))
-
-        oldSize = self._attachment.size()
-        newSize = self._file.tell()
-        self._file.close()
-
-        # Check max size for attachment
-        if newSize &gt; config.MaximumAttachmentSize:
-            self._path.remove()
-            if self._creating:
-                yield self._attachment._internalRemove()
-            raise AttachmentSizeTooLarge()
-
-        # Check overall user quota
-        allowed = home.quotaAllowedBytes()
-        if allowed is not None and allowed &lt; ((yield home.quotaUsedBytes())
-                                              + (newSize - oldSize)):
-            self._path.remove()
-            if self._creating:
-                yield self._attachment._internalRemove()
-            raise QuotaExceeded()
-
-        self._path.moveTo(self._attachment._path)
-
-        yield self._attachment.changed(
-            self._contentType,
-            self._dispositionName,
-            self._hash.hexdigest(),
-            newSize
-        )
-
-        if home:
-            # Adjust quota
-            yield home.adjustQuotaUsedBytes(self._attachment.size() - oldSize)
-
-            # Send change notification to home
-            yield home.notifyChanged()
-
-
-
-def sqltime(value):
-    return datetimeMktime(parseSQLTimestamp(value))
-
-
-
-class Attachment(object):
-
-    implements(IAttachment)
-
-    def __init__(self, txn, a_id, dropboxID, name, ownerHomeID=None, justCreated=False):
-        self._txn = txn
-        self._attachmentID = a_id
-        self._ownerHomeID = ownerHomeID
-        self._dropboxID = dropboxID
-        self._contentType = None
-        self._size = 0
-        self._md5 = None
-        self._created = None
-        self._modified = None
-        self._name = name
-        self._justCreated = justCreated
-
-
-    def __repr__(self):
-        return (
-            &quot;&lt;{self.__class__.__name__}: {self._attachmentID}&gt;&quot;
-            .format(self=self)
-        )
-
-
-    def _attachmentPathRoot(self):
-        return self._txn._store.attachmentsPath
-
-
-    @inlineCallbacks
-    def initFromStore(self):
-        &quot;&quot;&quot;
-        Execute necessary SQL queries to retrieve attributes.
-
-        @return: C{True} if this attachment exists, C{False} otherwise.
-        &quot;&quot;&quot;
-        att = schema.ATTACHMENT
-        if self._dropboxID:
-            where = (att.DROPBOX_ID == self._dropboxID).And(
-                att.PATH == self._name)
-        else:
-            where = (att.ATTACHMENT_ID == self._attachmentID)
-        rows = (yield Select(
-            [
-                att.ATTACHMENT_ID,
-                att.DROPBOX_ID,
-                att.CALENDAR_HOME_RESOURCE_ID,
-                att.CONTENT_TYPE,
-                att.SIZE,
-                att.MD5,
-                att.CREATED,
-                att.MODIFIED,
-                att.PATH,
-            ],
-            From=att,
-            Where=where
-        ).on(self._txn))
-
-        if not rows:
-            returnValue(None)
-
-        row_iter = iter(rows[0])
-        self._attachmentID = row_iter.next()
-        self._dropboxID = row_iter.next()
-        self._ownerHomeID = row_iter.next()
-        self._contentType = MimeType.fromString(row_iter.next())
-        self._size = row_iter.next()
-        self._md5 = row_iter.next()
-        self._created = sqltime(row_iter.next())
-        self._modified = sqltime(row_iter.next())
-        self._name = row_iter.next()
-
-        returnValue(self)
-
-
-    def dropboxID(self):
-        return self._dropboxID
-
-
-    def isManaged(self):
-        return self._dropboxID == &quot;.&quot;
-
-
-    def name(self):
-        return self._name
-
-
-    def properties(self):
-        pass  # stub
-
-
-    def store(self, contentType, dispositionName=None):
-        if not self._name:
-            self._name = dispositionName
-        return AttachmentStorageTransport(self, contentType, dispositionName, self._justCreated)
-
-
-    def retrieve(self, protocol):
-        return AttachmentRetrievalTransport(self._path).start(protocol)
-
-
-    def changed(self, contentType, dispositionName, md5, size):
-        raise NotImplementedError
-
-    _removeStatement = Delete(
-        From=schema.ATTACHMENT,
-        Where=(schema.ATTACHMENT.ATTACHMENT_ID == Parameter(&quot;attachmentID&quot;))
-    )
-
-
-    @inlineCallbacks
-    def remove(self):
-        oldSize = self._size
-        self._txn.postCommit(self.removePaths)
-        yield self._internalRemove()
-        # Adjust quota
-        home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
-        if home:
-            yield home.adjustQuotaUsedBytes(-oldSize)
-
-            # Send change notification to home
-            yield home.notifyChanged()
-
-
-    def removePaths(self):
-        &quot;&quot;&quot;
-        Remove the actual file, then remove any empty parent directories up to the attachment root.
-        &quot;&quot;&quot;
-        self._path.remove()
-        self.removeParentPaths()
-
-
-    def removeParentPaths(self):
-        &quot;&quot;&quot;
-        Remove any empty parent directories up to the attachment root.
-        &quot;&quot;&quot;
-        parent = self._path.parent()
-        toppath = self._attachmentPathRoot().path
-        while parent.path != toppath:
-            if len(parent.listdir()) == 0:
-                parent.remove()
-                parent = parent.parent()
-            else:
-                break
-
-
-    def _internalRemove(self):
-        &quot;&quot;&quot;
-        Just delete the row; don't do any accounting / bookkeeping.  (This is
-        for attachments that have failed to be created due to errors during
-        storage.)
-        &quot;&quot;&quot;
-        return self._removeStatement.on(self._txn, attachmentID=self._attachmentID)
-
-
-    @classmethod
-    @inlineCallbacks
-    def removedHome(cls, txn, homeID):
-        &quot;&quot;&quot;
-        A calendar home is being removed so all of its attachments must go too. When removing,
-        we don't care about quota adjustment as there will be no quota once the home is removed.
-
-        TODO: this needs to be transactional wrt the actual file deletes.
-        &quot;&quot;&quot;
-        att = schema.ATTACHMENT
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
-
-        rows = (yield Select(
-            [att.ATTACHMENT_ID, att.DROPBOX_ID, ],
-            From=att,
-            Where=(
-                att.CALENDAR_HOME_RESOURCE_ID == homeID
-            ),
-        ).on(txn))
-
-        for attachmentID, dropboxID in rows:
-            if dropboxID:
-                attachment = DropBoxAttachment(txn, attachmentID, None, None)
-            else:
-                attachment = ManagedAttachment(txn, attachmentID, None, None)
-            attachment = (yield attachment.initFromStore())
-            if attachment._path.exists():
-                attachment.removePaths()
-
-        yield Delete(
-            From=attco,
-            Where=(
-                attco.ATTACHMENT_ID.In(Select(
-                    [att.ATTACHMENT_ID, ],
-                    From=att,
-                    Where=(
-                        att.CALENDAR_HOME_RESOURCE_ID == homeID
-                    ),
-                ))
-            ),
-        ).on(txn)
-
-        yield Delete(
-            From=att,
-            Where=(
-                att.CALENDAR_HOME_RESOURCE_ID == homeID
-            ),
-        ).on(txn)
-
-
-    # IDataStoreObject
-    def contentType(self):
-        return self._contentType
-
-
-    def md5(self):
-        return self._md5
-
-
-    def size(self):
-        return self._size
-
-
-    def created(self):
-        return self._created
-
-
-    def modified(self):
-        return self._modified
-
-
-
-class DropBoxAttachment(Attachment):
-
-    @classmethod
-    @inlineCallbacks
-    def create(cls, txn, dropboxID, name, ownerHomeID):
-        &quot;&quot;&quot;
-        Create a new Attachment object.
-
-        @param txn: The transaction to use
-        @type txn: L{CommonStoreTransaction}
-        @param dropboxID: the identifier for the attachment (dropbox id or managed id)
-        @type dropboxID: C{str}
-        @param name: the name of the attachment
-        @type name: C{str}
-        @param ownerHomeID: the resource-id of the home collection of the attachment owner
-        @type ownerHomeID: C{int}
-        &quot;&quot;&quot;
-
-        # If store has already migrated to managed attachments we will prevent creation of dropbox attachments
-        dropbox = (yield txn.store().dropboxAllowed(txn))
-        if not dropbox:
-            raise AttachmentDropboxNotAllowed
-
-        # Now create the DB entry
-        att = schema.ATTACHMENT
-        rows = (yield Insert({
-            att.CALENDAR_HOME_RESOURCE_ID : ownerHomeID,
-            att.DROPBOX_ID                : dropboxID,
-            att.CONTENT_TYPE              : &quot;&quot;,
-            att.SIZE                      : 0,
-            att.MD5                       : &quot;&quot;,
-            att.PATH                      : name,
-        }, Return=(att.ATTACHMENT_ID, att.CREATED, att.MODIFIED)).on(txn))
-
-        row_iter = iter(rows[0])
-        a_id = row_iter.next()
-        created = sqltime(row_iter.next())
-        modified = sqltime(row_iter.next())
-
-        attachment = cls(txn, a_id, dropboxID, name, ownerHomeID, True)
-        attachment._created = created
-        attachment._modified = modified
-
-        # File system paths need to exist
-        try:
-            attachment._path.parent().makedirs()
-        except:
-            pass
-
-        returnValue(attachment)
-
-
-    @classmethod
-    @inlineCallbacks
-    def load(cls, txn, dropboxID, name):
-        attachment = cls(txn, None, dropboxID, name)
-        attachment = (yield attachment.initFromStore())
-        returnValue(attachment)
-
-
-    @property
-    def _path(self):
-        # Use directory hashing scheme based on MD5 of dropboxID
-        hasheduid = hashlib.md5(self._dropboxID).hexdigest()
-        attachmentRoot = self._attachmentPathRoot().child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
-        return attachmentRoot.child(self.name())
-
-
-    @classmethod
-    @inlineCallbacks
-    def resourceRemoved(cls, txn, resourceID, dropboxID):
-        &quot;&quot;&quot;
-        Remove all attachments referencing the specified resource.
-        &quot;&quot;&quot;
-
-        # See if any other resources still reference this dropbox ID
-        co = schema.CALENDAR_OBJECT
-        rows = (yield Select(
-            [co.RESOURCE_ID, ],
-            From=co,
-            Where=(co.DROPBOX_ID == dropboxID).And(
-                co.RESOURCE_ID != resourceID)
-        ).on(txn))
-
-        if not rows:
-            # Find each attachment with matching dropbox ID
-            att = schema.ATTACHMENT
-            rows = (yield Select(
-                [att.PATH],
-                From=att,
-                Where=(att.DROPBOX_ID == dropboxID)
-            ).on(txn))
-            for name in rows:
-                name = name[0]
-                attachment = yield cls.load(txn, dropboxID, name)
-                yield attachment.remove()
-
-
-    @inlineCallbacks
-    def changed(self, contentType, dispositionName, md5, size):
-        &quot;&quot;&quot;
-        Dropbox attachments never change their path - ignore dispositionName.
-        &quot;&quot;&quot;
-
-        self._contentType = contentType
-        self._md5 = md5
-        self._size = size
-
-        att = schema.ATTACHMENT
-        self._created, self._modified = map(
-            sqltime,
-            (yield Update(
-                {
-                    att.CONTENT_TYPE    : generateContentType(self._contentType),
-                    att.SIZE            : self._size,
-                    att.MD5             : self._md5,
-                    att.MODIFIED        : utcNowSQL,
-                },
-                Where=(att.ATTACHMENT_ID == self._attachmentID),
-                Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
-        )
-
-
-    @inlineCallbacks
-    def convertToManaged(self):
-        &quot;&quot;&quot;
-        Convert this dropbox attachment into a managed attachment by updating the
-        database and returning a new ManagedAttachment object that does not reference
-        any calendar object. Referencing will be added later.
-
-        @return: the managed attachment object
-        @rtype: L{ManagedAttachment}
-        &quot;&quot;&quot;
-
-        # Change the DROPBOX_ID to a single &quot;.&quot; to indicate a managed attachment.
-        att = schema.ATTACHMENT
-        (yield Update(
-            {att.DROPBOX_ID    : &quot;.&quot;, },
-            Where=(att.ATTACHMENT_ID == self._attachmentID),
-        ).on(self._txn))
-
-        # Create an &quot;orphaned&quot; ManagedAttachment that points to the updated data but without
-        # an actual managed-id (which only exists when there is a reference to a calendar object).
-        mattach = (yield ManagedAttachment.load(self._txn, None, None, attachmentID=self._attachmentID))
-        if mattach is None:
-            raise AttachmentMigrationFailed
-        mattach._managedID = str(uuid.uuid4())
-
-        # Then move the file on disk from the old path to the new one
-        try:
-            mattach._path.parent().makedirs()
-        except Exception:
-            # OK to fail if it already exists, otherwise must raise
-            if not mattach._path.parent().exists():
-                raise
-        oldpath = self._path
-        newpath = mattach._path
-        oldpath.moveTo(newpath)
-        self.removeParentPaths()
-
-        returnValue(mattach)
-
-
-
-class ManagedAttachment(Attachment):
-    &quot;&quot;&quot;
-    Managed attachments are ones that the server is in total control of. Clients do POSTs on calendar objects
-    to store the attachment data and have ATTACH properties added, updated or removed from the calendar objects.
-    Each ATTACH property in a calendar object has a MANAGED-ID iCalendar parameter that is used in the POST requests
-    to target a specific attachment. The MANAGED-ID values are unique to each calendar object resource, though
-    multiple calendar object resources can point to the same underlying attachment as there is a separate database
-    table that maps calendar objects/managed-ids to actual attachments.
-    &quot;&quot;&quot;
-
-    @classmethod
-    @inlineCallbacks
-    def _create(cls, txn, managedID, ownerHomeID):
-        &quot;&quot;&quot;
-        Create a new managed Attachment object.
-
-        @param txn: The transaction to use
-        @type txn: L{CommonStoreTransaction}
-        @param managedID: the identifier for the attachment
-        @type managedID: C{str}
-        @param ownerHomeID: the resource-id of the home collection of the attachment owner
-        @type ownerHomeID: C{int}
-        &quot;&quot;&quot;
-
-        # Now create the DB entry
-        att = schema.ATTACHMENT
-        rows = (yield Insert({
-            att.CALENDAR_HOME_RESOURCE_ID : ownerHomeID,
-            att.DROPBOX_ID                : &quot;.&quot;,
-            att.CONTENT_TYPE              : &quot;&quot;,
-            att.SIZE                      : 0,
-            att.MD5                       : &quot;&quot;,
-            att.PATH                      : &quot;&quot;,
-        }, Return=(att.ATTACHMENT_ID, att.CREATED, att.MODIFIED)).on(txn))
-
-        row_iter = iter(rows[0])
-        a_id = row_iter.next()
-        created = sqltime(row_iter.next())
-        modified = sqltime(row_iter.next())
-
-        attachment = cls(txn, a_id, &quot;.&quot;, None, ownerHomeID, True)
-        attachment._managedID = managedID
-        attachment._created = created
-        attachment._modified = modified
-
-        # File system paths need to exist
-        try:
-            attachment._path.parent().makedirs()
-        except OSError:
-            # OK if the parent directory already exists
-            pass
-
-        returnValue(attachment)
-
-
-    @classmethod
-    @inlineCallbacks
-    def create(cls, txn, managedID, ownerHomeID, referencedBy):
-        &quot;&quot;&quot;
-        Create a new Attachment object.
-
-        @param txn: The transaction to use
-        @type txn: L{CommonStoreTransaction}
-        @param managedID: the identifier for the attachment
-        @type managedID: C{str}
-        @param ownerHomeID: the resource-id of the home collection of the attachment owner
-        @type ownerHomeID: C{int}
-        @param referencedBy: the resource-id of the calendar object referencing the attachment
-        @type referencedBy: C{int}
-        &quot;&quot;&quot;
-
-        # Now create the DB entry
-        attachment = (yield cls._create(txn, managedID, ownerHomeID))
-        attachment._objectResourceID = referencedBy
-
-        # Create the attachment&lt;-&gt;calendar object relationship for managed attachments
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
-        yield Insert({
-            attco.ATTACHMENT_ID               : attachment._attachmentID,
-            attco.MANAGED_ID                  : attachment._managedID,
-            attco.CALENDAR_OBJECT_RESOURCE_ID : attachment._objectResourceID,
-        }).on(txn)
-
-        returnValue(attachment)
-
-
-    @classmethod
-    @inlineCallbacks
-    def update(cls, txn, oldManagedID, ownerHomeID, referencedBy, oldAttachmentID):
-        &quot;&quot;&quot;
-        Create a new Attachment object.
-
-        @param txn: The transaction to use
-        @type txn: L{CommonStoreTransaction}
-        @param oldManagedID: the identifier for the original attachment
-        @type oldManagedID: C{str}
-        @param ownerHomeID: the resource-id of the home collection of the attachment owner
-        @type ownerHomeID: C{int}
-        @param referencedBy: the resource-id of the calendar object referencing the attachment
-        @type referencedBy: C{int}
-        @param oldAttachmentID: the attachment-id of the existing attachment being updated
-        @type oldAttachmentID: C{int}
-        &quot;&quot;&quot;
-
-        # Now create the DB entry with a new managed-ID
-        managed_id = str(uuid.uuid4())
-        attachment = (yield cls._create(txn, managed_id, ownerHomeID))
-        attachment._objectResourceID = referencedBy
-
-        # Update the attachment&lt;-&gt;calendar object relationship for managed attachments
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
-        yield Update(
-            {
-                attco.ATTACHMENT_ID    : attachment._attachmentID,
-                attco.MANAGED_ID       : attachment._managedID,
-            },
-            Where=(attco.MANAGED_ID == oldManagedID).And(
-                attco.CALENDAR_OBJECT_RESOURCE_ID == attachment._objectResourceID
-            ),
-        ).on(txn)
-
-        # Now check whether old attachmentID is still referenced - if not delete it
-        rows = (yield Select(
-            [attco.ATTACHMENT_ID, ],
-            From=attco,
-            Where=(attco.ATTACHMENT_ID == oldAttachmentID),
-        ).on(txn))
-        aids = [row[0] for row in rows] if rows is not None else ()
-        if len(aids) == 0:
-            oldattachment = ManagedAttachment(txn, oldAttachmentID, None, None)
-            oldattachment = (yield oldattachment.initFromStore())
-            yield oldattachment.remove()
-
-        returnValue(attachment)
-
-
-    @classmethod
-    @inlineCallbacks
-    def load(cls, txn, referencedID, managedID, attachmentID=None):
-        &quot;&quot;&quot;
-        Load a ManagedAttachment via either its managedID or attachmentID.
-        &quot;&quot;&quot;
-
-        if managedID:
-            attco = schema.ATTACHMENT_CALENDAR_OBJECT
-            where = (attco.MANAGED_ID == managedID)
-            if referencedID is not None:
-                where = where.And(attco.CALENDAR_OBJECT_RESOURCE_ID == referencedID)
-            rows = (yield Select(
-                [attco.ATTACHMENT_ID, ],
-                From=attco,
-                Where=where,
-            ).on(txn))
-            if len(rows) == 0:
-                returnValue(None)
-            elif referencedID is not None and len(rows) != 1:
-                raise AttachmentStoreValidManagedID
-            attachmentID = rows[0][0]
-
-        attachment = cls(txn, attachmentID, None, None)
-        attachment = (yield attachment.initFromStore())
-        attachment._managedID = managedID
-        attachment._objectResourceID = referencedID
-        returnValue(attachment)
-
-
-    @classmethod
-    @inlineCallbacks
-    def referencesTo(cls, txn, managedID):
-        &quot;&quot;&quot;
-        Find all the calendar object resourceIds referenced by this supplied managed-id.
-        &quot;&quot;&quot;
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
-        rows = (yield Select(
-            [attco.CALENDAR_OBJECT_RESOURCE_ID, ],
-            From=attco,
-            Where=(attco.MANAGED_ID == managedID),
-        ).on(txn))
-        cobjs = set([row[0] for row in rows]) if rows is not None else set()
-        returnValue(cobjs)
-
-
-    @classmethod
-    @inlineCallbacks
-    def usedManagedID(cls, txn, managedID):
-        &quot;&quot;&quot;
-        Return the &quot;owner&quot; home, the referencing calendar object resource id, and the iCalendar UID for a managed-id.
-        &quot;&quot;&quot;
-        att = schema.ATTACHMENT
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
-        co = schema.CALENDAR_OBJECT
-        rows = (yield Select(
-            [
-                att.CALENDAR_HOME_RESOURCE_ID,
-                attco.CALENDAR_OBJECT_RESOURCE_ID,
-                co.ICALENDAR_UID,
-            ],
-            From=att.join(
-                attco, att.ATTACHMENT_ID == attco.ATTACHMENT_ID, &quot;left outer&quot;
-            ).join(co, co.RESOURCE_ID == attco.CALENDAR_OBJECT_RESOURCE_ID),
-            Where=(attco.MANAGED_ID == managedID),
-        ).on(txn))
-        returnValue(rows)
-
-
-    @classmethod
-    @inlineCallbacks
-    def resourceRemoved(cls, txn, resourceID):
-        &quot;&quot;&quot;
-        Remove all attachments referencing the specified resource.
-        &quot;&quot;&quot;
-
-        # Find all reference attachment-ids and dereference
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
-        rows = (yield Select(
-            [attco.MANAGED_ID, ],
-            From=attco,
-            Where=(attco.CALENDAR_OBJECT_RESOURCE_ID == resourceID),
-        ).on(txn))
-        mids = set([row[0] for row in rows]) if rows is not None else set()
-        for managedID in mids:
-            attachment = (yield ManagedAttachment.load(txn, resourceID, managedID))
-            (yield attachment.removeFromResource(resourceID))
-
-
-    @classmethod
-    @inlineCallbacks
-    def copyManagedID(cls, txn, managedID, referencedBy):
-        &quot;&quot;&quot;
-        Associate an existing attachment with the new resource.
-        &quot;&quot;&quot;
-
-        # Find the associated attachment-id and insert new reference
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
-        aid = (yield Select(
-            [attco.ATTACHMENT_ID, ],
-            From=attco,
-            Where=(attco.MANAGED_ID == managedID),
-        ).on(txn))[0][0]
-
-        yield Insert({
-            attco.ATTACHMENT_ID               : aid,
-            attco.MANAGED_ID                  : managedID,
-            attco.CALENDAR_OBJECT_RESOURCE_ID : referencedBy,
-        }).on(txn)
-
-
-    def managedID(self):
-        return self._managedID
-
-
-    @inlineCallbacks
-    def objectResource(self):
-        &quot;&quot;&quot;
-        Return the calendar object resource associated with this attachment.
-        &quot;&quot;&quot;
-
-        home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
-        obj = (yield home.objectResourceWithID(self._objectResourceID))
-        returnValue(obj)
-
-
-    @property
-    def _path(self):
-        # Use directory hashing scheme based on MD5 of attachmentID
-        hasheduid = hashlib.md5(str(self._attachmentID)).hexdigest()
-        return self._attachmentPathRoot().child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
-
-
-    @inlineCallbacks
-    def location(self):
-        &quot;&quot;&quot;
-        Return the URI location of the attachment.
-        &quot;&quot;&quot;
-        if not hasattr(self, &quot;_ownerName&quot;):
-            home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
-            self._ownerName = home.name()
-        if not hasattr(self, &quot;_objectDropboxID&quot;):
-            if not hasattr(self, &quot;_objectResource&quot;):
-                self._objectResource = (yield self.objectResource())
-            self._objectDropboxID = self._objectResource._dropboxID
-
-        fname = self.lastSegmentOfUriPath(self._managedID, self._name)
-        location = self._txn._store.attachmentsURIPattern % {
-            &quot;home&quot;: self._ownerName,
-            &quot;dropbox_id&quot;: urllib.quote(self._objectDropboxID),
-            &quot;name&quot;: urllib.quote(fname),
-        }
-        returnValue(location)
-
-
-    @classmethod
-    def lastSegmentOfUriPath(cls, managed_id, name):
-        splits = name.rsplit(&quot;.&quot;, 1)
-        fname = splits[0]
-        suffix = splits[1] if len(splits) == 2 else &quot;unknown&quot;
-        return &quot;{0}-{1}.{2}&quot;.format(fname, managed_id[:8], suffix)
-
-
-    @inlineCallbacks
-    def changed(self, contentType, dispositionName, md5, size):
-        &quot;&quot;&quot;
-        Always update name to current disposition name.
-        &quot;&quot;&quot;
-
-        self._contentType = contentType
-        self._name = dispositionName
-        self._md5 = md5
-        self._size = size
-        att = schema.ATTACHMENT
-        self._created, self._modified = map(
-            sqltime,
-            (yield Update(
-                {
-                    att.CONTENT_TYPE    : generateContentType(self._contentType),
-                    att.SIZE            : self._size,
-                    att.MD5             : self._md5,
-                    att.MODIFIED        : utcNowSQL,
-                    att.PATH            : self._name,
-                },
-                Where=(att.ATTACHMENT_ID == self._attachmentID),
-                Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
-        )
-
-
-    @inlineCallbacks
-    def newReference(self, resourceID):
-        &quot;&quot;&quot;
-        Create a new reference of this attachment to the supplied calendar object resource id, and
-        return a ManagedAttachment for the new reference.
-
-        @param resourceID: the resource id to reference
-        @type resourceID: C{int}
-
-        @return: the new managed attachment
-        @rtype: L{ManagedAttachment}
-        &quot;&quot;&quot;
-
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
-        yield Insert({
-            attco.ATTACHMENT_ID               : self._attachmentID,
-            attco.MANAGED_ID                  : self._managedID,
-            attco.CALENDAR_OBJECT_RESOURCE_ID : resourceID,
-        }).on(self._txn)
-
-        mattach = (yield ManagedAttachment.load(self._txn, resourceID, self._managedID))
-        returnValue(mattach)
-
-
-    @inlineCallbacks
-    def removeFromResource(self, resourceID):
-
-        # Delete the reference
-        attco = schema.ATTACHMENT_CALENDAR_OBJECT
-        yield Delete(
-            From=attco,
-            Where=(attco.ATTACHMENT_ID == self._attachmentID).And(
-                attco.CALENDAR_OBJECT_RESOURCE_ID == resourceID),
-        ).on(self._txn)
-
-        # Check whether any references still exist - if not, remove the actual attachment
-        rows = (yield Select(
-            [attco.CALENDAR_OBJECT_RESOURCE_ID, ],
-            From=attco,
-            Where=(attco.ATTACHMENT_ID == self._attachmentID),
-        ).on(self._txn))
-        if len(rows) == 0:
-            yield self.remove()
-
-
-    @inlineCallbacks
-    def attachProperty(self):
-        &quot;&quot;&quot;
-        Return an iCalendar ATTACH property for this attachment.
-        &quot;&quot;&quot;
-        attach = Property(&quot;ATTACH&quot;, &quot;&quot;, valuetype=Value.VALUETYPE_URI)
-        location = (yield self.updateProperty(attach))
-        returnValue((attach, location,))
-
-
-    @inlineCallbacks
-    def updateProperty(self, attach):
-        &quot;&quot;&quot;
-        Update an iCalendar ATTACH property for this attachment.
-        &quot;&quot;&quot;
-
-        location = (yield self.location())
-
-        attach.setParameter(&quot;MANAGED-ID&quot;, self.managedID())
-        attach.setParameter(&quot;FMTTYPE&quot;, &quot;{0}/{1}&quot;.format(self.contentType().mediaType, self.contentType().mediaSubtype))
-        attach.setParameter(&quot;FILENAME&quot;, self.name())
-        attach.setParameter(&quot;SIZE&quot;, str(self.size()))
-        attach.setValue(location)
-
-        returnValue(location)
-
</del><span class="cx"> # Hook-up class relationships at the end after they have all been defined
</span><span class="cx"> from txdav.caldav.datastore.sql_external import CalendarHomeExternal, CalendarExternal, CalendarObjectExternal
</span><span class="cx"> CalendarHome._externalClass = CalendarHomeExternal
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoresql_attachmentpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcaldavdatastoresql_attachmentpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/caldav/datastore/sql_attachment.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_attachment.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/sql_attachment.py                                (rev 0)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql_attachment.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,1205 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from pycalendar.value import Value
+
+from twext.enterprise.dal.syntax import Select, Insert, Delete, Parameter, \
+    Update, utcNowSQL
+from twext.enterprise.util import parseSQLTimestamp
+from twext.python.filepath import CachingFilePath
+
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+from twistedcaldav.config import config
+from twistedcaldav.dateops import datetimeMktime
+from twistedcaldav.ical import Property
+
+from txdav.caldav.datastore.util import StorageTransportBase, \
+    AttachmentRetrievalTransport
+from txdav.caldav.icalendarstore import AttachmentSizeTooLarge, QuotaExceeded, \
+    IAttachment, AttachmentDropboxNotAllowed, AttachmentMigrationFailed, \
+    AttachmentStoreValidManagedID
+from txdav.common.datastore.sql_tables import schema
+
+from txweb2.http_headers import MimeType, generateContentType
+
+from zope.interface.declarations import implements
+
+import hashlib
+import itertools
+import os
+import tempfile
+import urllib
+import uuid
+
+&quot;&quot;&quot;
+Classes and methods that relate to CalDAV attachments in the SQL store.
+&quot;&quot;&quot;
+
+
+class AttachmentStorageTransport(StorageTransportBase):
+
+    _TEMPORARY_UPLOADS_DIRECTORY = &quot;Temporary&quot;
+
+    def __init__(self, attachment, contentType, dispositionName, creating=False, migrating=False):
+        super(AttachmentStorageTransport, self).__init__(
+            attachment, contentType, dispositionName)
+
+        fileDescriptor, fileName = self._temporaryFile()
+        # Wrap the file descriptor in a file object we can write to
+        self._file = os.fdopen(fileDescriptor, &quot;w&quot;)
+        self._path = CachingFilePath(fileName)
+        self._hash = hashlib.md5()
+        self._creating = creating
+        self._migrating = migrating
+
+        self._txn.postAbort(self.aborted)
+
+
+    def _temporaryFile(self):
+        &quot;&quot;&quot;
+        Returns a (file descriptor, absolute path) tuple for a temporary file within
+        the Attachments/Temporary directory (creating the Temporary subdirectory
+        if it doesn't exist).  It is the caller's responsibility to remove the
+        file.
+        &quot;&quot;&quot;
+        attachmentRoot = self._txn._store.attachmentsPath
+        tempUploadsPath = attachmentRoot.child(self._TEMPORARY_UPLOADS_DIRECTORY)
+        if not tempUploadsPath.exists():
+            tempUploadsPath.createDirectory()
+        return tempfile.mkstemp(dir=tempUploadsPath.path)
+
+
+    @property
+    def _txn(self):
+        return self._attachment._txn
+
+
+    def aborted(self):
+        &quot;&quot;&quot;
+        Transaction aborted - clean up temp files.
+        &quot;&quot;&quot;
+        if self._path.exists():
+            self._path.remove()
+
+
+    def write(self, data):
+        if isinstance(data, buffer):
+            data = str(data)
+        self._file.write(data)
+        self._hash.update(data)
+
+
+    @inlineCallbacks
+    def loseConnection(self):
+        &quot;&quot;&quot;
+        Note that when self._migrating is set we only care about the data and don't need to
+        do any quota checks/adjustments.
+        &quot;&quot;&quot;
+
+        # FIXME: this should be synchronously accessible; IAttachment should
+        # have a method for getting its parent just as CalendarObject/Calendar
+        # do.
+
+        # FIXME: If this method isn't called, the transaction should be
+        # prevented from committing successfully.  It's not valid to have an
+        # attachment that doesn't point to a real file.
+
+        home = (yield self._txn.calendarHomeWithResourceID(self._attachment._ownerHomeID))
+
+        oldSize = self._attachment.size()
+        newSize = self._file.tell()
+        self._file.close()
+
+        # Check max size for attachment
+        if not self._migrating and newSize &gt; config.MaximumAttachmentSize:
+            self._path.remove()
+            if self._creating:
+                yield self._attachment._internalRemove()
+            raise AttachmentSizeTooLarge()
+
+        # Check overall user quota
+        if not self._migrating:
+            allowed = home.quotaAllowedBytes()
+            if allowed is not None and allowed &lt; ((yield home.quotaUsedBytes())
+                                                  + (newSize - oldSize)):
+                self._path.remove()
+                if self._creating:
+                    yield self._attachment._internalRemove()
+                raise QuotaExceeded()
+
+        self._path.moveTo(self._attachment._path)
+
+        yield self._attachment.changed(
+            self._contentType,
+            self._dispositionName,
+            self._hash.hexdigest(),
+            newSize
+        )
+
+        if not self._migrating and home:
+            # Adjust quota
+            yield home.adjustQuotaUsedBytes(self._attachment.size() - oldSize)
+
+            # Send change notification to home
+            yield home.notifyChanged()
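+
+    # Quota arithmetic sketch (made-up numbers, not part of the API): with
+    # allowed=100, quotaUsedBytes=90, oldSize=10 and newSize=25, the check
+    # above evaluates 100 &lt; 90 + (25 - 10), i.e. 100 &lt; 105, so the upload
+    # is rejected with QuotaExceeded and the temporary file is removed.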
+
+
+
+def sqltime(value):
+    return datetimeMktime(parseSQLTimestamp(value))
+
+
+
+class AttachmentLink(object):
+    &quot;&quot;&quot;
+    A binding between an L{Attachment} and an L{CalendarObject}.
+    &quot;&quot;&quot;
+
+    _attachmentSchema = schema.ATTACHMENT
+    _attachmentLinkSchema = schema.ATTACHMENT_CALENDAR_OBJECT
+
+    @classmethod
+    def makeClass(cls, txn, linkData):
+        &quot;&quot;&quot;
+        Given the various database rows, build the actual class.
+
+        @param linkData: the standard set of link table columns
+        @type linkData: C{list}
+
+        @return: the constructed link object
+        @rtype: L{AttachmentLink}
+        &quot;&quot;&quot;
+
+        child = cls(txn)
+        for attr, value in zip(child._rowAttributes(), linkData):
+            setattr(child, attr, value)
+        return child
+
+
+    @classmethod
+    def _allColumns(cls):
+        &quot;&quot;&quot;
+        Full set of columns in the object table that need to be loaded to
+        initialize the object resource state.
+        &quot;&quot;&quot;
+        aco = cls._attachmentLinkSchema
+        return [
+            aco.ATTACHMENT_ID,
+            aco.MANAGED_ID,
+            aco.CALENDAR_OBJECT_RESOURCE_ID,
+        ]
+
+
+    @classmethod
+    def _rowAttributes(cls):
+        &quot;&quot;&quot;
+        Object attributes used to store the column values from L{_allColumns}. This is used to create
+        a mapping when serializing the object for cross-pod requests.
+        &quot;&quot;&quot;
+        return (
+            &quot;_attachmentID&quot;,
+            &quot;_managedID&quot;,
+            &quot;_calendarObjectID&quot;,
+        )
+
+
+    @classmethod
+    @inlineCallbacks
+    def linksForHome(cls, home):
+        &quot;&quot;&quot;
+        Load all attachment&lt;-&gt;calendar object mappings for the specified home collection.
+        &quot;&quot;&quot;
+
+        # Load from the main table first
+        att = cls._attachmentSchema
+        attco = cls._attachmentLinkSchema
+        dataRows = yield Select(
+            cls._allColumns(),
+            From=attco.join(att, on=(attco.ATTACHMENT_ID == att.ATTACHMENT_ID)),
+            Where=att.CALENDAR_HOME_RESOURCE_ID == home.id(),
+        ).on(home._txn)
+
+        # Create the actual objects
+        returnValue([cls.makeClass(home._txn, row) for row in dataRows])
+
+
+    def __init__(self, txn):
+        self._txn = txn
+        for attr in self._rowAttributes():
+            setattr(self, attr, None)
+
+
+    def serialize(self):
+        &quot;&quot;&quot;
+        Create a dictionary mapping key attributes so this object can be sent over a cross-pod call
+        and reconstituted at the other end. Note that the other end may have a different schema so
+        the attributes may not match exactly and will need to be processed accordingly.
+        &quot;&quot;&quot;
+        return dict([(attr[1:], getattr(self, attr, None)) for attr in self._rowAttributes()])
+
+
+    @classmethod
+    def deserialize(cls, txn, mapping):
+        &quot;&quot;&quot;
+        Given a mapping generated by L{serialize}, convert the values into an array of database
+        like items that conforms to the ordering of L{_allColumns} so it can be fed into L{makeClass}.
+        Note that there may be a schema mismatch with the external data, so treat missing items as
+        C{None} and ignore extra items.
+        &quot;&quot;&quot;
+
+        return cls.makeClass(txn, [mapping.get(row[1:]) for row in cls._rowAttributes()])
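+
+    # Minimal round-trip sketch (hypothetical values) of the serialize() /
+    # deserialize() pair above, as used for cross-pod requests:
+    #
+    #     link = AttachmentLink(txn)
+    #     link._attachmentID, link._managedID, link._calendarObjectID = 1, &quot;mid&quot;, 2
+    #     mapping = link.serialize()
+    #     # =&gt; {&quot;attachmentID&quot;: 1, &quot;managedID&quot;: &quot;mid&quot;, &quot;calendarObjectID&quot;: 2}
+    #     copy = AttachmentLink.deserialize(txn, mapping)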
+
+
+    def insert(self):
+        &quot;&quot;&quot;
+        Insert the object.
+        &quot;&quot;&quot;
+
+        row = dict([(column, getattr(self, attr)) for column, attr in itertools.izip(self._allColumns(), self._rowAttributes())])
+        return Insert(row).on(self._txn)
+
+
+
+class Attachment(object):
+
+    implements(IAttachment)
+
+    _attachmentSchema = schema.ATTACHMENT
+    _attachmentLinkSchema = schema.ATTACHMENT_CALENDAR_OBJECT
+
+    @classmethod
+    def makeClass(cls, txn, attachmentData):
+        &quot;&quot;&quot;
+        Given the various database rows, build the actual class.
+
+        @param attachmentData: the standard set of attachment columns
+        @type attachmentData: C{list}
+
+        @return: the constructed child class
+        @rtype: L{Attachment}
+        &quot;&quot;&quot;
+
+        att = cls._attachmentSchema
+        dropbox_id = attachmentData[cls._allColumns().index(att.DROPBOX_ID)]
+        c = ManagedAttachment if dropbox_id == &quot;.&quot; else DropBoxAttachment
+        child = c(
+            txn,
+            attachmentData[cls._allColumns().index(att.ATTACHMENT_ID)],
+            attachmentData[cls._allColumns().index(att.DROPBOX_ID)],
+            attachmentData[cls._allColumns().index(att.PATH)],
+        )
+
+        for attr, value in zip(child._rowAttributes(), attachmentData):
+            setattr(child, attr, value)
+        child._contentType = MimeType.fromString(child._contentType)
+
+        return child
+
+
+    @classmethod
+    def _allColumns(cls):
+        &quot;&quot;&quot;
+        Full set of columns in the object table that need to be loaded to
+        initialize the object resource state.
+        &quot;&quot;&quot;
+        att = cls._attachmentSchema
+        return [
+            att.ATTACHMENT_ID,
+            att.DROPBOX_ID,
+            att.CALENDAR_HOME_RESOURCE_ID,
+            att.CONTENT_TYPE,
+            att.SIZE,
+            att.MD5,
+            att.CREATED,
+            att.MODIFIED,
+            att.PATH,
+        ]
+
+
+    @classmethod
+    def _rowAttributes(cls):
+        &quot;&quot;&quot;
+        Object attributes used to store the column values from L{_allColumns}. This is used to create
+        a mapping when serializing the object for cross-pod requests.
+        &quot;&quot;&quot;
+        return (
+            &quot;_attachmentID&quot;,
+            &quot;_dropboxID&quot;,
+            &quot;_ownerHomeID&quot;,
+            &quot;_contentType&quot;,
+            &quot;_size&quot;,
+            &quot;_md5&quot;,
+            &quot;_created&quot;,
+            &quot;_modified&quot;,
+            &quot;_name&quot;,
+        )
+
+
+    @classmethod
+    @inlineCallbacks
+    def loadAllAttachments(cls, home):
+        &quot;&quot;&quot;
+        Load all attachments assigned to the specified home collection. This should only be
+        used when sync'ing an entire home's set of attachments.
+        &quot;&quot;&quot;
+
+        results = []
+
+        # Load from the main table first
+        att = cls._attachmentSchema
+        dataRows = yield Select(
+            cls._allColumns(),
+            From=att,
+            Where=att.CALENDAR_HOME_RESOURCE_ID == home.id(),
+        ).on(home._txn)
+
+        # Create the actual objects
+        for row in dataRows:
+            child = cls.makeClass(home._txn, row)
+            results.append(child)
+
+        returnValue(results)
+
+
+    @classmethod
+    @inlineCallbacks
+    def loadAttachmentByID(cls, home, id):
+        &quot;&quot;&quot;
+        Load one attachment, by id, assigned to the specified home collection. This should only be
+        used when sync'ing an entire home's set of attachments.
+        &quot;&quot;&quot;
+
+        # Load from the main table first
+        att = cls._attachmentSchema
+        rows = yield Select(
+            cls._allColumns(),
+            From=att,
+            Where=(att.CALENDAR_HOME_RESOURCE_ID == home.id()).And(
+                att.ATTACHMENT_ID == id),
+        ).on(home._txn)
+
+        # Create the actual object
+        returnValue(cls.makeClass(home._txn, rows[0]) if len(rows) == 1 else None)
+
+
+    def serialize(self):
+        &quot;&quot;&quot;
+        Create a dictionary mapping key attributes so this object can be sent over a cross-pod call
+        and reconstituted at the other end. Note that the other end may have a different schema so
+        the attributes may not match exactly and will need to be processed accordingly.
+        &quot;&quot;&quot;
+        result = dict([(attr[1:], getattr(self, attr, None)) for attr in self._rowAttributes()])
+        result[&quot;contentType&quot;] = generateContentType(result[&quot;contentType&quot;])
+        return result
+
+
+    @classmethod
+    def deserialize(cls, txn, mapping):
+        &quot;&quot;&quot;
+        Given a mapping generated by L{serialize}, convert the values into an array of database
+        like items that conforms to the ordering of L{_allColumns} so it can be fed into L{makeClass}.
+        Note that there may be a schema mismatch with the external data, so treat missing items as
+        C{None} and ignore extra items.
+        &quot;&quot;&quot;
+
+        return cls.makeClass(txn, [mapping.get(row[1:]) for row in cls._rowAttributes()])
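+
+    # Round-trip note (illustrative): serialize() flattens the parsed
+    # MimeType back to a string via generateContentType(), and makeClass()
+    # (invoked by deserialize()) re-parses it with MimeType.fromString(),
+    # so e.g. a &quot;text/calendar&quot; content type survives a cross-pod
+    # serialize/deserialize round trip.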
+
+
+    def __init__(self, txn, a_id, dropboxID, name, ownerHomeID=None, justCreated=False):
+        self._txn = txn
+        self._attachmentID = a_id
+        self._ownerHomeID = ownerHomeID
+        self._dropboxID = dropboxID
+        self._contentType = None
+        self._size = 0
+        self._md5 = None
+        self._created = None
+        self._modified = None
+        self._name = name
+        self._justCreated = justCreated
+
+
+    def __repr__(self):
+        return (
+            &quot;&lt;{self.__class__.__name__}: {self._attachmentID}&gt;&quot;
+            .format(self=self)
+        )
+
+
+    def _attachmentPathRoot(self):
+        return self._txn._store.attachmentsPath
+
+
+    @inlineCallbacks
+    def initFromStore(self):
+        &quot;&quot;&quot;
+        Execute necessary SQL queries to retrieve attributes.
+
+        @return: C{True} if this attachment exists, C{False} otherwise.
+        &quot;&quot;&quot;
+        att = self._attachmentSchema
+        if self._dropboxID and self._dropboxID != &quot;.&quot;:
+            where = (att.DROPBOX_ID == self._dropboxID).And(
+                att.PATH == self._name)
+        else:
+            where = (att.ATTACHMENT_ID == self._attachmentID)
+        rows = (yield Select(
+            self._allColumns(),
+            From=att,
+            Where=where
+        ).on(self._txn))
+
+        if not rows:
+            returnValue(None)
+
+        for attr, value in zip(self._rowAttributes(), rows[0]):
+            setattr(self, attr, value)
+        self._contentType = MimeType.fromString(self._contentType)
+        self._created = sqltime(self._created)
+        self._modified = sqltime(self._modified)
+
+        returnValue(self)
+
+
+    def copyRemote(self, remote):
+        &quot;&quot;&quot;
+        Copy properties from a remote (external) attachment that is being migrated.
+
+        @param remote: the external attachment
+        @type remote: L{Attachment}
+        &quot;&quot;&quot;
+        return self.changed(remote.contentType(), remote.name(), remote.md5(), remote.size())
+
+
+    def id(self):
+        return self._attachmentID
+
+
+    def dropboxID(self):
+        return self._dropboxID
+
+
+    def isManaged(self):
+        return self._dropboxID == &quot;.&quot;
+
+
+    def name(self):
+        return self._name
+
+
+    def properties(self):
+        pass  # stub
+
+
+    def store(self, contentType, dispositionName=None, migrating=False):
+        if not self._name:
+            self._name = dispositionName
+        return AttachmentStorageTransport(self, contentType, dispositionName, self._justCreated, migrating=migrating)
+
+
+    def retrieve(self, protocol):
+        return AttachmentRetrievalTransport(self._path).start(protocol)
+
+
+    def changed(self, contentType, dispositionName, md5, size):
+        raise NotImplementedError
+
+    _removeStatement = Delete(
+        From=schema.ATTACHMENT,
+        Where=(schema.ATTACHMENT.ATTACHMENT_ID == Parameter(&quot;attachmentID&quot;))
+    )
+
+
+    @inlineCallbacks
+    def remove(self, adjustQuota=True):
+        oldSize = self._size
+        self._txn.postCommit(self.removePaths)
+        yield self._internalRemove()
+
+        # Adjust quota
+        if adjustQuota:
+            home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
+            if home:
+                yield home.adjustQuotaUsedBytes(-oldSize)
+
+                # Send change notification to home
+                yield home.notifyChanged()
+
+
+    def removePaths(self):
+        &quot;&quot;&quot;
+        Remove the actual file and up to attachment parent directory if empty.
+        &quot;&quot;&quot;
+        self._path.remove()
+        self.removeParentPaths()
+
+
+    def removeParentPaths(self):
+        &quot;&quot;&quot;
+        Remove up to attachment parent directory if empty.
+        &quot;&quot;&quot;
+        parent = self._path.parent()
+        toppath = self._attachmentPathRoot().path
+        while parent.path != toppath:
+            if len(parent.listdir()) == 0:
+                parent.remove()
+                parent = parent.parent()
+            else:
+                break
+
+
+    def _internalRemove(self):
+        &quot;&quot;&quot;
+        Just delete the row; don't do any accounting / bookkeeping.  (This is
+        for attachments that have failed to be created due to errors during
+        storage.)
+        &quot;&quot;&quot;
+        return self._removeStatement.on(self._txn, attachmentID=self._attachmentID)
+
+
+    @classmethod
+    @inlineCallbacks
+    def removedHome(cls, txn, homeID):
+        &quot;&quot;&quot;
+        A calendar home is being removed so all of its attachments must go too. When removing,
+        we don't care about quota adjustment as there will be no quota once the home is removed.
+
+        TODO: this needs to be transactional wrt the actual file deletes.
+        &quot;&quot;&quot;
+        att = cls._attachmentSchema
+        attco = cls._attachmentLinkSchema
+
+        rows = (yield Select(
+            [att.ATTACHMENT_ID, att.DROPBOX_ID, ],
+            From=att,
+            Where=(
+                att.CALENDAR_HOME_RESOURCE_ID == homeID
+            ),
+        ).on(txn))
+
+        for attachmentID, dropboxID in rows:
+            if dropboxID != &quot;.&quot;:
+                attachment = DropBoxAttachment(txn, attachmentID, None, None)
+            else:
+                attachment = ManagedAttachment(txn, attachmentID, None, None)
+            attachment = (yield attachment.initFromStore())
+            if attachment._path.exists():
+                attachment.removePaths()
+
+        yield Delete(
+            From=attco,
+            Where=(
+                attco.ATTACHMENT_ID.In(Select(
+                    [att.ATTACHMENT_ID, ],
+                    From=att,
+                    Where=(
+                        att.CALENDAR_HOME_RESOURCE_ID == homeID
+                    ),
+                ))
+            ),
+        ).on(txn)
+
+        yield Delete(
+            From=att,
+            Where=(
+                att.CALENDAR_HOME_RESOURCE_ID == homeID
+            ),
+        ).on(txn)
+
+
+    # IDataStoreObject
+    def contentType(self):
+        return self._contentType
+
+
+    def md5(self):
+        return self._md5
+
+
+    def size(self):
+        return self._size
+
+
+    def created(self):
+        return self._created
+
+
+    def modified(self):
+        return self._modified
+
+
+
+class DropBoxAttachment(Attachment):
+
+    @classmethod
+    @inlineCallbacks
+    def create(cls, txn, dropboxID, name, ownerHomeID):
+        &quot;&quot;&quot;
+        Create a new Attachment object.
+
+        @param txn: The transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param dropboxID: the identifier for the attachment (dropbox id or managed id)
+        @type dropboxID: C{str}
+        @param name: the name of the attachment
+        @type name: C{str}
+        @param ownerHomeID: the resource-id of the home collection of the attachment owner
+        @type ownerHomeID: C{int}
+        &quot;&quot;&quot;
+
+        # If store has already migrated to managed attachments we will prevent creation of dropbox attachments
+        dropbox = (yield txn.store().dropboxAllowed(txn))
+        if not dropbox:
+            raise AttachmentDropboxNotAllowed
+
+        # Now create the DB entry
+        att = cls._attachmentSchema
+        rows = (yield Insert({
+            att.CALENDAR_HOME_RESOURCE_ID : ownerHomeID,
+            att.DROPBOX_ID                : dropboxID,
+            att.CONTENT_TYPE              : &quot;&quot;,
+            att.SIZE                      : 0,
+            att.MD5                       : &quot;&quot;,
+            att.PATH                      : name,
+        }, Return=(att.ATTACHMENT_ID, att.CREATED, att.MODIFIED)).on(txn))
+
+        row_iter = iter(rows[0])
+        a_id = row_iter.next()
+        created = sqltime(row_iter.next())
+        modified = sqltime(row_iter.next())
+
+        attachment = cls(txn, a_id, dropboxID, name, ownerHomeID, True)
+        attachment._created = created
+        attachment._modified = modified
+
+        # File system paths need to exist
+        try:
+            attachment._path.parent().makedirs()
+        except OSError:
+            # OK if the parent directory already exists
+            pass
+
+        returnValue(attachment)
+
+
+    @classmethod
+    @inlineCallbacks
+    def load(cls, txn, dropboxID, name):
+        attachment = cls(txn, None, dropboxID, name)
+        attachment = (yield attachment.initFromStore())
+        returnValue(attachment)
+
+
+    @property
+    def _path(self):
+        # Use directory hashing scheme based on MD5 of dropboxID
+        hasheduid = hashlib.md5(self._dropboxID).hexdigest()
+        attachmentRoot = self._attachmentPathRoot().child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
+        return attachmentRoot.child(self.name())
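+
+    # Layout sketch (hypothetical dropbox id &quot;42&quot;):
+    #     hashlib.md5(&quot;42&quot;).hexdigest() =&gt; &quot;a1d0c6e83f027327d8461063f4ac58a6&quot;
+    # so the file lands at
+    #     &lt;attachments root&gt;/a1/d0/a1d0c6e83f027327d8461063f4ac58a6/&lt;name&gt;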
+
+
+    @classmethod
+    @inlineCallbacks
+    def resourceRemoved(cls, txn, resourceID, dropboxID):
+        &quot;&quot;&quot;
+        Remove all attachments referencing the specified resource.
+        &quot;&quot;&quot;
+
+        # See if any other resources still reference this dropbox ID
+        co = schema.CALENDAR_OBJECT
+        rows = (yield Select(
+            [co.RESOURCE_ID, ],
+            From=co,
+            Where=(co.DROPBOX_ID == dropboxID).And(
+                co.RESOURCE_ID != resourceID)
+        ).on(txn))
+
+        if not rows:
+            # Find each attachment with matching dropbox ID
+            att = cls._attachmentSchema
+            rows = (yield Select(
+                [att.PATH],
+                From=att,
+                Where=(att.DROPBOX_ID == dropboxID)
+            ).on(txn))
+            for name in rows:
+                name = name[0]
+                attachment = yield cls.load(txn, dropboxID, name)
+                yield attachment.remove()
+
+
+    @inlineCallbacks
+    def changed(self, contentType, dispositionName, md5, size):
+        &quot;&quot;&quot;
+        Dropbox attachments never change their path - ignore dispositionName.
+        &quot;&quot;&quot;
+
+        self._contentType = contentType
+        self._md5 = md5
+        self._size = size
+
+        att = self._attachmentSchema
+        self._created, self._modified = map(
+            sqltime,
+            (yield Update(
+                {
+                    att.CONTENT_TYPE    : generateContentType(self._contentType),
+                    att.SIZE            : self._size,
+                    att.MD5             : self._md5,
+                    att.MODIFIED        : utcNowSQL,
+                },
+                Where=(att.ATTACHMENT_ID == self._attachmentID),
+                Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
+        )
+
+
+    @inlineCallbacks
+    def convertToManaged(self):
+        &quot;&quot;&quot;
+        Convert this dropbox attachment into a managed attachment by updating the
+        database and returning a new ManagedAttachment object that does not reference
+        any calendar object. Referencing will be added later.
+
+        @return: the managed attachment object
+        @rtype: L{ManagedAttachment}
+        &quot;&quot;&quot;
+
+        # Change the DROPBOX_ID to a single &quot;.&quot; to indicate a managed attachment.
+        att = self._attachmentSchema
+        (yield Update(
+            {att.DROPBOX_ID    : &quot;.&quot;, },
+            Where=(att.ATTACHMENT_ID == self._attachmentID),
+        ).on(self._txn))
+
+        # Create an &quot;orphaned&quot; ManagedAttachment that points to the updated data but without
+        # an actual managed-id (which only exists when there is a reference to a calendar object).
+        mattach = (yield ManagedAttachment.load(self._txn, None, None, attachmentID=self._attachmentID))
+        if mattach is None:
+            raise AttachmentMigrationFailed
+        mattach._managedID = str(uuid.uuid4())
+
+        # Then move the file on disk from the old path to the new one
+        try:
+            mattach._path.parent().makedirs()
+        except Exception:
+            # OK to fail if it already exists, otherwise must raise
+            if not mattach._path.parent().exists():
+                raise
+        oldpath = self._path
+        newpath = mattach._path
+        oldpath.moveTo(newpath)
+        self.removeParentPaths()
+
+        returnValue(mattach)
+
+
+
+class ManagedAttachment(Attachment):
+    &quot;&quot;&quot;
+    Managed attachments are ones that the server is in total control of. Clients do POSTs on calendar objects
+    to store the attachment data and have ATTACH properties added, updated or removed from the calendar objects.
+    Each ATTACH property in a calendar object has a MANAGED-ID iCalendar parameter that is used in the POST requests
+    to target a specific attachment. The MANAGED-ID values are unique to each calendar object resource, though
+    multiple calendar object resources can point to the same underlying attachment as there is a separate database
+    table that maps calendar objects/managed-ids to actual attachments.
+    &quot;&quot;&quot;
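+
+    # Illustration only (all values made up): to a client, a managed
+    # attachment appears in the calendar data as an ATTACH property carrying
+    # the MANAGED-ID parameter described above, e.g.:
+    #
+    #     ATTACH;MANAGED-ID=f47ac10b-58cc-4372-a567-0e02b2c3d479;
+    #      FMTTYPE=image/jpeg;FILENAME=photo.jpg;SIZE=2048:
+    #      https://calendar.example.com/.../photo-f47ac10b.jpg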
+
+    @classmethod
+    @inlineCallbacks
+    def _create(cls, txn, managedID, ownerHomeID):
+        &quot;&quot;&quot;
+        Create a new managed Attachment object.
+
+        @param txn: The transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param managedID: the identifier for the attachment
+        @type managedID: C{str}
+        @param ownerHomeID: the resource-id of the home collection of the attachment owner
+        @type ownerHomeID: C{int}
+        &quot;&quot;&quot;
+
+        # Now create the DB entry
+        att = cls._attachmentSchema
+        rows = (yield Insert({
+            att.CALENDAR_HOME_RESOURCE_ID : ownerHomeID,
+            att.DROPBOX_ID                : &quot;.&quot;,
+            att.CONTENT_TYPE              : &quot;&quot;,
+            att.SIZE                      : 0,
+            att.MD5                       : &quot;&quot;,
+            att.PATH                      : &quot;&quot;,
+        }, Return=(att.ATTACHMENT_ID, att.CREATED, att.MODIFIED)).on(txn))
+
+        row_iter = iter(rows[0])
+        a_id = row_iter.next()
+        created = sqltime(row_iter.next())
+        modified = sqltime(row_iter.next())
+
+        attachment = cls(txn, a_id, &quot;.&quot;, None, ownerHomeID, True)
+        attachment._managedID = managedID
+        attachment._created = created
+        attachment._modified = modified
+
+        # File system paths need to exist
+        try:
+            attachment._path.parent().makedirs()
+        except OSError:
+            # OK if the parent directory already exists
+            pass
+
+        returnValue(attachment)
+
+
+    @classmethod
+    @inlineCallbacks
+    def create(cls, txn, managedID, ownerHomeID, referencedBy):
+        &quot;&quot;&quot;
+        Create a new Attachment object and reference it.
+
+        @param txn: The transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param managedID: the identifier for the attachment
+        @type managedID: C{str}
+        @param ownerHomeID: the resource-id of the home collection of the attachment owner
+        @type ownerHomeID: C{int}
+        @param referencedBy: the resource-id of the calendar object referencing the attachment
+        @type referencedBy: C{int}
+        &quot;&quot;&quot;
+
+        # Now create the DB entry
+        attachment = (yield cls._create(txn, managedID, ownerHomeID))
+        attachment._objectResourceID = referencedBy
+
+        # Create the attachment&lt;-&gt;calendar object relationship for managed attachments
+        attco = cls._attachmentLinkSchema
+        yield Insert({
+            attco.ATTACHMENT_ID               : attachment._attachmentID,
+            attco.MANAGED_ID                  : attachment._managedID,
+            attco.CALENDAR_OBJECT_RESOURCE_ID : attachment._objectResourceID,
+        }).on(txn)
+
+        returnValue(attachment)
+
+
+    @classmethod
+    @inlineCallbacks
+    def update(cls, txn, oldManagedID, ownerHomeID, referencedBy, oldAttachmentID):
+        &quot;&quot;&quot;
+        Update an Attachment object. This creates a new one and adjusts the reference to the old
+        one to point to the new one. If the old one is no longer referenced at all, it is deleted.
+
+        @param txn: The transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param oldManagedID: the identifier for the original attachment
+        @type oldManagedID: C{str}
+        @param ownerHomeID: the resource-id of the home collection of the attachment owner
+        @type ownerHomeID: C{int}
+        @param referencedBy: the resource-id of the calendar object referencing the attachment
+        @type referencedBy: C{int}
+        @param oldAttachmentID: the attachment-id of the existing attachment being updated
+        @type oldAttachmentID: C{int}
+        &quot;&quot;&quot;
+
+        # Now create the DB entry with a new managed-ID
+        managed_id = str(uuid.uuid4())
+        attachment = (yield cls._create(txn, managed_id, ownerHomeID))
+        attachment._objectResourceID = referencedBy
+
+        # Update the attachment&lt;-&gt;calendar object relationship for managed attachments
+        attco = cls._attachmentLinkSchema
+        yield Update(
+            {
+                attco.ATTACHMENT_ID    : attachment._attachmentID,
+                attco.MANAGED_ID       : attachment._managedID,
+            },
+            Where=(attco.MANAGED_ID == oldManagedID).And(
+                attco.CALENDAR_OBJECT_RESOURCE_ID == attachment._objectResourceID
+            ),
+        ).on(txn)
+
+        # Now check whether old attachmentID is still referenced - if not delete it
+        rows = (yield Select(
+            [attco.ATTACHMENT_ID, ],
+            From=attco,
+            Where=(attco.ATTACHMENT_ID == oldAttachmentID),
+        ).on(txn))
+        aids = [row[0] for row in rows] if rows is not None else ()
+        if len(aids) == 0:
+            oldattachment = ManagedAttachment(txn, oldAttachmentID, None, None)
+            oldattachment = (yield oldattachment.initFromStore())
+            yield oldattachment.remove()
+
+        returnValue(attachment)
+
+
+    @classmethod
+    @inlineCallbacks
+    def load(cls, txn, referencedID, managedID, attachmentID=None):
+        &quot;&quot;&quot;
+        Load a ManagedAttachment via either its managedID or attachmentID.
+        &quot;&quot;&quot;
+
+        if managedID:
+            attco = cls._attachmentLinkSchema
+            where = (attco.MANAGED_ID == managedID)
+            if referencedID is not None:
+                where = where.And(attco.CALENDAR_OBJECT_RESOURCE_ID == referencedID)
+            rows = (yield Select(
+                [attco.ATTACHMENT_ID, ],
+                From=attco,
+                Where=where,
+            ).on(txn))
+            if len(rows) == 0:
+                returnValue(None)
+            elif referencedID is not None and len(rows) != 1:
+                raise AttachmentStoreValidManagedID
+            attachmentID = rows[0][0]
+
+        attachment = cls(txn, attachmentID, None, None)
+        attachment = (yield attachment.initFromStore())
+        attachment._managedID = managedID
+        attachment._objectResourceID = referencedID
+        returnValue(attachment)
+
+
+    @classmethod
+    @inlineCallbacks
+    def referencesTo(cls, txn, managedID):
+        &quot;&quot;&quot;
+        Find all the calendar object resourceIds referenced by this supplied managed-id.
+        &quot;&quot;&quot;
+        attco = cls._attachmentLinkSchema
+        rows = (yield Select(
+            [attco.CALENDAR_OBJECT_RESOURCE_ID, ],
+            From=attco,
+            Where=(attco.MANAGED_ID == managedID),
+        ).on(txn))
+        cobjs = set([row[0] for row in rows]) if rows is not None else set()
+        returnValue(cobjs)
+
+
+    @classmethod
+    @inlineCallbacks
+    def usedManagedID(cls, txn, managedID):
+        &quot;&quot;&quot;
+        Return the &quot;owner&quot; home, the referencing calendar object resource id, and the iCalendar UID for a managed-id.
+        &quot;&quot;&quot;
+        att = cls._attachmentSchema
+        attco = cls._attachmentLinkSchema
+        co = schema.CALENDAR_OBJECT
+        rows = (yield Select(
+            [
+                att.CALENDAR_HOME_RESOURCE_ID,
+                attco.CALENDAR_OBJECT_RESOURCE_ID,
+                co.ICALENDAR_UID,
+            ],
+            From=att.join(
+                attco, att.ATTACHMENT_ID == attco.ATTACHMENT_ID, &quot;left outer&quot;
+            ).join(co, co.RESOURCE_ID == attco.CALENDAR_OBJECT_RESOURCE_ID),
+            Where=(attco.MANAGED_ID == managedID),
+        ).on(txn))
+        returnValue(rows)
+
+
+    @classmethod
+    @inlineCallbacks
+    def resourceRemoved(cls, txn, resourceID):
+        &quot;&quot;&quot;
+        Remove all attachments referencing the specified resource.
+        &quot;&quot;&quot;
+
+        # Find all reference attachment-ids and dereference
+        attco = cls._attachmentLinkSchema
+        rows = (yield Select(
+            [attco.MANAGED_ID, ],
+            From=attco,
+            Where=(attco.CALENDAR_OBJECT_RESOURCE_ID == resourceID),
+        ).on(txn))
+        mids = set([row[0] for row in rows]) if rows is not None else set()
+        for managedID in mids:
+            attachment = (yield ManagedAttachment.load(txn, resourceID, managedID))
+            (yield attachment.removeFromResource(resourceID))
+
+
+    @classmethod
+    @inlineCallbacks
+    def copyManagedID(cls, txn, managedID, referencedBy):
+        &quot;&quot;&quot;
+        Associate an existing attachment with the new resource.
+        &quot;&quot;&quot;
+
+        # Find the associated attachment-id and insert new reference
+        attco = cls._attachmentLinkSchema
+        aid = (yield Select(
+            [attco.ATTACHMENT_ID, ],
+            From=attco,
+            Where=(attco.MANAGED_ID == managedID),
+        ).on(txn))[0][0]
+
+        yield Insert({
+            attco.ATTACHMENT_ID               : aid,
+            attco.MANAGED_ID                  : managedID,
+            attco.CALENDAR_OBJECT_RESOURCE_ID : referencedBy,
+        }).on(txn)
+
+
+    def managedID(self):
+        return self._managedID
+
+
+    @inlineCallbacks
+    def objectResource(self):
+        &quot;&quot;&quot;
+        Return the calendar object resource associated with this attachment.
+        &quot;&quot;&quot;
+
+        home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
+        obj = (yield home.objectResourceWithID(self._objectResourceID))
+        returnValue(obj)
+
+
+    @property
+    def _path(self):
+        # Use directory hashing scheme based on MD5 of attachmentID
+        hasheduid = hashlib.md5(str(self._attachmentID)).hexdigest()
+        return self._attachmentPathRoot().child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
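+
+    # Same fan-out scheme as DropBoxAttachment._path, but hashed on the
+    # stringified integer attachment id and with no trailing name component,
+    # e.g. (hypothetical id 42):
+    #     &lt;attachments root&gt;/a1/d0/a1d0c6e83f027327d8461063f4ac58a6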
+
+
+    @inlineCallbacks
+    def location(self):
+        &quot;&quot;&quot;
+        Return the URI location of the attachment.
+        &quot;&quot;&quot;
+        if not hasattr(self, &quot;_ownerName&quot;):
+            home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
+            self._ownerName = home.name()
+        if not hasattr(self, &quot;_objectDropboxID&quot;):
+            if not hasattr(self, &quot;_objectResource&quot;):
+                self._objectResource = (yield self.objectResource())
+            self._objectDropboxID = self._objectResource._dropboxID
+
+        fname = self.lastSegmentOfUriPath(self._managedID, self._name)
+        location = self._txn._store.attachmentsURIPattern % {
+            &quot;home&quot;: self._ownerName,
+            &quot;dropbox_id&quot;: urllib.quote(self._objectDropboxID),
+            &quot;name&quot;: urllib.quote(fname),
+        }
+        returnValue(location)
+
+
+    @classmethod
+    def lastSegmentOfUriPath(cls, managed_id, name):
+        splits = name.rsplit(&quot;.&quot;, 1)
+        fname = splits[0]
+        suffix = splits[1] if len(splits) == 2 else &quot;unknown&quot;
+        return &quot;{0}-{1}.{2}&quot;.format(fname, managed_id[:8], suffix)
+
+
+    @inlineCallbacks
+    def changed(self, contentType, dispositionName, md5, size):
+        &quot;&quot;&quot;
+        Always update the name to the current disposition name.
+        &quot;&quot;&quot;
+
+        self._contentType = contentType
+        self._name = dispositionName
+        self._md5 = md5
+        self._size = size
+        att = self._attachmentSchema
+        self._created, self._modified = map(
+            sqltime,
+            (yield Update(
+                {
+                    att.CONTENT_TYPE    : generateContentType(self._contentType),
+                    att.SIZE            : self._size,
+                    att.MD5             : self._md5,
+                    att.MODIFIED        : utcNowSQL,
+                    att.PATH            : self._name,
+                },
+                Where=(att.ATTACHMENT_ID == self._attachmentID),
+                Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
+        )
+
+
+    @inlineCallbacks
+    def newReference(self, resourceID):
+        &quot;&quot;&quot;
+        Create a new reference of this attachment to the supplied calendar object resource id, and
+        return a ManagedAttachment for the new reference.
+
+        @param resourceID: the resource id to reference
+        @type resourceID: C{int}
+
+        @return: the new managed attachment
+        @rtype: L{ManagedAttachment}
+        &quot;&quot;&quot;
+
+        attco = self._attachmentLinkSchema
+        yield Insert({
+            attco.ATTACHMENT_ID               : self._attachmentID,
+            attco.MANAGED_ID                  : self._managedID,
+            attco.CALENDAR_OBJECT_RESOURCE_ID : resourceID,
+        }).on(self._txn)
+
+        mattach = (yield ManagedAttachment.load(self._txn, resourceID, self._managedID))
+        returnValue(mattach)
+
+
+    @inlineCallbacks
+    def removeFromResource(self, resourceID):
+
+        # Delete the reference
+        attco = self._attachmentLinkSchema
+        yield Delete(
+            From=attco,
+            Where=(attco.ATTACHMENT_ID == self._attachmentID).And(
+                attco.CALENDAR_OBJECT_RESOURCE_ID == resourceID),
+        ).on(self._txn)
+
+        # Check whether any references still exist - if not, remove the actual attachment
+        rows = (yield Select(
+            [attco.CALENDAR_OBJECT_RESOURCE_ID, ],
+            From=attco,
+            Where=(attco.ATTACHMENT_ID == self._attachmentID),
+        ).on(self._txn))
+        if len(rows) == 0:
+            yield self.remove()
+
+
+    @inlineCallbacks
+    def attachProperty(self):
+        &quot;&quot;&quot;
+        Return an iCalendar ATTACH property for this attachment.
+        &quot;&quot;&quot;
+        attach = Property(&quot;ATTACH&quot;, &quot;&quot;, valuetype=Value.VALUETYPE_URI)
+        location = (yield self.updateProperty(attach))
+        returnValue((attach, location,))
+
+
+    @inlineCallbacks
+    def updateProperty(self, attach):
+        &quot;&quot;&quot;
+        Update an iCalendar ATTACH property for this attachment.
+        &quot;&quot;&quot;
+
+        location = (yield self.location())
+
+        attach.setParameter(&quot;MANAGED-ID&quot;, self.managedID())
+        attach.setParameter(&quot;FMTTYPE&quot;, &quot;{0}/{1}&quot;.format(self.contentType().mediaType, self.contentType().mediaSubtype))
+        attach.setParameter(&quot;FILENAME&quot;, self.name())
+        attach.setParameter(&quot;SIZE&quot;, str(self.size()))
+        attach.setValue(location)
+
+        returnValue(location)
</ins></span></pre></div>
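<p>The <code>_path</code> property above shards attachment files across two levels of subdirectories derived from an MD5 digest of the attachment id. A minimal standalone sketch of that scheme (the root path and attachment id below are illustrative, not values from the store):</p>
<pre>
import hashlib
import os.path

def shardedPath(root, attachmentID):
    # Digest of the stringified id; the first two hex pairs spread
    # entries across 256 * 256 intermediate directories.
    hasheduid = hashlib.md5(str(attachmentID).encode(&quot;ascii&quot;)).hexdigest()
    return os.path.join(root, hasheduid[0:2], hasheduid[2:4], hasheduid)

print(shardedPath(&quot;/var/attachments&quot;, 12345))
# -&gt; /var/attachments/82/7c/827ccb0eea8a706c4c34a16891f84e7b
</pre>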
<a id="CalendarServertrunktxdavcaldavdatastoresql_directorypyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcaldavdatastoresql_directorypy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/caldav/datastore/sql_directory.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_directory.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/sql_directory.py                                (rev 0)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql_directory.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,73 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.enterprise.dal.syntax import Select, Parameter
+from twisted.internet.defer import inlineCallbacks, returnValue
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.datastore.sql_directory import GroupsRecord
+
+&quot;&quot;&quot;
+Classes and methods that relate to directory objects in the SQL store, e.g.
+delegates, groups, etc.
+&quot;&quot;&quot;
+
+class GroupAttendeeRecord(SerializableRecord, fromTable(schema.GROUP_ATTENDEE)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.GROUP_ATTENDEE}.
+    &quot;&quot;&quot;
+
+    @classmethod
+    @inlineCallbacks
+    def groupAttendeesForObjects(cls, txn, cobjs):
+        &quot;&quot;&quot;
+        Get delegator/group pairs for each of the specified calendar objects.
+        &quot;&quot;&quot;
+
+        # Do a join to get what we need
+        rows = yield Select(
+            list(GroupAttendeeRecord.table) + list(GroupsRecord.table),
+            From=GroupAttendeeRecord.table.join(GroupsRecord.table, GroupAttendeeRecord.groupID == GroupsRecord.groupID),
+            Where=(GroupAttendeeRecord.resourceID.In(Parameter(&quot;cobjs&quot;, len(cobjs))))
+        ).on(txn, cobjs=cobjs)
+
+        results = []
+        groupAttendeeNames = [GroupAttendeeRecord.__colmap__[column] for column in list(GroupAttendeeRecord.table)]
+        groupsNames = [GroupsRecord.__colmap__[column] for column in list(GroupsRecord.table)]
+        split_point = len(groupAttendeeNames)
+        for row in rows:
+            groupAttendeeRow = row[:split_point]
+            groupAttendeeRecord = GroupAttendeeRecord()
+            groupAttendeeRecord._attributesFromRow(zip(groupAttendeeNames, groupAttendeeRow))
+            groupAttendeeRecord.transaction = txn
+            groupsRow = row[split_point:]
+            groupsRecord = GroupsRecord()
+            groupsRecord._attributesFromRow(zip(groupsNames, groupsRow))
+            groupsRecord.transaction = txn
+            results.append((groupAttendeeRecord, groupsRecord,))
+
+        returnValue(results)
+
+
+
+class GroupShareeRecord(SerializableRecord, fromTable(schema.GROUP_SHAREE)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.GROUP_SHAREE}.
+    &quot;&quot;&quot;
+    pass
</ins></span></pre></div>
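<p><code>groupAttendeesForObjects</code> above rebuilds two records from each joined row by slicing at the first table's column count. The same split in isolation, with made-up column lists standing in for the real <code>__colmap__</code> table metadata:</p>
<pre>
# Hypothetical column name lists; the real code derives these from the
# tables' __colmap__ metadata.
groupAttendeeNames = [&quot;groupID&quot;, &quot;resourceID&quot;, &quot;membershipHash&quot;]
groupsNames = [&quot;groupID&quot;, &quot;groupUID&quot;, &quot;groupName&quot;]

def splitJoinedRow(row):
    # Columns of the first table come first in the SELECT, so the split
    # point is simply that table's column count.
    split_point = len(groupAttendeeNames)
    attendee = dict(zip(groupAttendeeNames, row[:split_point]))
    group = dict(zip(groupsNames, row[split_point:]))
    return attendee, group

attendee, group = splitJoinedRow((4, 101, &quot;hash1&quot;, 4, &quot;uid-1&quot;, &quot;Team&quot;))
</pre>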
<a id="CalendarServertrunktxdavcaldavdatastoresql_externalpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/sql_external.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/sql_external.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql_external.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -18,12 +18,15 @@
</span><span class="cx"> SQL backend for CalDAV storage when resources are external.
</span><span class="cx"> &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-from twisted.internet.defer import succeed, inlineCallbacks, returnValue
</del><ins>+from twisted.internet.defer import inlineCallbacks, returnValue
</ins><span class="cx"> 
</span><span class="cx"> from twext.python.log import Logger
</span><span class="cx"> 
</span><span class="cx"> from txdav.caldav.datastore.sql import CalendarHome, Calendar, CalendarObject
</span><ins>+from txdav.caldav.datastore.sql_attachment import Attachment, AttachmentLink
+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord, GroupShareeRecord
</ins><span class="cx"> from txdav.caldav.icalendarstore import ComponentUpdateState, ComponentRemoveState
</span><ins>+from txdav.common.datastore.sql_directory import GroupsRecord
</ins><span class="cx"> from txdav.common.datastore.sql_external import CommonHomeExternal, CommonHomeChildExternal, \
</span><span class="cx">     CommonObjectResourceExternal
</span><span class="cx"> 
</span><span class="lines">@@ -34,10 +37,10 @@
</span><span class="cx">     Wrapper for a CalendarHome that is external and only supports a limited set of operations.
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-    def __init__(self, transaction, ownerUID, resourceID):
</del><ins>+    def __init__(self, transaction, homeData):
</ins><span class="cx"> 
</span><del>-        CalendarHome.__init__(self, transaction, ownerUID)
-        CommonHomeExternal.__init__(self, transaction, ownerUID, resourceID)
</del><ins>+        CalendarHome.__init__(self, transaction, homeData)
+        CommonHomeExternal.__init__(self, transaction, homeData)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     def hasCalendarResourceUIDSomewhereElse(self, uid, ok_object, mode):
</span><span class="lines">@@ -61,6 +64,36 @@
</span><span class="cx">         raise AssertionError(&quot;CommonHomeExternal: not supported&quot;)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    @inlineCallbacks
+    def getAllAttachments(self):
+        &quot;&quot;&quot;
+        Return all the L{Attachment} objects associated with this calendar home.
+        Needed during migration.
+        &quot;&quot;&quot;
+        raw_results = yield self._txn.store().conduit.send_home_get_all_attachments(self)
+        returnValue([Attachment.deserialize(self._txn, attachment) for attachment in raw_results])
+
+
+    @inlineCallbacks
+    def readAttachmentData(self, remote_id, attachment):
+        &quot;&quot;&quot;
+        Read the data for an attachment associated with this calendar home.
+        Needed during migration only.
+        &quot;&quot;&quot;
+        stream = attachment.store(attachment.contentType(), attachment.name(), migrating=True)
+        yield self._txn.store().conduit.send_get_attachment_data(self, remote_id, stream)
+
+
+    @inlineCallbacks
+    def getAttachmentLinks(self):
+        &quot;&quot;&quot;
+        Read the attachment&lt;-&gt;calendar object mapping data associated with this calendar home.
+        Needed during migration only.
+        &quot;&quot;&quot;
+        raw_results = yield self._txn.store().conduit.send_home_get_attachment_links(self)
+        returnValue([AttachmentLink.deserialize(self._txn, attachment) for attachment in raw_results])
+
+
</ins><span class="cx">     def getAllDropboxIDs(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         No children.
</span><span class="lines">@@ -82,13 +115,17 @@
</span><span class="cx">         raise AssertionError(&quot;CommonHomeExternal: not supported&quot;)
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def createdHome(self):
</del><ins>+    @inlineCallbacks
+    def getAllGroupAttendees(self):
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        No children - make this a no-op.
</del><ins>+        Return a list of (L{GroupAttendeeRecord}, L{GroupsRecord}) pairs, one for each group attendee referenced in calendar data
+        owned by this home.
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        return succeed(None)
</del><span class="cx"> 
</span><ins>+        raw_results = yield self._txn.store().conduit.send_home_get_all_group_attendees(self)
+        returnValue([(GroupAttendeeRecord.deserialize(item[0]), GroupsRecord.deserialize(item[1]),) for item in raw_results])
</ins><span class="cx"> 
</span><ins>+
</ins><span class="cx">     def splitCalendars(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         No children.
</span><span class="lines">@@ -157,10 +194,16 @@
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx">     SQL-based implementation of L{ICalendar}.
</span><span class="cx">     &quot;&quot;&quot;
</span><del>-    pass
</del><span class="cx"> 
</span><ins>+    @inlineCallbacks
+    def groupSharees(self):
+        results = yield self._txn.store().conduit.send_homechild_group_sharees(self)
+        results[&quot;groups&quot;] = [GroupsRecord.deserialize(items) for items in results[&quot;groups&quot;]]
+        results[&quot;sharees&quot;] = [GroupShareeRecord.deserialize(items) for items in results[&quot;sharees&quot;]]
+        returnValue(results)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><ins>+
</ins><span class="cx"> class CalendarObjectExternal(CommonObjectResourceExternal, CalendarObject):
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx">     SQL-based implementation of L{ICalendarObject}.
</span></span></pre></div>
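<p>Each of the external-home methods above follows the same conduit pattern: forward the request to the remote pod, then rehydrate the JSON-friendly results with <code>deserialize</code>. A reduced sketch of that round trip (this <code>Record</code> class is a stand-in, not the real <code>SerializableRecord</code>):</p>
<pre>
class Record(object):
    fields = (&quot;groupID&quot;, &quot;groupUID&quot;)

    def serialize(self):
        # Flatten to a plain dict so the conduit can ship it as JSON.
        return dict((f, getattr(self, f)) for f in self.fields)

    @classmethod
    def deserialize(cls, data):
        # Rebuild a record instance from the wire representation.
        record = cls()
        for f in cls.fields:
            setattr(record, f, data[f])
        return record

original = Record()
original.groupID, original.groupUID = 4, &quot;uid-1&quot;
payload = [original.serialize()]                     # sending pod
restored = [Record.deserialize(d) for d in payload]  # receiving pod
</pre>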
<a id="CalendarServertrunktxdavcaldavdatastoretestcommonpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/test/common.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/test/common.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/common.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -375,7 +375,7 @@
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def notificationUnderTest(self):
</span><span class="cx">         txn = self.transactionUnderTest()
</span><del>-        notifications = yield txn.notificationsWithUID(&quot;home1&quot;)
</del><ins>+        notifications = yield txn.notificationsWithUID(&quot;home1&quot;, create=True)
</ins><span class="cx">         yield notifications.writeNotificationObject(
</span><span class="cx">             &quot;abc&quot;,
</span><span class="cx">             json.loads(&quot;{\&quot;notification-type\&quot;:\&quot;invite-notification\&quot;}&quot;),
</span><span class="lines">@@ -402,7 +402,7 @@
</span><span class="cx">         objects changed or deleted since
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         txn = self.transactionUnderTest()
</span><del>-        coll = yield txn.notificationsWithUID(&quot;home1&quot;)
</del><ins>+        coll = yield txn.notificationsWithUID(&quot;home1&quot;, create=True)
</ins><span class="cx">         yield coll.writeNotificationObject(
</span><span class="cx">             &quot;1&quot;,
</span><span class="cx">             json.loads(&quot;{\&quot;notification-type\&quot;:\&quot;invite-notification\&quot;}&quot;),
</span><span class="lines">@@ -435,7 +435,7 @@
</span><span class="cx">         overwrite the notification object.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         notifications = yield self.transactionUnderTest().notificationsWithUID(
</span><del>-            &quot;home1&quot;
</del><ins>+            &quot;home1&quot;, create=True
</ins><span class="cx">         )
</span><span class="cx">         yield notifications.writeNotificationObject(
</span><span class="cx">             &quot;abc&quot;,
</span><span class="lines">@@ -462,7 +462,7 @@
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         # Prime the home collection first
</span><span class="cx">         yield self.transactionUnderTest().notificationsWithUID(
</span><del>-            &quot;home1&quot;
</del><ins>+            &quot;home1&quot;, create=True
</ins><span class="cx">         )
</span><span class="cx">         yield self.commit()
</span><span class="cx"> 
</span><span class="lines">@@ -512,7 +512,7 @@
</span><span class="cx">         overwrite the notification object.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         notifications = yield self.transactionUnderTest().notificationsWithUID(
</span><del>-            &quot;home1&quot;
</del><ins>+            &quot;home1&quot;, create=True
</ins><span class="cx">         )
</span><span class="cx">         yield notifications.writeNotificationObject(
</span><span class="cx">             &quot;abc&quot;,
</span><span class="lines">@@ -555,7 +555,7 @@
</span><span class="cx">         L{INotificationCollection} that the object was retrieved from.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         txn = self.transactionUnderTest()
</span><del>-        collection = yield txn.notificationsWithUID(&quot;home1&quot;)
</del><ins>+        collection = yield txn.notificationsWithUID(&quot;home1&quot;, create=True)
</ins><span class="cx">         notification = yield self.notificationUnderTest()
</span><span class="cx">         self.assertIdentical(collection, notification.notificationCollection())
</span><span class="cx"> 
</span></span></pre></div>
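<p>The test edits above all track one API change: <code>notificationsWithUID</code> no longer creates the notification collection implicitly, so callers that relied on auto-creation must now say so. In outline (the <code>txn</code> object is assumed):</p>
<pre>
# Before this revision the collection appeared on first access:
#     notifications = yield txn.notificationsWithUID(&quot;home1&quot;)
# Now creation must be requested explicitly:
notifications = yield txn.notificationsWithUID(&quot;home1&quot;, create=True)
</pre>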
<a id="CalendarServertrunktxdavcaldavdatastoretesttest_attachmentspy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_attachments.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/test/test_attachments.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_attachments.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -29,8 +29,7 @@
</span><span class="cx"> from twistedcaldav.config import config
</span><span class="cx"> from twistedcaldav.ical import Property, Component
</span><span class="cx"> 
</span><del>-from txdav.caldav.datastore.sql import CalendarStoreFeatures, DropBoxAttachment, \
-    ManagedAttachment
</del><ins>+from txdav.caldav.datastore.sql import CalendarStoreFeatures
</ins><span class="cx"> from txdav.caldav.datastore.test.common import CaptureProtocol
</span><span class="cx"> from txdav.caldav.icalendarstore import IAttachmentStorageTransport, IAttachment, \
</span><span class="cx">     QuotaExceeded, AttachmentSizeTooLarge
</span><span class="lines">@@ -40,6 +39,8 @@
</span><span class="cx"> 
</span><span class="cx"> import hashlib
</span><span class="cx"> import os
</span><ins>+from txdav.caldav.datastore.sql_attachment import DropBoxAttachment, \
+    ManagedAttachment
</ins><span class="cx"> 
</span><span class="cx"> &quot;&quot;&quot;
</span><span class="cx"> Tests for txdav.caldav.datastore.sql attachment handling.
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoretesttest_schedulepy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/caldav/datastore/test/test_schedule.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/test/test_schedule.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_schedule.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,70 +0,0 @@
</span><del>-##
-# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-&quot;&quot;&quot;
-Tests for L{txdav.caldav.datastore.scheduling}.
-
-The aforementioned module is intended to eventually support implicit
-scheduling; however, it does not currently.  The interim purpose of this module
-and accompanying tests is to effectively test the interface specifications to
-make sure that the common tests don't require anything I{not} specified in the
-interface, so that dynamic proxies specified with a tool like
-C{proxyForInterface} can be used to implement features such as implicit
-scheduling or data caching as middleware in the data-store layer.
-&quot;&quot;&quot;
-
-from twisted.trial.unittest import TestCase, SkipTest
-from txdav.caldav.datastore.test.test_file import FileStorageTests
-from txdav.caldav.datastore.schedule import ImplicitStore
-
-simpleEvent = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:12345-67890
-DTSTART:20080601T120000Z
-DTEND:20080601T130000Z
-ORGANIZER:mailto:user1@example.com
-ATTENDEE:mailto:user1@example.com
-ATTENDEE:mailto:user2@example.com
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;
-
-class ImplicitStoreTests(FileStorageTests, TestCase):
-    &quot;&quot;&quot;
-    Tests for L{ImplicitSchedulingStore}.
-    &quot;&quot;&quot;
-
-    implicitStore = None
-
-    def storeUnderTest(self):
-        if self.implicitStore is None:
-            sut = super(ImplicitStoreTests, self).storeUnderTest()
-            self.implicitStore = ImplicitStore(sut)
-        return self.implicitStore
-
-
-    def skipit(self):
-        raise SkipTest(&quot;No private attribute tests.&quot;)
-
-    test_calendarObjectsWithDotFile = skipit
-    test_countComponentTypes = skipit
-    test_init = skipit
-    test_calendarObjectsWithDirectory = skipit
-    test_hasCalendarResourceUIDSomewhereElse = skipit
-
-del FileStorageTests
</del></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoretesttest_sqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -767,13 +767,13 @@
</span><span class="cx">         txn2 = calendarStore.newTransaction()
</span><span class="cx"> 
</span><span class="cx">         notification_uid1_1 = yield txn1.notificationsWithUID(
</span><del>-            &quot;uid1&quot;,
</del><ins>+            &quot;uid1&quot;, create=True
</ins><span class="cx">         )
</span><span class="cx"> 
</span><span class="cx">         @inlineCallbacks
</span><span class="cx">         def _defer_notification_uid1_2():
</span><span class="cx">             notification_uid1_2 = yield txn2.notificationsWithUID(
</span><del>-                &quot;uid1&quot;,
</del><ins>+                &quot;uid1&quot;, create=True
</ins><span class="cx">             )
</span><span class="cx">             yield txn2.commit()
</span><span class="cx">             returnValue(notification_uid1_2)
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoretesttest_sql_sharingpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_sql_sharing.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/test/test_sql_sharing.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_sql_sharing.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -464,7 +464,7 @@
</span><span class="cx">         shared = yield self.calendarUnderTest(home=&quot;user02&quot;, name=sharedName)
</span><span class="cx">         self.assertTrue(shared is not None)
</span><span class="cx"> 
</span><del>-        notifyHome = yield self.transactionUnderTest().notificationsWithUID(&quot;user02&quot;)
</del><ins>+        notifyHome = yield self.transactionUnderTest().notificationsWithUID(&quot;user02&quot;, create=True)
</ins><span class="cx">         notifications = yield notifyHome.listNotificationObjects()
</span><span class="cx">         self.assertEqual(len(notifications), 0)
</span><span class="cx"> 
</span><span class="lines">@@ -587,7 +587,42 @@
</span><span class="cx">         yield self.commit()
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    @inlineCallbacks
+    def test_sharingBindRecords(self):
</ins><span class="cx"> 
</span><ins>+        yield self.calendarUnderTest(home=&quot;user01&quot;, name=&quot;calendar&quot;)
+        yield self.commit()
+
+        shared_name = yield self._createShare()
+
+        shared = yield self.calendarUnderTest(home=&quot;user01&quot;, name=&quot;calendar&quot;)
+        results = yield shared.sharingBindRecords()
+        self.assertEqual(len(results), 1)
+        self.assertEqual(results.keys(), [&quot;user02&quot;])
+        self.assertEqual(results[&quot;user02&quot;].calendarResourceName, shared_name)
+
+
+    @inlineCallbacks
+    def test_sharedToBindRecords(self):
+
+        yield self.calendarUnderTest(home=&quot;user01&quot;, name=&quot;calendar&quot;)
+        yield self.commit()
+
+        shared_name = yield self._createShare()
+
+        home = yield self.homeUnderTest(name=&quot;user02&quot;)
+        results = yield home.sharedToBindRecords()
+        self.assertEqual(len(results), 1)
+        self.assertEqual(results.keys(), [&quot;user01&quot;])
+        sharedRecord = results[&quot;user01&quot;][0]
+        ownerRecord = results[&quot;user01&quot;][1]
+        metadataRecord = results[&quot;user01&quot;][2]
+        self.assertEqual(ownerRecord.calendarResourceName, &quot;calendar&quot;)
+        self.assertEqual(sharedRecord.calendarResourceName, shared_name)
+        self.assertEqual(metadataRecord.supportedComponents, None)
+
+
+
</ins><span class="cx"> class GroupSharingTests(BaseSharingTests):
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx">     Test store-based group sharing.
</span><span class="lines">@@ -619,7 +654,7 @@
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def _check_notifications(self, uid, items):
</span><del>-        notifyHome = yield self.transactionUnderTest().notificationsWithUID(uid)
</del><ins>+        notifyHome = yield self.transactionUnderTest().notificationsWithUID(uid, create=True)
</ins><span class="cx">         notifications = yield notifyHome.listNotificationObjects()
</span><span class="cx">         self.assertEqual(set(notifications), set(items))
</span><span class="cx"> 
</span></span></pre></div>
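<p>The two new tests above pin down the result shapes of the sharing APIs: <code>sharingBindRecords</code> maps sharee uid to that sharee's bind record, while <code>sharedToBindRecords</code> maps owner uid to a (shared bind, owner bind, metadata) triple. Sketched with a stand-in record type:</p>
<pre>
from collections import namedtuple

# Illustrative stand-in for the real bind record class.
BindRecord = namedtuple(&quot;BindRecord&quot;, [&quot;calendarResourceName&quot;])

# sharingBindRecords(): sharee uid -&gt; bind record for that sharee.
sharing = {&quot;user02&quot;: BindRecord(&quot;shared-calendar&quot;)}

# sharedToBindRecords(): owner uid -&gt; (shared bind, owner bind, metadata).
sharedTo = {&quot;user01&quot;: (BindRecord(&quot;shared-calendar&quot;),
                       BindRecord(&quot;calendar&quot;),
                       None)}  # metadata; supportedComponents was None above
</pre>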
<a id="CalendarServertrunktxdavcaldavdatastoreutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/util.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/util.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -522,6 +522,11 @@
</span><span class="cx">             self._contentType = http_headers.MimeType.fromString(getType(self._attachment.name(), self.contentTypes))
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def resetDetails(self, contentType, dispositionName):
+        self._contentType = contentType
+        self._dispositionName = dispositionName
+
+
</ins><span class="cx">     def write(self, data):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Children must override this to actually write the data, but should
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavicalendarstorepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/icalendarstore.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/icalendarstore.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/icalendarstore.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -906,7 +906,7 @@
</span><span class="cx">                             is done (more than RAW).
</span><span class="cx"> 
</span><span class="cx">     RAW                   - store the supplied data as-is without any processing or validation. This is used
</span><del>-                            for unit testing purposes only.
</del><ins>+                            for unit testing purposes only, or during migration.
</ins><span class="cx">     &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">     NORMAL = NamedConstant()
</span></span></pre></div>
<a id="CalendarServertrunktxdavcarddavdatastoresqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/carddav/datastore/sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/carddav/datastore/sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/carddav/datastore/sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -36,11 +36,10 @@
</span><span class="cx"> from txweb2.http_headers import MimeType
</span><span class="cx"> from txweb2.responsecode import FORBIDDEN
</span><span class="cx"> 
</span><del>-from twisted.internet.defer import inlineCallbacks, returnValue
</del><ins>+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
</ins><span class="cx"> from twisted.python import hashlib
</span><span class="cx"> 
</span><span class="cx"> from twistedcaldav.config import config
</span><del>-from twistedcaldav.memcacher import Memcacher
</del><span class="cx"> from twistedcaldav.vcard import Component as VCard, InvalidVCardDataError, Property, \
</span><span class="cx">     vCardProductID
</span><span class="cx"> 
</span><span class="lines">@@ -53,11 +52,12 @@
</span><span class="cx">     KindChangeNotAllowedError
</span><span class="cx"> from txdav.common.datastore.query.generator import SQLQueryGenerator
</span><span class="cx"> from txdav.common.datastore.sql import CommonHome, CommonHomeChild, \
</span><del>-    CommonObjectResource, EADDRESSBOOKTYPE, SharingMixIn, SharingInvitation
</del><ins>+    CommonObjectResource, EADDRESSBOOKTYPE, SharingMixIn
</ins><span class="cx"> from txdav.common.datastore.sql_tables import _ABO_KIND_PERSON, \
</span><span class="cx">     _ABO_KIND_GROUP, _ABO_KIND_RESOURCE, _ABO_KIND_LOCATION, schema, \
</span><span class="cx">     _BIND_MODE_OWN, _BIND_MODE_WRITE, _BIND_STATUS_ACCEPTED, \
</span><span class="cx">     _BIND_STATUS_INVITED, _BIND_MODE_INDIRECT, _BIND_STATUS_DECLINED
</span><ins>+from txdav.common.datastore.sql_sharing import SharingInvitation
</ins><span class="cx"> from txdav.common.icommondatastore import InternalDataStoreError, \
</span><span class="cx">     InvalidUIDError, UIDExistsError, ObjectResourceTooBigError, \
</span><span class="cx">     InvalidObjectResourceError, InvalidComponentForStoreError, \
</span><span class="lines">@@ -77,20 +77,20 @@
</span><span class="cx"> 
</span><span class="cx">     # structured tables.  (new, preferred)
</span><span class="cx">     _homeSchema = schema.ADDRESSBOOK_HOME
</span><del>-    _bindSchema = schema.SHARED_ADDRESSBOOK_BIND
</del><span class="cx">     _homeMetaDataSchema = schema.ADDRESSBOOK_HOME_METADATA
</span><ins>+
+    _bindSchema = schema.SHARED_ADDRESSBOOK_BIND
</ins><span class="cx">     _revisionsSchema = schema.ADDRESSBOOK_OBJECT_REVISIONS
</span><span class="cx">     _objectSchema = schema.ADDRESSBOOK_OBJECT
</span><span class="cx"> 
</span><span class="cx">     _notifierPrefix = &quot;CardDAV&quot;
</span><span class="cx">     _dataVersionKey = &quot;ADDRESSBOOK-DATAVERSION&quot;
</span><del>-    _cacher = Memcacher(&quot;SQL.adbkhome&quot;, pickle=True, key_normalization=False)
</del><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def __init__(self, transaction, ownerUID, authzUID=None):
</del><ins>+    def __init__(self, transaction, homeData, authzUID=None):
</ins><span class="cx"> 
</span><del>-        super(AddressBookHome, self).__init__(transaction, ownerUID, authzUID=authzUID)
</del><span class="cx">         self._addressbookPropertyStoreID = None
</span><ins>+        super(AddressBookHome, self).__init__(transaction, homeData, authzUID=authzUID)
</ins><span class="cx">         self._addressbook = None
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -116,6 +116,7 @@
</span><span class="cx">         return (
</span><span class="cx">             cls._homeSchema.RESOURCE_ID,
</span><span class="cx">             cls._homeSchema.OWNER_UID,
</span><ins>+            cls._homeSchema.STATUS,
</ins><span class="cx">             cls._homeSchema.ADDRESSBOOK_PROPERTY_STORE_ID,
</span><span class="cx">         )
</span><span class="cx"> 
</span><span class="lines">@@ -131,19 +132,20 @@
</span><span class="cx">         return (
</span><span class="cx">             &quot;_resourceID&quot;,
</span><span class="cx">             &quot;_ownerUID&quot;,
</span><ins>+            &quot;_status&quot;,
</ins><span class="cx">             &quot;_addressbookPropertyStoreID&quot;,
</span><span class="cx">         )
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><del>-    def initFromStore(self, no_cache=False):
</del><ins>+    def initFromStore(self):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Initialize this object from the store. We read in and cache all the
</span><span class="cx">         extra meta-data from the DB to avoid having to do DB queries for those
</span><span class="cx">         individually later.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-        result = yield super(AddressBookHome, self).initFromStore(no_cache)
</del><ins>+        result = yield super(AddressBookHome, self).initFromStore()
</ins><span class="cx">         if result is not None:
</span><span class="cx">             # Created owned address book
</span><span class="cx">             addressbook = AddressBook(
</span><span class="lines">@@ -167,36 +169,23 @@
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def remove(self):
</span><del>-        ah = schema.ADDRESSBOOK_HOME
</del><span class="cx">         ahb = schema.SHARED_ADDRESSBOOK_BIND
</span><del>-        aor = schema.ADDRESSBOOK_OBJECT_REVISIONS
-        rp = schema.RESOURCE_PROPERTY
</del><span class="cx"> 
</span><span class="cx">         yield Delete(
</span><span class="cx">             From=ahb,
</span><span class="cx">             Where=ahb.ADDRESSBOOK_HOME_RESOURCE_ID == self._resourceID,
</span><span class="cx">         ).on(self._txn)
</span><span class="cx"> 
</span><del>-        yield Delete(
-            From=aor,
-            Where=aor.ADDRESSBOOK_HOME_RESOURCE_ID == self._resourceID,
-        ).on(self._txn)
</del><ins>+        yield super(AddressBookHome, self).remove()
</ins><span class="cx"> 
</span><del>-        yield Delete(
-            From=ah,
-            Where=ah.RESOURCE_ID == self._resourceID,
-        ).on(self._txn)
</del><span class="cx"> 
</span><del>-        yield Delete(
-            From=rp,
-            Where=(rp.RESOURCE_ID == self._resourceID).Or(
-                rp.RESOURCE_ID == self._addressbookPropertyStoreID
-            )
-        ).on(self._txn)
</del><ins>+    def removeAllChildren(self):
+        &quot;&quot;&quot;
+        This is a no-op for the single-child address book home.
+        &quot;&quot;&quot;
+        return succeed(None)
</ins><span class="cx"> 
</span><del>-        yield self._cacher.delete(str(self._ownerUID))
</del><span class="cx"> 
</span><del>-
</del><span class="cx">     @inlineCallbacks
</span><span class="cx">     def createdHome(self):
</span><span class="cx">         yield self.addressbook()._initSyncToken()
</span><span class="lines">@@ -473,7 +462,7 @@
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="cx">     @inlineCallbacks
</span><del>-    def _getDBDataIndirect(cls, home, name, resourceID, externalID):
</del><ins>+    def _getDBDataIndirect(cls, home, name, resourceID, bindUID):
</ins><span class="cx"> 
</span><span class="cx">         # Get the bind row data
</span><span class="cx">         row = None
</span><span class="lines">@@ -503,7 +492,7 @@
</span><span class="cx">         overallBindStatus = _BIND_STATUS_INVITED
</span><span class="cx">         minBindRevision = None
</span><span class="cx">         for row in rows:
</span><del>-            bindMode, homeID, resourceGroupID, externalID, name, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
</del><ins>+            homeID, resourceGroupID, name, bindMode, bindStatus, bindRevision, bindUID, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
</ins><span class="cx">             if groupID is None:
</span><span class="cx">                 groupID = resourceGroupID
</span><span class="cx">             minBindRevision = min(minBindRevision, bindRevision) if minBindRevision is not None else bindRevision
</span><span class="lines">@@ -543,9 +532,9 @@
</span><span class="cx">         returnValue((bindData, additionalBindData, metadataData, ownerHome,))
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def __init__(self, home, name, resourceID, mode, status, revision=0, message=None, ownerHome=None, ownerName=None, externalID=None):
</del><ins>+    def __init__(self, home, name, resourceID, mode, status, revision=0, message=None, ownerHome=None, ownerName=None, bindUID=None):
</ins><span class="cx">         ownerName = ownerHome.addressbook().name() if ownerHome else None
</span><del>-        super(AddressBook, self).__init__(home, name, resourceID, mode, status, revision=revision, message=message, ownerHome=ownerHome, ownerName=ownerName, externalID=externalID)
</del><ins>+        super(AddressBook, self).__init__(home, name, resourceID, mode, status, revision=revision, message=message, ownerHome=ownerHome, ownerName=ownerName, bindUID=bindUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     def __repr__(self):
</span><span class="lines">@@ -875,7 +864,7 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><del>-    def create(cls, home, name, externalID=None):
</del><ins>+    def create(cls, home, name, bindUID=None):
</ins><span class="cx">         if name == home.addressbook().name():
</span><span class="cx">             # raise HomeChildNameAlreadyExistsError
</span><span class="cx">             pass
</span><span class="lines">@@ -1141,7 +1130,7 @@
</span><span class="cx">             home._txn, homeID=home._resourceID
</span><span class="cx">         )
</span><span class="cx">         for groupRow in groupRows:
</span><del>-            bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+            homeID, resourceID, bindName, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx">             ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
</span><span class="cx">             ownerHome = yield home._txn.homeWithResourceID(home._homeType, ownerAddressBookID)
</span><span class="cx">             names |= set([ownerHome.uid()])
</span><span class="lines">@@ -1169,7 +1158,7 @@
</span><span class="cx">         )
</span><span class="cx">         # get ownerHomeIDs
</span><span class="cx">         for dataRow in dataRows:
</span><del>-            bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
</del><ins>+            homeID, resourceID, bindName, bindMode, bindStatus, bindRevision, bindUID, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
</ins><span class="cx">             ownerHome = yield home.ownerHomeWithChildID(resourceID)
</span><span class="cx">             ownerHomeToDataRowMap[ownerHome] = dataRow
</span><span class="cx"> 
</span><span class="lines">@@ -1178,12 +1167,16 @@
</span><span class="cx">             home._txn, homeID=home._resourceID
</span><span class="cx">         )
</span><span class="cx">         for groupBindRow in groupBindRows:
</span><del>-            bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+            homeID, resourceID, name, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx">             ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
</span><span class="cx">             ownerHome = yield home.ownerHomeWithChildID(ownerAddressBookID)
</span><span class="cx">             if ownerHome not in ownerHomeToDataRowMap:
</span><del>-                groupBindRow[0] = _BIND_MODE_INDIRECT
-                groupBindRow[3:7] = 4 * [None]  # bindName, bindStatus, bindRevision, bindMessage
</del><ins>+                groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_MODE)] = _BIND_MODE_INDIRECT
+                groupBindRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_NAME)] = None
+                groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_STATUS)] = None
+                groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_REVISION)] = None
+                groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_UID)] = None
+                groupBindRow[cls.bindColumns().index(cls._bindSchema.MESSAGE)] = None
</ins><span class="cx">                 ownerHomeToDataRowMap[ownerHome] = groupBindRow
</span><span class="cx"> 
</span><span class="cx">         if ownerHomeToDataRowMap:
</span><span class="lines">@@ -1272,7 +1265,7 @@
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="cx">     @inlineCallbacks
</span><del>-    def _indirectObjectWithNameOrID(cls, home, name=None, resourceID=None, externalID=None, accepted=True):
</del><ins>+    def _indirectObjectWithNameOrID(cls, home, name=None, resourceID=None, bindUID=None, accepted=True):
</ins><span class="cx">         # replaces objectWithName()
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Synthesize and indirect child for matching name or id based on whether shared groups exist.
</span><span class="lines">@@ -1285,7 +1278,7 @@
</span><span class="cx">             exists.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-        dbData = yield cls._getDBDataIndirect(home, name, resourceID, externalID)
</del><ins>+        dbData = yield cls._getDBDataIndirect(home, name, resourceID, bindUID)
</ins><span class="cx">         if dbData is None:
</span><span class="cx">             returnValue(None)
</span><span class="cx">         bindData, additionalBindData, metadataData, ownerHome = dbData
</span><span class="lines">@@ -1423,7 +1416,7 @@
</span><span class="cx">             readWriteGroupIDs = set()
</span><span class="cx">             readOnlyGroupIDs = set()
</span><span class="cx">             for groupBindRow in groupBindRows:
</span><del>-                bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+                homeID, resourceID, name, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx">                 if bindMode == _BIND_MODE_WRITE:
</span><span class="cx">                     readWriteGroupIDs.add(resourceID)
</span><span class="cx">                 else:
</span><span class="lines">@@ -1484,7 +1477,7 @@
</span><span class="cx">         readWriteGroupIDs = []
</span><span class="cx">         readOnlyGroupIDs = []
</span><span class="cx">         for groupBindRow in groupBindRows:
</span><del>-            bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+            homeID, resourceID, name, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx">             if bindMode == _BIND_MODE_WRITE:
</span><span class="cx">                 readWriteGroupIDs.append(resourceID)
</span><span class="cx">             else:
</span><span class="lines">@@ -1602,10 +1595,10 @@
</span><span class="cx">                 subt,
</span><span class="cx">                 homeID=shareeHome._resourceID,
</span><span class="cx">                 resourceID=self._resourceID,
</span><del>-                externalID=None,
</del><span class="cx">                 name=newName,
</span><span class="cx">                 mode=mode,
</span><span class="cx">                 bindStatus=status,
</span><ins>+                bindUID=None,
</ins><span class="cx">                 message=summary
</span><span class="cx">             )
</span><span class="cx">             returnValue(newName)
</span><span class="lines">@@ -1920,7 +1913,7 @@
</span><span class="cx">         yield child._loadPropertyStore(propstore)
</span><span class="cx"> 
</span><span class="cx">         if groupBindData:
</span><del>-            bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupBindData[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+            homeID, resourceID, bindName, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupBindData[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx">             child._bindMode = bindMode
</span><span class="cx">             child._bindStatus = bindStatus
</span><span class="cx">             child._bindMessage = bindMessage
</span><span class="lines">@@ -2021,7 +2014,7 @@
</span><span class="cx">         self._bindName = None
</span><span class="cx">         self._bindRevision = None
</span><span class="cx">         super(AddressBookObject, self).__init__(addressbook, name, uid, resourceID, options)
</span><del>-        self._externalID = None
</del><ins>+        self._bindUID = None
</ins><span class="cx">         self._options = {} if options is None else options
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -2230,7 +2223,7 @@
</span><span class="cx">         )
</span><span class="cx">         if groupBindRows:
</span><span class="cx">             groupBindRow = groupBindRows[0]
</span><del>-            bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+            homeID, resourceID, bindName, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx"> 
</span><span class="cx">             if accepted is not None and (bindStatus == _BIND_STATUS_ACCEPTED) != bool(accepted):
</span><span class="cx">                 returnValue(None)
</span></span></pre></div>
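<p>Replacing the hard-coded <code>groupBindRow[3:7]</code> slice with <code>bindColumns().index(...)</code> lookups ties each write to a named column rather than a fixed offset, so the rewrite survives the column reordering this commit introduces. The idea in isolation (column list and constant are illustrative):</p>
<pre>
BIND_COLUMNS = [&quot;HOME_ID&quot;, &quot;RESOURCE_ID&quot;, &quot;RESOURCE_NAME&quot;,
                &quot;BIND_MODE&quot;, &quot;BIND_STATUS&quot;, &quot;BIND_REVISION&quot;,
                &quot;BIND_UID&quot;, &quot;MESSAGE&quot;]
BIND_MODE_INDIRECT = 3  # placeholder for the real constant

def markIndirect(row):
    # Address each cell by column name instead of by position.
    row[BIND_COLUMNS.index(&quot;BIND_MODE&quot;)] = BIND_MODE_INDIRECT
    for column in (&quot;RESOURCE_NAME&quot;, &quot;BIND_STATUS&quot;,
                   &quot;BIND_REVISION&quot;, &quot;BIND_UID&quot;, &quot;MESSAGE&quot;):
        row[BIND_COLUMNS.index(column)] = None
    return row
</pre>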
<a id="CalendarServertrunktxdavcarddavdatastoresql_externalpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/carddav/datastore/sql_external.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/carddav/datastore/sql_external.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/carddav/datastore/sql_external.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -18,8 +18,6 @@
</span><span class="cx"> SQL backend for CardDAV storage when resources are external.
</span><span class="cx"> &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-from twisted.internet.defer import succeed
-
</del><span class="cx"> from twext.python.log import Logger
</span><span class="cx"> 
</span><span class="cx"> from txdav.carddav.datastore.sql import AddressBookHome, AddressBook, \
</span><span class="lines">@@ -31,10 +29,10 @@
</span><span class="cx"> 
</span><span class="cx"> class AddressBookHomeExternal(CommonHomeExternal, AddressBookHome):
</span><span class="cx"> 
</span><del>-    def __init__(self, transaction, ownerUID, resourceID):
</del><ins>+    def __init__(self, transaction, homeData):
</ins><span class="cx"> 
</span><del>-        AddressBookHome.__init__(self, transaction, ownerUID)
-        CommonHomeExternal.__init__(self, transaction, ownerUID, resourceID)
</del><ins>+        AddressBookHome.__init__(self, transaction, homeData)
+        CommonHomeExternal.__init__(self, transaction, homeData)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     def hasAddressBookResourceUIDSomewhereElse(self, uid, ok_object, mode):
</span><span class="lines">@@ -51,13 +49,6 @@
</span><span class="cx">         raise AssertionError(&quot;CommonHomeExternal: not supported&quot;)
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def createdHome(self):
-        &quot;&quot;&quot;
-        No children - make this a no-op.
-        &quot;&quot;&quot;
-        return succeed(None)
-
-
</del><span class="cx">     def addressbook(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         No children.
</span></span></pre></div>
<a id="CalendarServertrunktxdavcarddavdatastoretesttest_sqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -70,7 +70,7 @@
</span><span class="cx">         populateTxn = self.storeUnderTest().newTransaction()
</span><span class="cx">         for homeUID in self.requirements:
</span><span class="cx">             addressbooks = self.requirements[homeUID]
</span><del>-            home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
</del><ins>+            home = yield populateTxn.addressbookHomeWithUID(homeUID, create=True)
</ins><span class="cx">             if addressbooks is not None:
</span><span class="cx">                 addressbook = home.addressbook()
</span><span class="cx"> 
</span><span class="lines">@@ -364,13 +364,13 @@
</span><span class="cx">         txn2 = addressbookStore.newTransaction()
</span><span class="cx"> 
</span><span class="cx">         notification_uid1_1 = yield txn1.notificationsWithUID(
</span><del>-            &quot;uid1&quot;,
</del><ins>+            &quot;uid1&quot;, create=True,
</ins><span class="cx">         )
</span><span class="cx"> 
</span><span class="cx">         @inlineCallbacks
</span><span class="cx">         def _defer_notification_uid1_2():
</span><span class="cx">             notification_uid1_2 = yield txn2.notificationsWithUID(
</span><del>-                &quot;uid1&quot;,
</del><ins>+                &quot;uid1&quot;, create=True,
</ins><span class="cx">             )
</span><span class="cx">             yield txn2.commit()
</span><span class="cx">             returnValue(notification_uid1_2)
</span></span></pre></div>
<a id="CalendarServertrunktxdavcarddavdatastoretesttest_sql_sharingpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/carddav/datastore/test/test_sql_sharing.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/carddav/datastore/test/test_sql_sharing.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/carddav/datastore/test/test_sql_sharing.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -45,7 +45,7 @@
</span><span class="cx">         for homeUID in self.requirements:
</span><span class="cx">             addressbooks = self.requirements[homeUID]
</span><span class="cx">             if addressbooks is not None:
</span><del>-                home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
</del><ins>+                home = yield populateTxn.addressbookHomeWithUID(homeUID, create=True)
</ins><span class="cx">                 addressbook = home.addressbook()
</span><span class="cx"> 
</span><span class="cx">                 addressbookObjNames = addressbooks[addressbook.name()]
</span><span class="lines">@@ -198,7 +198,7 @@
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def _check_notifications(self, home, items):
</span><del>-        notifyHome = yield self.transactionUnderTest().notificationsWithUID(home)
</del><ins>+        notifyHome = yield self.transactionUnderTest().notificationsWithUID(home, create=True)
</ins><span class="cx">         notifications = yield notifyHome.listNotificationObjects()
</span><span class="cx">         self.assertEqual(set(notifications), set(items))
</span><span class="cx"> 
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorefilepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/file.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/file.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/file.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -44,7 +44,8 @@
</span><span class="cx"> from txdav.common.icommondatastore import HomeChildNameNotAllowedError, \
</span><span class="cx">     HomeChildNameAlreadyExistsError, NoSuchHomeChildError, \
</span><span class="cx">     InternalDataStoreError, ObjectResourceNameNotAllowedError, \
</span><del>-    ObjectResourceNameAlreadyExistsError, NoSuchObjectResourceError
</del><ins>+    ObjectResourceNameAlreadyExistsError, NoSuchObjectResourceError, \
+    ECALENDARTYPE, EADDRESSBOOKTYPE
</ins><span class="cx"> from txdav.common.idirectoryservice import IStoreDirectoryService
</span><span class="cx"> from txdav.common.inotifications import INotificationCollection, \
</span><span class="cx">     INotificationObject
</span><span class="lines">@@ -64,16 +65,6 @@
</span><span class="cx"> from twistedcaldav.sql import AbstractSQLDatabase, db_prefix
</span><span class="cx"> import os
</span><span class="cx"> 
</span><del>-ECALENDARTYPE = 0
-EADDRESSBOOKTYPE = 1
-
-# Labels used to identify the class of resource being modified, so that
-# notification systems can target the correct application
-NotifierPrefixes = {
-    ECALENDARTYPE : &quot;CalDAV&quot;,
-    EADDRESSBOOKTYPE : &quot;CardDAV&quot;,
-}
-
</del><span class="cx"> TOPPATHS = (
</span><span class="cx">     &quot;calendars&quot;,
</span><span class="cx">     &quot;addressbooks&quot;
</span><span class="lines">@@ -343,15 +334,15 @@
</span><span class="cx">         CommonStoreTransaction._homeClass[EADDRESSBOOKTYPE] = AddressBookHome
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def calendarHomeWithUID(self, uid, create=False):
-        return self.homeWithUID(ECALENDARTYPE, uid, create=create)
</del><ins>+    def calendarHomeWithUID(self, uid, status=None, create=False):
+        return self.homeWithUID(ECALENDARTYPE, uid, status=status, create=create)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def addressbookHomeWithUID(self, uid, create=False):
-        return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create)
</del><ins>+    def addressbookHomeWithUID(self, uid, status=None, create=False):
+        return self.homeWithUID(EADDRESSBOOKTYPE, uid, status=status, create=create)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def _determineMemo(self, storeType, uid, create=False):
</del><ins>+    def _determineMemo(self, storeType, uid, status=None, create=False):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Determine the memo dictionary to use for homeWithUID.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="lines">@@ -374,7 +365,7 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @memoizedKey(&quot;uid&quot;, _determineMemo, deferredResult=False)
</span><del>-    def homeWithUID(self, storeType, uid, create=False):
</del><ins>+    def homeWithUID(self, storeType, uid, status=None, create=False):
</ins><span class="cx">         if uid.startswith(&quot;.&quot;):
</span><span class="cx">             return None
</span><span class="cx"> 
</span><span class="lines">@@ -385,7 +376,7 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @memoizedKey(&quot;uid&quot;, &quot;_notificationHomes&quot;, deferredResult=False)
</span><del>-    def notificationsWithUID(self, uid, home=None):
</del><ins>+    def notificationsWithUID(self, uid, home=None, create=False):
</ins><span class="cx"> 
</span><span class="cx">         if home is None:
</span><span class="cx">             home = self.homeWithUID(self._notificationHomeType, uid, create=True)
</span></span></pre></div>
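<p>The new optional <code>status</code> argument threads through <code>calendarHomeWithUID()</code> and <code>homeWithUID()</code> so callers can address a home in a particular provisioning state. A minimal caller-side sketch, assuming an <code>@inlineCallbacks</code> context; the helper name is illustrative, while the status constant and the two transaction calls mirror the migration code elsewhere in this changeset:</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.common.datastore.sql_tables import _HOME_STATUS_MIGRATING

@inlineCallbacks
def homesForMigration(txn, uid):
    # Fetch (creating on demand) the in-migration copy of a user's calendar
    # home, then the matching notification collection for the same user.
    home = yield txn.calendarHomeWithUID(uid, status=_HOME_STATUS_MIGRATING, create=True)
    notifications = yield txn.notificationsWithUID(uid, create=True)
    returnValue((home, notifications))
</pre>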
<a id="CalendarServertrunktxdavcommondatastorepoddingattachmentspy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/attachments.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/attachments.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/attachments.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -15,6 +15,9 @@
</span><span class="cx"> ##
</span><span class="cx"> 
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><ins>+from txdav.caldav.icalendarstore import InvalidAttachmentOperation
+from txdav.common.datastore.podding.util import UtilityConduitMixin
+from txweb2.http_headers import generateContentType
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> class AttachmentsConduitMixin(object):
</span><span class="lines">@@ -150,3 +153,48 @@
</span><span class="cx">             request[&quot;rids&quot;],
</span><span class="cx">             request[&quot;managedID&quot;],
</span><span class="cx">         )
</span><ins>+
+
+    @inlineCallbacks
+    def send_get_attachment_data(self, home, attachment_id, stream):
+        &quot;&quot;&quot;
+        Managed attachment readAttachmentData call. Streams are used on both the sender and the
+        receiver side to avoid reading the whole attachment into memory.
+
+        @param home: the home whose attachment is being read
+        @type home: L{CalendarHome}
+        @param attachment_id: attachment-id to get
+        @type attachment_id: C{str}
+        @param stream: attachment data stream to write to
+        @type stream: L{IStream}
+        &quot;&quot;&quot;
+
+        actionName = &quot;get-attachment-data&quot;
+        txn, request, server = yield self._getRequestForStoreObject(actionName, home, False)
+        request[&quot;attachmentID&quot;] = attachment_id
+
+        response = yield self.sendRequestToServer(txn, server, request, writeStream=stream)
+        returnValue(response)
+
+
+    @inlineCallbacks
+    def recv_get_attachment_data(self, txn, request, stream):
+        &quot;&quot;&quot;
+        Process a getAttachmentData cross-pod request. Request arguments as per L{send_get_attachment_data}.
+
+        @param request: request arguments
+        @type request: C{dict}
+        &quot;&quot;&quot;
+
+        home, _ignore = yield self._getStoreObjectForRequest(txn, request)
+        attachment = yield home.getAttachmentByID(request[&quot;attachmentID&quot;])
+        if attachment is None:
+            raise InvalidAttachmentOperation(&quot;Attachment is missing: {}&quot;.format(request[&quot;attachmentID&quot;]))
+
+        attachment.retrieve(stream)
+        returnValue((generateContentType(attachment.contentType()), attachment.name(),))
+
+
+# Calls on L{CommonHome} objects
+UtilityConduitMixin._make_simple_action(AttachmentsConduitMixin, &quot;home_get_all_attachments&quot;, &quot;getAllAttachments&quot;, classMethod=False, transform_recv_result=UtilityConduitMixin._to_serialize_list)
+UtilityConduitMixin._make_simple_action(AttachmentsConduitMixin, &quot;home_get_attachment_links&quot;, &quot;getAttachmentLinks&quot;, classMethod=False, transform_recv_result=UtilityConduitMixin._to_serialize_list)
</ins></span></pre></div>
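<p>The <code>send_get_attachment_data</code>/<code>recv_get_attachment_data</code> pair added above streams attachment bodies between pods instead of buffering them in memory. A hedged sketch of the sending side; the helper name and the assumption that the write stream can be a plain file object are illustrative, not part of the changeset:</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def fetchRemoteAttachment(conduit, home, attachment_id, output_path):
    # The response body is written to the supplied stream as it arrives,
    # so the attachment is never held in memory in full.
    with open(output_path, "wb") as output:
        content_type, name = yield conduit.send_get_attachment_data(home, attachment_id, output)
    returnValue((content_type, name))
</pre>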
<a id="CalendarServertrunktxdavcommondatastorepoddingconduitpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/conduit.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/conduit.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/conduit.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -19,14 +19,11 @@
</span><span class="cx"> from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
</span><span class="cx"> from txdav.common.datastore.podding.attachments import AttachmentsConduitMixin
</span><span class="cx"> from txdav.common.datastore.podding.base import FailedCrossPodRequestError
</span><del>-from txdav.common.datastore.podding.directory import (
-    DirectoryPoddingConduitMixin
-)
</del><ins>+from txdav.common.datastore.podding.directory import DirectoryPoddingConduitMixin
+from txdav.common.datastore.podding.request import ConduitRequest
+from txdav.common.datastore.podding.sharing_invites import SharingInvitesConduitMixin
</ins><span class="cx"> from txdav.common.datastore.podding.store_api import StoreAPIConduitMixin
</span><del>-from txdav.common.datastore.podding.request import ConduitRequest
-from txdav.common.datastore.podding.sharing_invites import (
-    SharingInvitesConduitMixin
-)
</del><ins>+from txdav.common.datastore.podding.util import UtilityConduitMixin
</ins><span class="cx"> 
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><span class="cx"> from twisted.python.reflect import namedClass
</span><span class="lines">@@ -36,6 +33,7 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> class PoddingConduit(
</span><ins>+    UtilityConduitMixin,
</ins><span class="cx">     StoreAPIConduitMixin,
</span><span class="cx">     AttachmentsConduitMixin,
</span><span class="cx">     SharingInvitesConduitMixin,
</span><span class="lines">@@ -44,33 +42,29 @@
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx">     This class is the API/RPC bridge between cross-pod requests and the store.
</span><span class="cx"> 
</span><del>-    Each cross-pod request/response is described by a Python C{dict} that is
-    serialized to JSON for the HTTP request/response.
</del><ins>+    Each cross-pod request/response is described by a Python C{dict} that is serialized
+    to JSON for the HTTP request/response.
</ins><span class="cx"> 
</span><del>-    Each request C{dict} has an &quot;action&quot; key that indicates what call is being
-    made, and the other keys are arguments to that call.
</del><ins>+    Each request C{dict} has an &quot;action&quot; key that indicates what call is being made, and
+    the other keys are arguments to that call.
</ins><span class="cx"> 
</span><del>-    Each response C{dict} has a &quot;result&quot; key that indicates the call result,
-    and other optional keys for any values returned by the call.
</del><ins>+    Each response C{dict} has a &quot;result&quot; key that indicates the call result, and other
+    optional keys for any values returned by the call.
</ins><span class="cx"> 
</span><del>-    The conduit provides two methods for each action: one for the sending side
-    and one for the receiving side, called &quot;send_{action}&quot; and &quot;recv_{action}&quot;,
-    respectively, where {action} is the action value.
</del><ins>+    The conduit provides two methods for each action: one for the sending side and one for
+    the receiving side, called &quot;send_{action}&quot; and &quot;recv_{action}&quot;, respectively, where
+    {action} is the action value.
</ins><span class="cx"> 
</span><del>-    The &quot;send_{action}&quot; calls each have a set of arguments specific to the call
-    itself.
-    The code takes care of packing that into a C{dict} and sending to the
-    appropriate pod.
</del><ins>+    The &quot;send_{action}&quot; calls each have a set of arguments specific to the call itself. The
+    code takes care of packing that into a C{dict} and sending to the appropriate pod.
</ins><span class="cx"> 
</span><del>-    The &quot;recv_{action}&quot; calls take a single C{dict} argument that is the
-    deserialized JSON data from the incoming request. The return value is a
-    C{dict} with the result.
</del><ins>+    The &quot;recv_{action}&quot; calls take a single C{dict} argument that is the deserialized JSON
+    data from the incoming request. The return value is a C{dict} with the result.
</ins><span class="cx"> 
</span><del>-    Some simple forms of send_/recv_ methods can be auto-generated to simplify
-    coding.
</del><ins>+    Some simple forms of send_/recv_ methods can be auto-generated to simplify coding.
</ins><span class="cx"> 
</span><del>-    Actual implementations of this will be done via mix-ins for the different
-    sub-systems using the conduit.
</del><ins>+    Actual implementations of this will be done via mix-ins for the different sub-systems using
+    the conduit.
</ins><span class="cx">     &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">     conduitRequestClass = ConduitRequest
</span><span class="lines">@@ -80,6 +74,7 @@
</span><span class="cx">         @param store: the L{CommonDataStore} in use.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         self.store = store
</span><ins>+        self.streamingActions = (&quot;get-attachment-data&quot;,)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="lines">@@ -88,12 +83,9 @@
</span><span class="cx">         Verify that the specified uids are valid for the request and return the
</span><span class="cx">         matching directory records.
</span><span class="cx"> 
</span><del>-        @param source_uid: UID for the user on whose behalf the request is
-            being made
</del><ins>+        @param source_uid: UID for the user on whose behalf the request is being made
</ins><span class="cx">         @type source_uid: C{str}
</span><del>-
-        @param destination_uid: UID for the user to whom the request is being
-            sent
</del><ins>+        @param destination_uid: UID for the user to whom the request is being sent
</ins><span class="cx">         @type destination_uid: C{str}
</span><span class="cx"> 
</span><span class="cx">         @return: L{Deferred} resulting in C{tuple} of L{IStoreDirectoryRecord}
</span><span class="lines">@@ -101,59 +93,57 @@
</span><span class="cx"> 
</span><span class="cx">         source = yield self.store.directoryService().recordWithUID(source_uid)
</span><span class="cx">         if source is None:
</span><del>-            raise DirectoryRecordNotFoundError(
-                &quot;Cross-pod source: {}&quot;.format(source_uid)
-            )
</del><ins>+            raise DirectoryRecordNotFoundError(&quot;Cross-pod source: {}&quot;.format(source_uid))
</ins><span class="cx">         if not source.thisServer():
</span><del>-            raise FailedCrossPodRequestError(
-                &quot;Cross-pod source not on this server: {}&quot;.format(source_uid)
-            )
</del><ins>+            raise FailedCrossPodRequestError(&quot;Cross-pod source not on this server: {}&quot;.format(source_uid))
</ins><span class="cx"> 
</span><del>-        destination = yield self.store.directoryService().recordWithUID(
-            destination_uid
-        )
</del><ins>+        destination = yield self.store.directoryService().recordWithUID(destination_uid)
</ins><span class="cx">         if destination is None:
</span><del>-            raise DirectoryRecordNotFoundError(
-                &quot;Cross-pod destination: {}&quot;.format(destination_uid)
-            )
</del><ins>+            raise DirectoryRecordNotFoundError(&quot;Cross-pod destination: {}&quot;.format(destination_uid))
</ins><span class="cx">         if destination.thisServer():
</span><del>-            raise FailedCrossPodRequestError(
-                &quot;Cross-pod destination on this server: {}&quot;.format(
-                    destination_uid
-                )
-            )
</del><ins>+            raise FailedCrossPodRequestError(&quot;Cross-pod destination on this server: {}&quot;.format(destination_uid))
</ins><span class="cx"> 
</span><span class="cx">         returnValue((source, destination,))
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     def sendRequest(self, txn, recipient, data, stream=None, streamType=None):
</span><del>-        return self.sendRequestToServer(
-            txn, recipient.server(), data, stream, streamType
-        )
</del><ins>+        return self.sendRequestToServer(txn, recipient.server(), data, stream, streamType)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><del>-    def sendRequestToServer(
-        self, txn, server, data, stream=None, streamType=None
-    ):
</del><ins>+    def sendRequestToServer(self, txn, server, data, stream=None, streamType=None, writeStream=None):
</ins><span class="cx"> 
</span><del>-        request = self.conduitRequestClass(server, data, stream, streamType)
</del><ins>+        request = self.conduitRequestClass(server, data, stream, streamType, writeStream)
</ins><span class="cx">         try:
</span><span class="cx">             response = (yield request.doRequest(txn))
</span><span class="cx">         except Exception as e:
</span><del>-            raise FailedCrossPodRequestError(
-                &quot;Failed cross-pod request: {}&quot;.format(e)
-            )
</del><ins>+            raise FailedCrossPodRequestError(&quot;Failed cross-pod request: {}&quot;.format(e))
</ins><span class="cx">         if response[&quot;result&quot;] == &quot;exception&quot;:
</span><span class="cx">             raise namedClass(response[&quot;class&quot;])(response[&quot;details&quot;])
</span><span class="cx">         elif response[&quot;result&quot;] != &quot;ok&quot;:
</span><del>-            raise FailedCrossPodRequestError(
-                &quot;Cross-pod request failed: {}&quot;.format(response)
-            )
</del><ins>+            raise FailedCrossPodRequestError(&quot;Cross-pod request failed: {}&quot;.format(response))
</ins><span class="cx">         else:
</span><span class="cx">             returnValue(response.get(&quot;value&quot;))
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def isStreamAction(self, data):
+        &quot;&quot;&quot;
+        Check whether this request will return a data stream rather than a JSON response,
+        e.g., when retrieving attachment data from another pod.
+
+        @param data: the JSON data to process
+        @type data: C{dict}
+        &quot;&quot;&quot;
+        # Must have a dict with an &quot;action&quot; key
+        try:
+            action = data[&quot;action&quot;]
+        except (KeyError, TypeError) as e:
+            log.error(&quot;JSON data must have an object as its root with an 'action' attribute: {ex}\n{json}&quot;, ex=e, json=data)
+            return False
+
+        return action in self.streamingActions
+
+
</ins><span class="cx">     @inlineCallbacks
</span><span class="cx">     def processRequest(self, data):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="lines">@@ -166,15 +156,8 @@
</span><span class="cx">         try:
</span><span class="cx">             action = data[&quot;action&quot;]
</span><span class="cx">         except (KeyError, TypeError) as e:
</span><del>-            log.error(
-                &quot;JSON data must have an object as its root with an 'action' &quot;
-                &quot;attribute: {ex}\n{json}&quot;,
-                ex=e, json=data
-            )
-            raise FailedCrossPodRequestError(
-                &quot;JSON data must have an object as its root with an 'action' &quot;
-                &quot;attribute: {}\n{}&quot;.format(e, data,)
-            )
</del><ins>+            log.error(&quot;JSON data must have an object as its root with an 'action' attribute: {ex}\n{json}&quot;, ex=e, json=data)
+            raise FailedCrossPodRequestError(&quot;JSON data must have an object as its root with an 'action' attribute: {}\n{}&quot;.format(e, data,))
</ins><span class="cx"> 
</span><span class="cx">         if action == &quot;ping&quot;:
</span><span class="cx">             result = {&quot;result&quot;: &quot;ok&quot;}
</span><span class="lines">@@ -183,9 +166,7 @@
</span><span class="cx">         method = &quot;recv_{}&quot;.format(action.replace(&quot;-&quot;, &quot;_&quot;))
</span><span class="cx">         if not hasattr(self, method):
</span><span class="cx">             log.error(&quot;Unsupported action: {action}&quot;, action=action)
</span><del>-            raise FailedCrossPodRequestError(
-                &quot;Unsupported action: {}&quot;.format(action)
-            )
</del><ins>+            raise FailedCrossPodRequestError(&quot;Unsupported action: {}&quot;.format(action))
</ins><span class="cx"> 
</span><span class="cx">         # Need a transaction to work with
</span><span class="cx">         txn = self.store.newTransaction(repr(&quot;Conduit request&quot;))
</span><span class="lines">@@ -203,9 +184,7 @@
</span><span class="cx">             log.error(&quot;Failed action: {action}, {ex}&quot;, action=action, ex=e)
</span><span class="cx">             result = {
</span><span class="cx">                 &quot;result&quot;: &quot;exception&quot;,
</span><del>-                &quot;class&quot;: &quot;.&quot;.join((
-                    e.__class__.__module__, e.__class__.__name__,
-                )),
</del><ins>+                &quot;class&quot;: &quot;.&quot;.join((e.__class__.__module__, e.__class__.__name__,)),
</ins><span class="cx">                 &quot;details&quot;: str(e),
</span><span class="cx">             }
</span><span class="cx"> 
</span><span class="lines">@@ -213,3 +192,48 @@
</span><span class="cx">             yield txn.commit()
</span><span class="cx"> 
</span><span class="cx">         returnValue(result)
</span><ins>+
+
+    @inlineCallbacks
+    def processRequestStream(self, data, stream):
+        &quot;&quot;&quot;
+        Process a request whose result is written to a data stream rather than returned as JSON.
+
+        @param data: the JSON data to process
+        @type data: C{dict}
+        @param stream: the stream to write the response data to
+        @type stream: L{IStream}
+
+        @return: a L{tuple} of content-type and name, if successful, else a L{dict} for a JSON result
+        @rtype: L{tuple} of (L{str}, L{str}), or L{dict}
+        &quot;&quot;&quot;
+        # Must have a dict with an &quot;action&quot; key
+        try:
+            action = data[&quot;action&quot;]
+        except (KeyError, TypeError) as e:
+            log.error(&quot;JSON data must have an object as its root with an 'action' attribute: {ex}\n{json}&quot;, ex=e, json=data)
+            raise FailedCrossPodRequestError(&quot;JSON data must have an object as its root with an 'action' attribute: {}\n{}&quot;.format(e, data,))
+
+        method = &quot;recv_{}&quot;.format(action.replace(&quot;-&quot;, &quot;_&quot;))
+        if not hasattr(self, method):
+            log.error(&quot;Unsupported action: {action}&quot;, action=action)
+            raise FailedCrossPodRequestError(&quot;Unsupported action: {}&quot;.format(action))
+
+        # Need a transaction to work with
+        txn = self.store.newTransaction(repr(&quot;Conduit request&quot;))
+
+        # Do the actual request processing
+        try:
+            result = (yield getattr(self, method)(txn, data, stream))
+        except Exception as e:
+            # Send the exception over to the other side
+            yield txn.abort()
+            log.error(&quot;Failed action: {action}, {ex}&quot;, action=action, ex=e)
+            result = {
+                &quot;result&quot;: &quot;exception&quot;,
+                &quot;class&quot;: &quot;.&quot;.join((e.__class__.__module__, e.__class__.__name__,)),
+                &quot;details&quot;: str(e),
+            }
+
+        else:
+            yield txn.commit()
+
+        returnValue(result)
</ins></span></pre></div>
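<p>To make the dispatch concrete, the shape of a request/response pair as described in the class docstring is sketched below; the attachment id is a placeholder value, but the key names and the <code>recv_</code> naming rule come straight from the code in this changeset:</p>
<pre>
# A cross-pod request is a JSON object with an "action" key plus arguments:
request = {
    "action": "get-attachment-data",  # a streaming action, see streamingActions
    "attachmentID": "ABC123",         # placeholder argument value
}

# Non-streaming responses carry a "result" key, plus "value" on success, or
# "class"/"details" when an exception is relayed from the other pod:
ok_response = {"result": "ok", "value": None}
error_response = {
    "result": "exception",
    "class": "txdav.caldav.icalendarstore.InvalidAttachmentOperation",
    "details": "Attachment is missing: ABC123",
}

# processRequest()/processRequestStream() map the action to a receiver method:
method = "recv_{}".format(request["action"].replace("-", "_"))
assert method == "recv_get_attachment_data"
</pre>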
<a id="CalendarServertrunktxdavcommondatastorepoddingdirectorypy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/directory.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/directory.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/directory.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -191,3 +191,126 @@
</span><span class="cx">         delegators = yield Delegates._delegatedToUIDs(txn, delegate, request[&quot;read-write&quot;], onlyThisServer=True)
</span><span class="cx"> 
</span><span class="cx">         returnValue(list(delegators))
</span><ins>+
+
+    @inlineCallbacks
+    def send_dump_individual_delegates(self, txn, delegator):
+        &quot;&quot;&quot;
+        Get L{DelegateRecords} from another pod.
+
+        @param txn: transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param delegator: delegator whose individual delegates are to be returned
+        @type delegator: L{DirectoryRecord}
+        &quot;&quot;&quot;
+        if delegator.thisServer():
+            raise FailedCrossPodRequestError(&quot;Cross-pod destination on this server: {}&quot;.format(delegator.uid))
+
+        request = {
+            &quot;action&quot;: &quot;dump-individual-delegates&quot;,
+            &quot;uid&quot;: delegator.uid,
+        }
+        response = yield self.sendRequestToServer(txn, delegator.server(), request)
+        returnValue(response)
+
+
+    @inlineCallbacks
+    def recv_dump_individual_delegates(self, txn, request):
+        &quot;&quot;&quot;
+        Process a dump-individual-delegates cross-pod request. Request arguments as per L{send_dump_individual_delegates}.
+
+        @param request: request arguments
+        @type request: C{dict}
+        &quot;&quot;&quot;
+
+        delegator = yield txn.directoryService().recordWithUID(request[&quot;uid&quot;])
+        if delegator is None or not delegator.thisServer():
+            raise FailedCrossPodRequestError(&quot;Cross-pod delegator missing or not on this server: {}&quot;.format(request[&quot;uid&quot;]))
+
+        delegates = yield txn.dumpIndividualDelegatesLocal(delegator.uid)
+
+        returnValue(self._to_serialize_list(delegates))
+
+
+    @inlineCallbacks
+    def send_dump_group_delegates(self, txn, delegator):
+        &quot;&quot;&quot;
+        Get L{DelegateGroupsRecord}, L{GroupsRecord} from another pod.
+
+        @param txn: transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param delegator: delegator whose group delegates are to be returned
+        @type delegator: L{DirectoryRecord}
+        &quot;&quot;&quot;
+        if delegator.thisServer():
+            raise FailedCrossPodRequestError(&quot;Cross-pod destination on this server: {}&quot;.format(delegator.uid))
+
+        request = {
+            &quot;action&quot;: &quot;dump-group-delegates&quot;,
+            &quot;uid&quot;: delegator.uid,
+        }
+        response = yield self.sendRequestToServer(txn, delegator.server(), request)
+        returnValue(response)
+
+
+    @inlineCallbacks
+    def recv_dump_group_delegates(self, txn, request):
+        &quot;&quot;&quot;
+        Process a dump-group-delegates cross-pod request. Request arguments as per L{send_dump_group_delegates}.
+
+        @param request: request arguments
+        @type request: C{dict}
+        &quot;&quot;&quot;
+
+        delegator = yield txn.directoryService().recordWithUID(request[&quot;uid&quot;])
+        if delegator is None or not delegator.thisServer():
+            raise FailedCrossPodRequestError(&quot;Cross-pod delegator missing or not on this server: {}&quot;.format(request[&quot;uid&quot;]))
+
+        results = yield txn.dumpGroupDelegatesLocal(delegator.uid)
+
+        returnValue([[delegator.serialize(), group.serialize()] for delegator, group in results])
+
+
+    @inlineCallbacks
+    def send_dump_external_delegates(self, txn, delegator):
+        &quot;&quot;&quot;
+        Get L{ExternalDelegateGroupsRecord} from another pod.
+
+        @param txn: transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param delegator: delegator whose external delegate assignments are to be returned
+        @type delegator: L{DirectoryRecord}
+        &quot;&quot;&quot;
+        if delegator.thisServer():
+            raise FailedCrossPodRequestError(&quot;Cross-pod destination on this server: {}&quot;.format(delegator.uid))
+
+        request = {
+            &quot;action&quot;: &quot;dump-external-delegates&quot;,
+            &quot;uid&quot;: delegator.uid,
+        }
+        response = yield self.sendRequestToServer(txn, delegator.server(), request)
+        returnValue(response)
+
+
+    @inlineCallbacks
+    def recv_dump_external_delegates(self, txn, request):
+        &quot;&quot;&quot;
+        Process a dump-external-delegates cross-pod request. Request arguments as per L{send_dump_external_delegates}.
+
+        @param request: request arguments
+        @type request: C{dict}
+        &quot;&quot;&quot;
+
+        delegator = yield txn.directoryService().recordWithUID(request[&quot;uid&quot;])
+        if delegator is None or not delegator.thisServer():
+            raise FailedCrossPodRequestError(&quot;Cross-pod delegator missing or not on this server: {}&quot;.format(request[&quot;uid&quot;]))
+
+        delegates = yield txn.dumpExternalDelegatesLocal(delegator.uid)
+
+        returnValue(self._to_serialize_list(delegates))
</ins></span></pre></div>
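<p>All three dump actions follow the same send/recv pattern, so one driver sketch covers them; the helper name is illustrative, while <code>recordWithUID()</code> and <code>send_dump_individual_delegates()</code> are taken from this changeset:</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def dumpIndividualDelegates(conduit, txn, uid):
    # Look up the delegator's directory record, then ask the pod that
    # hosts it for the serialized individual delegate records.
    delegator = yield txn.directoryService().recordWithUID(uid)
    delegates = yield conduit.send_dump_individual_delegates(txn, delegator)
    returnValue(delegates)
</pre>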
<a id="CalendarServertrunktxdavcommondatastorepoddingmigration__init__py"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/__init__.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,15 +0,0 @@
</span><del>-##
-# Copyright (c) 2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigration__init__pyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigration__init__py"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/__init__.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,15 @@
</span><ins>+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationhome_syncpy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/home_sync.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,1353 +0,0 @@
</span><del>-##
-# Copyright (c) 2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from functools import wraps
-
-from twext.python.log import Logger
-from twisted.internet.defer import returnValue, inlineCallbacks
-from twisted.python.failure import Failure
-from twistedcaldav.accounting import emitAccounting
-from txdav.caldav.icalendarstore import ComponentUpdateState
-from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
-    CalendarObjectMigrationRecord, AttachmentMigrationRecord
-from txdav.caldav.datastore.sql import ManagedAttachment, CalendarBindRecord
-from txdav.common.datastore.sql_external import NotificationCollectionExternal
-from txdav.common.datastore.sql_notification import NotificationCollection
-from txdav.common.datastore.sql_tables import _HOME_STATUS_MIGRATING, _HOME_STATUS_DISABLED, \
-    _HOME_STATUS_EXTERNAL, _HOME_STATUS_NORMAL
-from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
-
-from uuid import uuid4
-import datetime
-
-log = Logger()
-
-ACCOUNTING_TYPE = &quot;migration&quot;
-ACCOUNTING_LOG = &quot;migration.log&quot;
-
-def inTransactionWrapper(operation):
-    &quot;&quot;&quot;
-    This wrapper converts an instance method that takes a transaction as its
-    first parameter into one where the transaction parameter is an optional
-    keyword argument. If the keyword argument is present and not None, then
-    the instance method is called with that keyword as the first positional
-    argument (i.e., almost a NoOp). If the keyword argument is not present,
-    then a new transaction is created and the instance method called with
-    it as the first positional argument; the call is also wrapped with
-    try/except/else to ensure the internally created transaction is
-    properly committed or aborted.
-
-    So this wrapper allows for a method that requires a transaction to be run
-    with either an existing transaction or one created just for the purpose
-    of running it.
-
-    @param operation: a callable that takes an L{IAsyncTransaction} as its first
-        argument, and returns a value.
-    &quot;&quot;&quot;
-
-    @wraps(operation)
-    @inlineCallbacks
-    def _inTxn(self, *args, **kwargs):
-        label = self.label(operation.__name__)
-        if &quot;txn&quot; in kwargs:
-            txn = kwargs[&quot;txn&quot;]
-            del kwargs[&quot;txn&quot;]
-            result = yield operation(self, txn, *args, **kwargs)
-            returnValue(result)
-        else:
-            txn = self.store.newTransaction(label=label)
-            try:
-                result = yield operation(self, txn, *args, **kwargs)
-            except Exception as ex:
-                f = Failure()
-                yield txn.abort()
-                log.error(&quot;{label} failed: {e}&quot;.format(label=label, e=str(ex)))
-                returnValue(f)
-            else:
-                yield txn.commit()
-                returnValue(result)
-
-    return _inTxn
-
-
-
-# Cross-pod synchronization of an entire calendar home
-class CrossPodHomeSync(object):
-
-    BATCH_SIZE = 50
-
-    def __init__(self, store, diruid, final=False, uselog=None):
-        &quot;&quot;&quot;
-        @param store: the data store
-        @type store: L{CommonDataStore}
-        @param diruid: directory uid of the user whose home is to be sync'd
-        @type diruid: L{str}
-        @param final: indicates whether this is in the final sync stage with the remote home
-            already disabled
-        @type final: L{bool}
-        @param uselog: additional logging written to this object
-        @type uselog: L{File}
-        &quot;&quot;&quot;
-
-        self.store = store
-        self.diruid = diruid
-        self.disabledRemote = final
-        self.uselog = uselog
-        self.record = None
-        self.homeId = None
-
-
-    def label(self, detail):
-        return &quot;Cross-pod Migration Sync for {}: {}&quot;.format(self.diruid, detail)
-
-
-    def accounting(self, logstr):
-        emitAccounting(ACCOUNTING_TYPE, self.record, &quot;{} {}\n&quot;.format(datetime.datetime.now().isoformat(), logstr), filename=ACCOUNTING_LOG)
-        if self.uselog is not None:
-            self.uselog.write(&quot;CrossPodHomeSync: {}\n&quot;.format(logstr))
-
-
-    @inlineCallbacks
-    def migrateHere(self):
-        &quot;&quot;&quot;
-        This is a full, serialized version of a data migration (minus any directory
-        update) that can be triggered via a command line tool. It is designed to
-        minimize down time for the migrating user.
-        &quot;&quot;&quot;
-
-        # Step 1 - initial full sync
-        yield self.sync()
-
-        # Step 2 - increment sync (since the initial sync may take a long time
-        # to run we should do one incremental sync before bringing down the
-        # account being migrated)
-        yield self.sync()
-
-        # Step 3 - disable remote home
-        # NB Any failure from this point on will need to be caught and
-        # handled by re-enabling the old home (and fixing any sharing state
-        # that may have been changed)
-        yield self.disableRemoteHome()
-
-        # Step 4 - final incremental sync
-        yield self.sync()
-
-        # Step 5 - final overall sync of meta-data (including sharing re-linking)
-        yield self.finalSync()
-
-        # Step 6 - enable new home
-        yield self.enableLocalHome()
-
-        # Step 7 - remove remote home
-        yield self.removeRemoteHome()
-
-        # Step 8 - say phew! TODO: Actually alert everyone else
-        pass
-
-
-    @inlineCallbacks
-    def sync(self):
-        &quot;&quot;&quot;
-        Initiate a sync of the home. This is a simple data sync that does not
-        reconcile sharing state etc. The L{finalSync} method will do a full
-        sharing reconcile as well as disable the migration source home.
-        &quot;&quot;&quot;
-
-        yield self.loadRecord()
-        self.accounting(&quot;Starting: sync...&quot;)
-        yield self.prepareCalendarHome()
-
-        # Calendar list and calendar data
-        yield self.syncCalendarList()
-
-        # Sync home metadata such as alarms, default calendars, etc
-        yield self.syncCalendarHomeMetaData()
-
-        # Sync attachments
-        yield self.syncAttachments()
-
-        self.accounting(&quot;Completed: sync.\n&quot;)
-
-
-    @inlineCallbacks
-    def finalSync(self):
-        &quot;&quot;&quot;
-        Do the final sync up of any additional data, re-link sharing bind
-        rows, recalculate quota etc.
-        &quot;&quot;&quot;
-
-        yield self.loadRecord()
-        self.accounting(&quot;Starting: finalSync...&quot;)
-        yield self.prepareCalendarHome()
-
-        # Link attachments to resources: ATTACHMENT_CALENDAR_OBJECT table
-        yield self.linkAttachments()
-
-        # TODO: Re-write attachment URIs - not sure if we need this as reverse proxy may take care of it
-        pass
-
-        # Group attendee reconcile
-        yield self.groupAttendeeReconcile()
-
-        # Delegates reconcile
-        yield self.delegateReconcile()
-
-        # Shared collections reconcile (including group sharees)
-        yield self.sharedByCollectionsReconcile()
-        yield self.sharedToCollectionsReconcile()
-
-        # Notifications
-        yield self.notificationsReconcile()
-
-        # TODO: work items
-        pass
-
-        self.accounting(&quot;Completed: finalSync.\n&quot;)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def disableRemoteHome(self, txn):
-        &quot;&quot;&quot;
-        Mark the remote home as disabled.
-        &quot;&quot;&quot;
-
-        yield self.loadRecord()
-        self.accounting(&quot;Starting: disableRemoteHome...&quot;)
-        yield self.prepareCalendarHome()
-
-        # Calendar home
-        remote_home = yield self._remoteHome(txn)
-        yield remote_home.setStatus(_HOME_STATUS_DISABLED)
-
-        # Notification home
-        notifications = yield self._remoteNotificationsHome(txn)
-        yield notifications.setStatus(_HOME_STATUS_DISABLED)
-
-        self.disabledRemote = True
-
-        self.accounting(&quot;Completed: disableRemoteHome.\n&quot;)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def enableLocalHome(self, txn):
-        &quot;&quot;&quot;
-        Mark the local home as enabled and remove any previously existing external home.
-        &quot;&quot;&quot;
-
-        yield self.loadRecord()
-        self.accounting(&quot;Starting: enableLocalHome...&quot;)
-        yield self.prepareCalendarHome()
-
-        # Disable any local external homes
-        oldhome = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_EXTERNAL)
-        if oldhome is not None:
-            yield oldhome.setLocalStatus(_HOME_STATUS_DISABLED)
-        oldnotifications = yield txn.notificationsWithUID(self.diruid, status=_HOME_STATUS_EXTERNAL)
-        if oldnotifications:
-            yield oldnotifications.setLocalStatus(_HOME_STATUS_DISABLED)
-
-        # Enable the migrating ones
-        newhome = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
-        if newhome is not None:
-            yield newhome.setStatus(_HOME_STATUS_NORMAL)
-        newnotifications = yield txn.notificationsWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
-        if newnotifications:
-            yield newnotifications.setStatus(_HOME_STATUS_NORMAL)
-
-        # TODO: purge the old ones
-        pass
-
-        self.accounting(&quot;Completed: enableLocalHome.\n&quot;)
-
-
-    @inlineCallbacks
-    def removeRemoteHome(self):
-        &quot;&quot;&quot;
-        Remove all the old data on the remote pod.
-        &quot;&quot;&quot;
-
-        # TODO: implement API on CommonHome to purge the old data without
-        # any side-effects (scheduling, sharing etc).
-        yield self.loadRecord()
-        self.accounting(&quot;Starting: removeRemoteHome...&quot;)
-        yield self.prepareCalendarHome()
-
-        self.accounting(&quot;Completed: removeRemoteHome.\n&quot;)
-
-
-    @inlineCallbacks
-    def loadRecord(self):
-        &quot;&quot;&quot;
-        Initiate a sync of the home.
-        &quot;&quot;&quot;
-
-        if self.record is None:
-            self.record = yield self.store.directoryService().recordWithUID(self.diruid)
-            if self.record is None:
-                raise DirectoryRecordNotFoundError(&quot;Cross-pod Migration Sync missing directory record for {}&quot;.format(self.diruid))
-            if self.record.thisServer():
-                raise ValueError(&quot;Cross-pod Migration Sync cannot sync with user already on this server: {}&quot;.format(self.diruid))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def prepareCalendarHome(self, txn):
-        &quot;&quot;&quot;
-        Make sure the inactive home to migrate into is present on this pod.
-        &quot;&quot;&quot;
-
-        if self.homeId is None:
-            home = yield self._localHome(txn)
-            if home is None:
-                if self.disabledRemote:
-                    self.homeId = None
-                else:
-                    home = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING, create=True)
-                    self.accounting(&quot;  Created new home collection to migrate into.&quot;)
-            self.homeId = home.id() if home is not None else None
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def syncCalendarHomeMetaData(self, txn):
-        &quot;&quot;&quot;
-        Make sure the home meta-data (alarms, default calendars) is properly sync'd
-        &quot;&quot;&quot;
-
-        self.accounting(&quot;Starting: syncCalendarHomeMetaData...&quot;)
-        remote_home = yield self._remoteHome(txn)
-        yield remote_home.readMetaData()
-
-        calendars = yield CalendarMigrationRecord.querysimple(txn, calendarHomeResourceID=self.homeId)
-        calendarIDMap = dict((item.remoteResourceID, item.localResourceID) for item in calendars)
-
-        local_home = yield self._localHome(txn)
-        yield local_home.copyMetadata(remote_home, calendarIDMap)
-
-        self.accounting(&quot;Completed: syncCalendarHomeMetaData.&quot;)
-
-
-    @inlineCallbacks
-    def _remoteHome(self, txn):
-        &quot;&quot;&quot;
-        Create a synthetic external home object that maps to the actual remote home.
-        &quot;&quot;&quot;
-
-        from txdav.caldav.datastore.sql_external import CalendarHomeExternal
-        resourceID = yield txn.store().conduit.send_home_resource_id(txn, self.record, migrating=True)
-        home = CalendarHomeExternal.makeSyntheticExternalHome(txn, self.record.uid, resourceID) if resourceID is not None else None
-        if self.disabledRemote:
-            home._migratingHome = True
-        returnValue(home)
-
-
-    @inlineCallbacks
-    def _remoteNotificationsHome(self, txn):
-        &quot;&quot;&quot;
-        Create a synthetic external home object that maps to the actual remote home.
-        &quot;&quot;&quot;
-
-        notifications = yield NotificationCollectionExternal.notificationsWithUID(txn, self.diruid, create=True)
-        if self.disabledRemote:
-            notifications._migratingHome = True
-        returnValue(notifications)
-
-
-    def _localHome(self, txn):
-        &quot;&quot;&quot;
-        Get the home on this pod that will have data migrated to it.
-        &quot;&quot;&quot;
-
-        return txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
-
-
-    @inlineCallbacks
-    def syncCalendarList(self):
-        &quot;&quot;&quot;
-        Synchronize each owned calendar.
-        &quot;&quot;&quot;
-
-        self.accounting(&quot;Starting: syncCalendarList...&quot;)
-
-        # Remote sync details
-        remote_sync_state = yield self.getCalendarSyncList()
-        self.accounting(&quot;  Found {} remote calendars to sync.&quot;.format(len(remote_sync_state)))
-
-        # Get local sync details from local DB
-        local_sync_state = yield self.getSyncState()
-        self.accounting(&quot;  Found {} local calendars to sync.&quot;.format(len(local_sync_state)))
-
-        # Remove local calendars no longer on the remote side
-        yield self.purgeLocal(local_sync_state, remote_sync_state)
-
-        # Sync each calendar that matches on both sides
-        for remoteID in remote_sync_state.keys():
-            yield self.syncCalendar(remoteID, local_sync_state, remote_sync_state)
-
-        self.accounting(&quot;Completed: syncCalendarList.&quot;)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def getCalendarSyncList(self, txn):
-        &quot;&quot;&quot;
-        Get the names and sync-tokens for each remote owned calendar.
-        &quot;&quot;&quot;
-
-        # List of calendars from the remote side
-        home = yield self._remoteHome(txn)
-        if home is None:
-            returnValue(None)
-        calendars = yield home.loadChildren()
-        results = {}
-        for calendar in calendars:
-            if calendar.owned():
-                sync_token = yield calendar.syncToken()
-                results[calendar.id()] = CalendarMigrationRecord.make(
-                    calendarHomeResourceID=home.id(),
-                    remoteResourceID=calendar.id(),
-                    localResourceID=0,
-                    lastSyncToken=sync_token,
-                )
-
-        returnValue(results)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def getSyncState(self, txn):
-        &quot;&quot;&quot;
-        Get local synchronization state for the home being migrated.
-        &quot;&quot;&quot;
-        records = yield CalendarMigrationRecord.querysimple(
-            txn, calendarHomeResourceID=self.homeId
-        )
-        returnValue(dict([(record.remoteResourceID, record) for record in records]))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def updateSyncState(self, txn, stateRecord, newSyncToken):
-        &quot;&quot;&quot;
-        Update or insert an L{CalendarMigrationRecord} with the new specified sync token.
-        &quot;&quot;&quot;
-        if stateRecord.isnew():
-            stateRecord.lastSyncToken = newSyncToken
-            yield stateRecord.insert(txn)
-        else:
-            # The existing stateRecord has a stale txn, but valid column values. We have
-            # to duplicate it before we can give it a different txn.
-            stateRecord = stateRecord.duplicate()
-            stateRecord.transaction = txn
-            yield stateRecord.update(lastSyncToken=newSyncToken)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def purgeLocal(self, txn, local_sync_state, remote_sync_state):
-        &quot;&quot;&quot;
-        Remove (silently - i.e., no scheduling) local calendars that are no longer on the remote side.
-
-        @param txn: transaction to use
-        @type txn: L{CommonStoreTransaction}
-        @param local_sync_state: local sync state
-        @type local_sync_state: L{dict}
-        @param remote_sync_state: remote sync state
-        @type remote_sync_state: L{dict}
-        &quot;&quot;&quot;
-        home = yield self._localHome(txn)
-        for localID in set(local_sync_state.keys()) - set(remote_sync_state.keys()):
-            calendar = yield home.childWithID(local_sync_state[localID].localResourceID)
-            if calendar is not None:
-                yield calendar.purge()
-            del local_sync_state[localID]
-            self.accounting(&quot;  Purged calendar local-id={} that no longer exists on the remote pod.&quot;.format(localID))
-
-
-    @inlineCallbacks
-    def syncCalendar(self, remoteID, local_sync_state, remote_sync_state):
-        &quot;&quot;&quot;
-        Sync the contents of a calendar from the remote side. The local calendar may need to be created
-        on initial sync. Make use of sync tokens to avoid unnecessary work.
-
-        @param remoteID: id of the remote calendar to sync
-        @type remoteID: L{int}
-        @param local_sync_state: local sync state
-        @type local_sync_state: L{dict}
-        @param remote_sync_state: remote sync state
-        @type remote_sync_state: L{dict}
-        &quot;&quot;&quot;
-
-        self.accounting(&quot;Starting: syncCalendar.&quot;)
-
-        # See if we need to create the local one first
-        if remoteID not in local_sync_state:
-            localID = yield self.newCalendar()
-            local_sync_state[remoteID] = CalendarMigrationRecord.make(
-                calendarHomeResourceID=self.homeId,
-                remoteResourceID=remoteID,
-                localResourceID=localID,
-                lastSyncToken=None,
-            )
-            self.accounting(&quot;  Created new calendar local-id={}, remote-id={}.&quot;.format(localID, remoteID))
-        else:
-            localID = local_sync_state.get(remoteID).localResourceID
-            self.accounting(&quot;  Updating calendar local-id={}, remote-id={}.&quot;.format(localID, remoteID))
-        local_record = local_sync_state.get(remoteID)
-
-        remote_token = remote_sync_state[remoteID].lastSyncToken
-        if local_record.lastSyncToken != remote_token:
-            # Sync meta-data such as name, alarms, supported-components, transp, etc
-            yield self.syncCalendarMetaData(local_record)
-
-            # Sync object resources
-            changed, removed = yield self.findObjectsToSync(local_record)
-            self.accounting(&quot;  Calendar objects changed={}, removed={}.&quot;.format(len(changed), len(removed)))
-            yield self.purgeDeletedObjectsInBatches(local_record, removed)
-            yield self.updateChangedObjectsInBatches(local_record, changed)
-
-        yield self.updateSyncState(local_record, remote_token)
-        self.accounting(&quot;Completed: syncCalendar.&quot;)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def newCalendar(self, txn):
-        &quot;&quot;&quot;
-        Create a new local calendar to sync remote data to. We don't care about the name
-        of the calendar right now - it will be sync'd later.
-        &quot;&quot;&quot;
-
-        home = yield self._localHome(txn)
-        calendar = yield home.createChildWithName(str(uuid4()))
-        returnValue(calendar.id())
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def syncCalendarMetaData(self, txn, migrationRecord):
-        &quot;&quot;&quot;
-        Sync the metadata of a calendar from the remote side.
-
-        @param migrationRecord: current migration record
-        @type migrationRecord: L{CalendarMigrationRecord}
-        &quot;&quot;&quot;
-
-        # Remote changes
-        remote_home = yield self._remoteHome(txn)
-        remote_calendar = yield remote_home.childWithID(migrationRecord.remoteResourceID)
-        if remote_calendar is None:
-            returnValue(None)
-
-        # Get the local calendar and copy the remote meta-data into it
-        local_home = yield self._localHome(txn)
-        local_calendar = yield local_home.childWithID(migrationRecord.localResourceID)
-        yield local_calendar.copyMetadata(remote_calendar)
-        self.accounting(&quot;  Copied calendar meta-data for calendar local-id={0.localResourceID}, remote-id={0.remoteResourceID}.&quot;.format(migrationRecord))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def findObjectsToSync(self, txn, migrationRecord):
-        &quot;&quot;&quot;
-        Find the set of object resources that need to be sync'd from the remote
-        side and the set that need to be removed locally. Take into account the
-        possibility that this is a partial sync and removals or additions might
-        be false positives.
-
-        @param migrationRecord: current migration record
-        @type migrationRecord: L{CalendarMigrationRecord}
-        &quot;&quot;&quot;
-
-        # Remote changes
-        remote_home = yield self._remoteHome(txn)
-        remote_calendar = yield remote_home.childWithID(migrationRecord.remoteResourceID)
-        if remote_calendar is None:
-            returnValue(None)
-        changed, deleted, _ignore_invalid = yield remote_calendar.resourceNamesSinceToken(migrationRecord.lastSyncToken)
-
-        # Get the local calendar so the remote changes can be compared against it
-        local_home = yield self._localHome(txn)
-        local_calendar = yield local_home.childWithID(migrationRecord.localResourceID)
-
-        # Check the md5's on each changed remote with the local one to filter out ones
-        # we don't actually need to sync
-        remote_changes = yield remote_calendar.objectResourcesWithNames(changed)
-        remote_changes = dict([(calendar.name(), calendar) for calendar in remote_changes])
-
-        local_changes = yield local_calendar.objectResourcesWithNames(changed)
-        local_changes = dict([(calendar.name(), calendar) for calendar in local_changes])
-
-        actual_changes = []
-        for name, calendar in remote_changes.items():
-            if name not in local_changes or remote_changes[name].md5() != local_changes[name].md5():
-                actual_changes.append(name)
-
-        returnValue((actual_changes, deleted,))
-
-
-    @inlineCallbacks
-    def purgeDeletedObjectsInBatches(self, migrationRecord, deleted):
-        &quot;&quot;&quot;
-        Purge (silently remove) the specified object resources. This needs to
-        succeed in the case where some or all resources have already been deleted.
-        Do this in batches to keep transaction times small.
-
-        @param migrationRecord: local calendar migration record
-        @type migrationRecord: L{CalendarMigrationRecord}
-        @param deleted: list of names to purge
-        @type deleted: L{list} of L{str}
-        &quot;&quot;&quot;
-
-        remaining = list(deleted)
-        while remaining:
-            yield self.purgeBatch(migrationRecord.localResourceID, remaining[:self.BATCH_SIZE])
-            del remaining[:self.BATCH_SIZE]
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def purgeBatch(self, txn, localID, purge_names):
-        &quot;&quot;&quot;
-        Purge a bunch of object resources from the specified calendar.
-
-        @param txn: transaction to use
-        @type txn: L{CommonStoreTransaction}
-        @param localID: id of the local calendar to sync
-        @type localID: L{int}
-        @param purge_names: object resource names to purge
-        @type purge_names: L{list} of L{str}
-        &quot;&quot;&quot;
-
-        # Find the local objects corresponding to the names being purged
-        local_home = yield self._localHome(txn)
-        local_calendar = yield local_home.childWithID(localID)
-        local_objects = yield local_calendar.objectResourcesWithNames(purge_names)
-
-        for local_object in local_objects:
-            yield local_object.purge()
-            self.accounting(&quot;  Purged calendar object local-id={}.&quot;.format(local_object.id()))
-
-
-    @inlineCallbacks
-    def updateChangedObjectsInBatches(self, migrationRecord, changed):
-        &quot;&quot;&quot;
-        Update the specified object resources. This needs to succeed in the
-        case where some or all resources have already been deleted.
-        Do this in batches to keep transaction times small.
-
-        @param migrationRecord: local calendar migration record
-        @type migrationRecord: L{CalendarMigrationRecord}
-        @param changed: list of names to update
-        @type changed: L{list} of L{str}
-        &quot;&quot;&quot;
-
-        remaining = list(changed)
-        while remaining:
-            yield self.updateBatch(
-                migrationRecord.localResourceID,
-                migrationRecord.remoteResourceID,
-                remaining[:self.BATCH_SIZE],
-            )
-            del remaining[:self.BATCH_SIZE]
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def updateBatch(self, txn, localID, remoteID, remaining):
-        &quot;&quot;&quot;
-        Update a bunch of object resources from the specified remote calendar.
-
-        @param txn: transaction to use
-        @type txn: L{CommonStoreTransaction}
-        @param localID: id of the local calendar to sync
-        @type localID: L{int}
-        @param remoteID: id of the remote calendar to sync with
-        @type remoteID: L{int}
-        @param remaining: object resource names to update
-        @type remaining: L{list} of L{str}
-        &quot;&quot;&quot;
-
-        # Get remote objects
-        remote_home = yield self._remoteHome(txn)
-        remote_calendar = yield remote_home.childWithID(remoteID)
-        if remote_calendar is None:
-            returnValue(None)
-        remote_objects = yield remote_calendar.objectResourcesWithNames(remaining)
-        remote_objects = dict([(obj.name(), obj) for obj in remote_objects])
-
-        # Get local objects
-        local_home = yield self._localHome(txn)
-        local_calendar = yield local_home.childWithID(localID)
-        local_objects = yield local_calendar.objectResourcesWithNames(remaining)
-        local_objects = dict([(obj.name(), obj) for obj in local_objects])
-
-        # Sync ones that still exist - use txn._migrating together with stuffing the remote md5
-        # value onto the component being stored to ensure that the md5 value stored locally
-        # matches the remote one (which should help reduce the need for a client to resync
-        # the data when moved from one pod to the other).
-        txn._migrating = True
-        for obj_name in remote_objects.keys():
-            remote_object = remote_objects[obj_name]
-            remote_data = yield remote_object.component()
-            remote_data.md5 = remote_object.md5()
-            if obj_name in local_objects:
-                local_object = local_objects[obj_name]
-                yield local_object._setComponentInternal(remote_data, internal_state=ComponentUpdateState.RAW)
-                del local_objects[obj_name]
-                log_op = &quot;Updated&quot;
-            else:
-                local_object = yield local_calendar._createCalendarObjectWithNameInternal(obj_name, remote_data, internal_state=ComponentUpdateState.RAW)
-
-                # Maintain the mapping from the remote to local id. Note that this mapping never changes as the ids on both
-                # sides are immutable - though it may get deleted if the local object is removed during sync (via a cascade).
-                yield CalendarObjectMigrationRecord.create(
-                    txn,
-                    calendarHomeResourceID=self.homeId,
-                    remoteResourceID=remote_object.id(),
-                    localResourceID=local_object.id()
-                )
-                log_op = &quot;Created&quot;
-
-            # Sync meta-data such as schedule object, schedule tags, access mode etc
-            yield local_object.copyMetadata(remote_object)
-            self.accounting(&quot;  {} calendar object local-id={}, remote-id={}.&quot;.format(log_op, local_object.id(), remote_object.id()))
-
-        # Purge the ones that remain
-        for local_object in local_objects.values():
-            yield local_object.purge()
-            self.accounting(&quot;  Purged calendar object local-id={}.&quot;.format(local_object.id()))
-
-
-    @inlineCallbacks
-    def syncAttachments(self):
-        &quot;&quot;&quot;
-        Sync attachments (both metadata and actual attachment data) for the home being migrated.
-        &quot;&quot;&quot;
-
-        self.accounting(&quot;Starting: syncAttachments...&quot;)
-
-        # Two steps - sync the table first in one txn, then sync each attachment's data
-        changed_ids, removed_ids = yield self.syncAttachmentTable()
-        self.accounting(&quot;  Attachments changed={}, removed={}&quot;.format(len(changed_ids), len(removed_ids)))
-
-        for local_id in changed_ids:
-            yield self.syncAttachmentData(local_id)
-
-        self.accounting(&quot;Completed: syncAttachments.&quot;)
-
-        returnValue((changed_ids, removed_ids,))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def syncAttachmentTable(self, txn):
-        &quot;&quot;&quot;
-        Sync the ATTACHMENT table data for the home being migrated. Return the list of local attachment ids that
-        now need their attachment data sync'd from the server.
-        &quot;&quot;&quot;
-
-        remote_home = yield self._remoteHome(txn)
-        rattachments = yield remote_home.getAllAttachments()
-        rmap = dict([(attachment.id(), attachment) for attachment in rattachments])
-
-        local_home = yield self._localHome(txn)
-        lattachments = yield local_home.getAllAttachments()
-        lmap = dict([(attachment.id(), attachment) for attachment in lattachments])
-
-        # Figure out the differences
-        records = yield AttachmentMigrationRecord.querysimple(
-            txn, calendarHomeResourceID=self.homeId
-        )
-        mapping = dict([(record.remoteResourceID, record) for record in records])
-
-        # Removed - remove attachment and migration state
-        removed = set(mapping.keys()) - set(rmap.keys())
-        for remove_id in removed:
-            record = mapping[remove_id]
-            att = yield ManagedAttachment.load(txn, None, None, attachmentID=record.localResourceID)
-            if att:
-                yield att.remove(adjustQuota=False)
-            else:
-                yield record.delete()
-
-        # Track which ones need attachment data sync'd over
-        data_ids = set()
-
-        # Added - add new attachment and migration state
-        added = set(rmap.keys()) - set(mapping.keys())
-        for added_id in added:
-            attachment = yield ManagedAttachment._create(txn, None, self.homeId)
-            yield AttachmentMigrationRecord.create(
-                txn,
-                calendarHomeResourceID=self.homeId,
-                remoteResourceID=added_id,
-                localResourceID=attachment.id(),
-            )
-            data_ids.add(attachment.id())
-
-        # Possible updates - check for md5 change and sync
-        updates = set(mapping.keys()) &amp; set(rmap.keys())
-        for updated_id in updates:
-            local_id = mapping[updated_id].localResourceID
-            if rmap[updated_id].md5() != lmap[local_id].md5():
-                yield lmap[local_id].copyRemote(rmap[updated_id])
-                data_ids.add(local_id)
-
-        returnValue((data_ids, removed,))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def syncAttachmentData(self, txn, local_id):
-        &quot;&quot;&quot;
-        Sync the attachment data for the home being migrated.
-        &quot;&quot;&quot;
-
-        remote_home = yield self._remoteHome(txn)
-        local_home = yield self._localHome(txn)
-        attachment = yield local_home.getAttachmentByID(local_id)
-        if attachment is None:
-            returnValue(None)
-
-        records = yield AttachmentMigrationRecord.querysimple(
-            txn, calendarHomeResourceID=self.homeId, localResourceID=local_id
-        )
-        if records:
-            # Read the data from the conduit
-            yield remote_home.readAttachmentData(records[0].remoteResourceID, attachment)
-            self.accounting(&quot;  Read attachment local-id={0.localResourceID}, remote-id={0.remoteResourceID}&quot;.format(records[0]))
-
-
-    @inlineCallbacks
-    def linkAttachments(self):
-        &quot;&quot;&quot;
-        Link attachments to the calendar objects they belong to.
-        &quot;&quot;&quot;
-
-        self.accounting(&quot;Starting: linkAttachments...&quot;)
-
-        # Get the map of links for the remote home
-        links = yield self.getAttachmentLinks()
-        self.accounting(&quot;  Linking {} attachments&quot;.format(len(links)))
-
-        # Get remote-&gt;local ID mappings
-        attachmentIDMap, objectIDMap = yield self.getAttachmentMappings()
-
-        # Batch setting links for the local home
-        len_links = len(links)
-        while links:
-            yield self.makeAttachmentLinks(links[:50], attachmentIDMap, objectIDMap)
-            links = links[50:]
-
-        self.accounting(&quot;Completed: linkAttachments.&quot;)
-
-        returnValue(len_links)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def getAttachmentLinks(self, txn):
-        &quot;&quot;&quot;
-        Get the remote link information.
-        &quot;&quot;&quot;
-
-        # Get the map of links for the remote home
-        remote_home = yield self._remoteHome(txn)
-        links = yield remote_home.getAttachmentLinks()
-        returnValue(links)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def getAttachmentMappings(self, txn):
-        &quot;&quot;&quot;
-        Get the remote-to-local ID mappings for attachments and calendar objects.
-        &quot;&quot;&quot;
-
-        # Get migration mappings
-        records = yield AttachmentMigrationRecord.querysimple(
-            txn, calendarHomeResourceID=self.homeId
-        )
-        attachmentIDMap = dict([(record.remoteResourceID, record) for record in records])
-
-        records = yield CalendarObjectMigrationRecord.querysimple(
-            txn, calendarHomeResourceID=self.homeId
-        )
-        objectIDMap = dict([(record.remoteResourceID, record) for record in records])
-
-        returnValue((attachmentIDMap, objectIDMap,))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def makeAttachmentLinks(self, txn, links, attachmentIDMap, objectIDMap):
-        &quot;&quot;&quot;
-        Map remote links to local links.
-        &quot;&quot;&quot;
-
-        for link in links:
-            # Remote link has an invalid txn at this point so replace that first
-            link._txn = txn
-
-            # Now re-map the attachment ID and calendar_object_id to the local ones
-            link._attachmentID = attachmentIDMap[link._attachmentID].localResourceID
-            link._calendarObjectID = objectIDMap[link._calendarObjectID].localResourceID
-
-            yield link.insert()
-
-
-    @inlineCallbacks
-    def delegateReconcile(self):
-        &quot;&quot;&quot;
-        Sync the delegate assignments from the remote home to the local home. We won't use
-        a fake directory UID locally.
-        &quot;&quot;&quot;
-
-        self.accounting(&quot;Starting: delegateReconcile...&quot;)
-
-        yield self.individualDelegateReconcile()
-        yield self.groupDelegateReconcile()
-        yield self.externalDelegateReconcile()
-
-        self.accounting(&quot;Completed: delegateReconcile.&quot;)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def individualDelegateReconcile(self, txn):
-        &quot;&quot;&quot;
-        Sync the delegate assignments from the remote home to the local home. We won't use
-        a fake directory UID locally.
-        &quot;&quot;&quot;
-        remote_records = yield txn.dumpIndividualDelegatesExternal(self.record)
-        for record in remote_records:
-            yield record.insert(txn)
-
-        self.accounting(&quot;  Found {} individual delegates&quot;.format(len(remote_records)))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def groupDelegateReconcile(self, txn):
-        &quot;&quot;&quot;
-        Sync the delegate assignments from the remote home to the local home. We won't use
-        a fake directory UID locally.
-        &quot;&quot;&quot;
-        remote_records = yield txn.dumpGroupDelegatesExternal(self.record)
-        for delegator, group in remote_records:
-            # We need to make sure the group exists locally first and map the groupID to the local one
-            local_group = yield txn.groupByUID(group.groupUID)
-            delegator.groupID = local_group.groupID
-            yield delegator.insert(txn)
-
-        self.accounting(&quot;  Found {} group delegates&quot;.format(len(remote_records)))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def externalDelegateReconcile(self, txn):
-        &quot;&quot;&quot;
-        Sync the external delegate assignments from the remote home to the local home. We won't use
-        a fake directory UID locally.
-        &quot;&quot;&quot;
-        remote_records = yield txn.dumpExternalDelegatesExternal(self.record)
-        for record in remote_records:
-            yield record.insert(txn)
-
-        self.accounting(&quot;  Found {} external delegates&quot;.format(len(remote_records)))
-
-
-    @inlineCallbacks
-    def groupAttendeeReconcile(self):
-        &quot;&quot;&quot;
-        Sync the remote group attendee links to the local store.
-        &quot;&quot;&quot;
-
-        self.accounting(&quot;Starting: groupAttendeeReconcile...&quot;)
-
-        # Get remote data and local mapping information
-        remote_group_attendees, objectIDMap = yield self.groupAttendeeData()
-        self.accounting(&quot;  Found {} group attendees&quot;.format(len(remote_group_attendees)))
-
-        # Map each result to a local resource (in batches)
-        number_of_links = len(remote_group_attendees)
-        while remote_group_attendees:
-            yield self.groupAttendeeProcess(remote_group_attendees[:50], objectIDMap)
-            remote_group_attendees = remote_group_attendees[50:]
-
-        self.accounting(&quot;Completed: groupAttendeeReconcile.&quot;)
-
-        returnValue(number_of_links)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def groupAttendeeData(self, txn):
-        &quot;&quot;&quot;
-        Fetch the remote group attendee links and the remote-to-local object ID map.
-        &quot;&quot;&quot;
-        remote_home = yield self._remoteHome(txn)
-        remote_group_attendees = yield remote_home.getAllGroupAttendees()
-
-        # Get all remote-&gt;local object maps
-        records = yield CalendarObjectMigrationRecord.querysimple(
-            txn, calendarHomeResourceID=self.homeId
-        )
-        objectIDMap = dict([(record.remoteResourceID, record.localResourceID) for record in records])
-
-        returnValue((remote_group_attendees, objectIDMap,))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def groupAttendeeProcess(self, txn, results, objectIDMap):
-        &quot;&quot;&quot;
-        Insert a batch of remote group attendee links into the local store.
-        &quot;&quot;&quot;
-        # Map each result to a local resource
-        for groupAttendee, group in results:
-            local_group = yield txn.groupByUID(group.groupUID)
-            groupAttendee.groupID = local_group.groupID
-            try:
-                groupAttendee.resourceID = objectIDMap[groupAttendee.resourceID]
-            except KeyError:
-                continue
-            yield groupAttendee.insert(txn)
-
-
-    @inlineCallbacks
-    def notificationsReconcile(self):
-        &quot;&quot;&quot;
-        Sync all the existing L{NotificationObject} resources from the remote store.
-        &quot;&quot;&quot;
-
-        self.accounting(&quot;Starting: notificationsReconcile...&quot;)
-        records = yield self.notificationRecords()
-        self.accounting(&quot;  Found {} notifications&quot;.format(len(records)))
-
-        # Batch setting resources for the local home
-        len_records = len(records)
-        while records:
-            yield self.makeNotifications(records[:50])
-            records = records[50:]
-
-        self.accounting(&quot;Completed: notificationsReconcile.&quot;)
-
-        returnValue(len_records)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def notificationRecords(self, txn):
-        &quot;&quot;&quot;
-        Get all the existing L{NotificationObjectRecord}'s from the remote store.
-        &quot;&quot;&quot;
-
-        notifications = yield self._remoteNotificationsHome(txn)
-        records = yield notifications.notificationObjectRecords()
-        for record in records:
-            # This needs to be reset when added to the local store
-            del record.resourceID
-
-            # Map the remote id to the local one.
-            record.notificationHomeResourceID = notifications.id()
-
-        returnValue(records)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def makeNotifications(self, txn, records):
-        &quot;&quot;&quot;
-        Create L{NotificationObjectRecord} records in the local store.
-        &quot;&quot;&quot;
-
-        notifications = yield NotificationCollection.notificationsWithUID(txn, self.diruid, status=_HOME_STATUS_MIGRATING, create=True)
-        for record in records:
-            # Do this via the &quot;write&quot; API so that sync revisions are updated properly, rather than just
-            # inserting the records directly.
-            notification = yield notifications.writeNotificationObject(record.notificationUID, record.notificationType, record.notificationData)
-            self.accounting(&quot;  Added notification local-id={}.&quot;.format(notification.id()))
-
-
-    @inlineCallbacks
-    def sharedByCollectionsReconcile(self):
-        &quot;&quot;&quot;
-        Sync all the collections shared by the migrating user from the remote store. We will do this one calendar at a time since
-        there could be a large number of sharees per calendar.
-
-        Here is the logic we need: first assume we have three pods: A, B, C, and we are migrating a user from A-&gt;B. We start
-        with a set of shares (X -&gt; Y, where X is the sharer and Y the sharee) on pod A. We migrate the sharer to pod B. We
-        then need to have a set of bind records on pod B, and adjust the set on pod A. Note that no changes are required on pod C.
-
-        Original      |  Changes                     | Changes
-        Shares        |  on B                        | on A
-        --------------|------------------------------|---------------------
-        A -&gt; A        |  B -&gt; A (new)                | B -&gt; A (modify existing)
-        A -&gt; B        |  B -&gt; B (modify existing)    | (removed)
-        A -&gt; C        |  B -&gt; C (new)                | (removed)
-        &quot;&quot;&quot;
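-        # For example: a share from the migrating user to a sharee on pod C is
-        # re-created as a new bind row on pod B (B -&gt; C) by
-        # makeSharedByCollections(), while the rows on pod A are adjusted by
-        # updatedRemoteSharedByCollections() via the remote
-        # migrateBindRecords() call.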
-
-        self.accounting(&quot;Starting: sharedByCollectionsReconcile...&quot;)
-        calendars = yield self.getSyncState()
-
-        len_records = 0
-        for calendar in calendars.values():
-            records, bindUID = yield self.sharedByCollectionRecords(calendar.remoteResourceID, calendar.localResourceID)
-            if not records:
-                continue
-            records = records.items()
-
-            self.accounting(&quot;  Found shared by calendar local-id={0.localResourceID}, remote-id={0.remoteResourceID} with {1} sharees&quot;.format(
-                calendar, len(records),
-            ))
-
-            # Batch setting resources for the local home
-            len_records += len(records)
-            while records:
-                yield self.makeSharedByCollections(records[:50], calendar.localResourceID)
-                records = records[50:]
-
-            # Get groups from remote pod
-            yield self.syncGroupSharees(calendar.remoteResourceID, calendar.localResourceID)
-
-            # Update the remote pod to switch over the shares
-            yield self.updatedRemoteSharedByCollections(calendar.remoteResourceID, bindUID)
-
-        self.accounting(&quot;Completed: sharedByCollectionsReconcile.&quot;)
-
-        returnValue(len_records)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def sharedByCollectionRecords(self, txn, remote_id, local_id):
-        &quot;&quot;&quot;
-        Get all the existing L{CalendarBindRecord}'s from the remote store. Also make sure a
-        bindUID exists for the local calendar.
-        &quot;&quot;&quot;
-
-        remote_home = yield self._remoteHome(txn)
-        remote_calendar = yield remote_home.childWithID(remote_id)
-        records = yield remote_calendar.sharingBindRecords()
-
-        # Check bindUID
-        local_records = yield CalendarBindRecord.querysimple(
-            txn,
-            calendarHomeResourceID=self.homeId,
-            calendarResourceID=local_id,
-        )
-        if records and not local_records[0].bindUID:
-            yield local_records[0].update(bindUID=str(uuid4()))
-
-        returnValue((records, local_records[0].bindUID,))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def makeSharedByCollections(self, txn, records, calendar_id):
-        &quot;&quot;&quot;
-        Create L{CalendarBindRecord} records in the local store.
-        &quot;&quot;&quot;
-
-        for shareeUID, record in records:
-            shareeHome = yield txn.calendarHomeWithUID(shareeUID, create=True)
-
-            # First look for an existing record that could be present if the migrating user had
-            # previously shared with this sharee as a cross-pod share
-            oldrecord = yield CalendarBindRecord.querysimple(
-                txn,
-                calendarHomeResourceID=shareeHome.id(),
-                calendarResourceName=record.calendarResourceName,
-            )
-
-            # FIXME: need to figure out sync-token and bind revision changes
-
-            if oldrecord:
-                # Point old record to the new local calendar being shared
-                yield oldrecord[0].update(
-                    calendarResourceID=calendar_id,
-                    bindRevision=0,
-                )
-                self.accounting(&quot;    Updating existing sharee {}&quot;.format(shareeHome.uid()))
-            else:
-                # Map the record resource ids and insert a new record
-                record.calendarHomeResourceID = shareeHome.id()
-                record.calendarResourceID = calendar_id
-                record.bindRevision = 0
-                yield record.insert(txn)
-                self.accounting(&quot;    Adding new sharee {}&quot;.format(shareeHome.uid()))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def syncGroupSharees(self, txn, remote_id, local_id):
-        &quot;&quot;&quot;
-        Sync the group sharees for a remote share.
-        &quot;&quot;&quot;
-        remote_home = yield self._remoteHome(txn)
-        remote_calendar = yield remote_home.childWithID(remote_id)
-        results = yield remote_calendar.groupSharees()
-        groups = dict([(group.groupID, group.groupUID,) for group in results[&quot;groups&quot;]])
-        for share in results[&quot;sharees&quot;]:
-            local_group = yield txn.groupByUID(groups[share.groupID])
-            share.groupID = local_group.groupID
-            share.calendarID = local_id
-            yield share.insert(txn)
-            self.accounting(&quot;    Adding group sharee {}&quot;.format(local_group.groupUID))
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def updatedRemoteSharedByCollections(self, txn, remote_id, bindUID):
-        &quot;&quot;&quot;
-        Have the remote store update its L{CalendarBindRecord}'s for this share to
-        point at the migrated calendar (identified by its bindUID).
-        &quot;&quot;&quot;
-
-        remote_home = yield self._remoteHome(txn)
-        remote_calendar = yield remote_home.childWithID(remote_id)
-        records = yield remote_calendar.migrateBindRecords(bindUID)
-        self.accounting(&quot;    Updating remote records&quot;)
-        returnValue(records)
-
-
-    @inlineCallbacks
-    def sharedToCollectionsReconcile(self):
-        &quot;&quot;&quot;
-        Sync all the collections shared to the migrating user from the remote store.
-
-        Here is the logic we need: first assume we have three pods: A, B, C, and we are migrating a user from A-&gt;B. We start
-        with a set of shares (X -&gt; Y, where X is the sharer and Y the sharee) whose sharee is on pod A. We migrate the sharee to pod B. We
-        then need to have a set of bind records on pod B, and adjust the set on pod A. Note that no changes are required on pod C.
-
-        Original      |  Changes                     | Changes
-        Shares        |  on B                        | on A
-        --------------|------------------------------|---------------------
-        A -&gt; A        |  A -&gt; B (new)                | A -&gt; B (modify existing)
-        B -&gt; A        |  B -&gt; B (modify existing)    | (removed)
-        C -&gt; A        |  C -&gt; B (new)                | (removed)
-        &quot;&quot;&quot;
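-        # For example: a share from a sharer on pod C to the migrating user is
-        # re-created as a new bind row on pod B (C -&gt; B), with the backing
-        # CALENDAR row created via createCollectionForExternalShare() when no
-        # prior cross-pod share exists, while the rows on pod A are adjusted
-        # by updatedRemoteSharedToCollection() via the remote
-        # migrateBindRecords() call.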
-
-        self.accounting(&quot;Starting: sharedToCollectionsReconcile...&quot;)
-
-        records = yield self.sharedToCollectionRecords()
-        records = records.items()
-        len_records = len(records)
-        self.accounting(&quot;  Found {} shared to collections&quot;.format(len_records))
-
-        while records:
-            yield self.makeSharedToCollections(records[:50])
-            records = records[50:]
-
-        self.accounting(&quot;Completed: sharedToCollectionsReconcile.&quot;)
-
-        returnValue(len_records)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def sharedToCollectionRecords(self, txn):
-        &quot;&quot;&quot;
-        Get the names and sharer UIDs for remote shared calendars.
-        &quot;&quot;&quot;
-
-        # List of calendars from the remote side
-        home = yield self._remoteHome(txn)
-        if home is None:
-            returnValue(None)
-        results = yield home.sharedToBindRecords()
-        returnValue(results)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def makeSharedToCollections(self, txn, records):
-        &quot;&quot;&quot;
-        Create L{CalendarBindRecord} records in the local store.
-        &quot;&quot;&quot;
-
-        for sharerUID, (shareeRecord, ownerRecord, metadataRecord) in records:
-            sharerHome = yield txn.calendarHomeWithUID(sharerUID, create=True)
-
-            # We need to figure out the right thing to do based on whether the sharer is local to this pod
-            # (the one where the migrated user will be hosted) vs located on another pod
-
-            if sharerHome.normal():
-                # First look for an existing record that must be present if the migrating user had
-                # previously been shared with by this sharer
-                oldrecord = yield CalendarBindRecord.querysimple(
-                    txn,
-                    calendarResourceName=shareeRecord.calendarResourceName,
-                )
-                if len(oldrecord) == 1:
-                    # Point old record to the new local calendar home
-                    yield oldrecord[0].update(
-                        calendarHomeResourceID=self.homeId,
-                    )
-                    self.accounting(&quot;  Updated existing local sharer record {}&quot;.format(sharerHome.uid()))
-                else:
-                    raise AssertionError(&quot;An existing share must be present&quot;)
-            else:
-                # We have an external user. That sharer may have already shared the calendar with some other user
-                # on this pod, in which case there is already a CALENDAR table entry for it, and we need the
-                # resource ID from that to use in the new CALENDAR_BIND record we create. If a pre-existing share
-                # is not present, then we have to create the CALENDAR table entry and associated pieces
-
-                remote_id = shareeRecord.calendarResourceID
-
-                # Look for pre-existing share with the same external ID
-                oldrecord = yield CalendarBindRecord.querysimple(
-                    txn,
-                    calendarHomeResourceID=sharerHome.id(),
-                    bindUID=ownerRecord.bindUID,
-                )
-                if oldrecord:
-                    # Re-use the calendar created for a previous share from this external sharer
-                    calendar_id = oldrecord[0].calendarResourceID
-                    log_op = &quot;Updated&quot;
-                else:
-                    sharerView = yield sharerHome.createCollectionForExternalShare(
-                        ownerRecord.calendarResourceName,
-                        ownerRecord.bindUID,
-                        metadataRecord.supportedComponents,
-                    )
-                    calendar_id = sharerView.id()
-                    log_op = &quot;Created&quot;
-
-                shareeRecord.calendarHomeResourceID = self.homeId
-                shareeRecord.calendarResourceID = calendar_id
-                shareeRecord.bindRevision = 0
-                yield shareeRecord.insert(txn)
-                self.accounting(&quot;  {} remote sharer record {}&quot;.format(log_op, sharerHome.uid()))
-
-                yield self.updatedRemoteSharedToCollection(remote_id, txn=txn)
-
-
-    @inTransactionWrapper
-    @inlineCallbacks
-    def updatedRemoteSharedToCollection(self, txn, remote_id):
-        &quot;&quot;&quot;
-        Have the remote store update its L{CalendarBindRecord}'s for this share
-        after the sharee's migration.
-        &quot;&quot;&quot;
-
-        remote_home = yield self._remoteHome(txn)
-        remote_calendar = yield remote_home.childWithID(remote_id)
-        records = yield remote_calendar.migrateBindRecords(None)
-        self.accounting(&quot;    Updating remote records&quot;)
-        returnValue(records)
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationhome_syncpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationhome_syncpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/home_sync.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,1353 @@
</span><ins>+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from functools import wraps
+
+from twext.python.log import Logger
+from twisted.internet.defer import returnValue, inlineCallbacks
+from twisted.python.failure import Failure
+from twistedcaldav.accounting import emitAccounting
+from txdav.caldav.icalendarstore import ComponentUpdateState
+from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
+    CalendarObjectMigrationRecord, AttachmentMigrationRecord
+from txdav.caldav.datastore.sql import ManagedAttachment, CalendarBindRecord
+from txdav.common.datastore.sql_external import NotificationCollectionExternal
+from txdav.common.datastore.sql_notification import NotificationCollection
+from txdav.common.datastore.sql_tables import _HOME_STATUS_MIGRATING, _HOME_STATUS_DISABLED, \
+    _HOME_STATUS_EXTERNAL, _HOME_STATUS_NORMAL
+from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
+
+from uuid import uuid4
+import datetime
+
+log = Logger()
+
+ACCOUNTING_TYPE = &quot;migration&quot;
+ACCOUNTING_LOG = &quot;migration.log&quot;
+
+def inTransactionWrapper(operation):
+    &quot;&quot;&quot;
+    This wrapper converts an instance method that takes a transaction as its
+    first parameter into one where the transaction parameter is an optional
+    keyword argument. If the keyword argument is present and not None, then
+    the instance method is called with that keyword as the first positional
+    argument (i.e., almost a NoOp). If the keyword argument is not present,
+    then a new transaction is created and the instance method called with
+    it as the first positional argument; in addition, the call is wrapped
+    with try/except/else to ensure the internally created transaction is
+    properly committed or aborted.
+
+    So this wrapper allows for a method that requires a transaction to be run
+    with either an existing transaction or one created just for the purpose
+    of running it.
+
+    @param operation: a callable that takes an L{IAsyncTransaction} as its first
+        argument, and returns a value.
+    &quot;&quot;&quot;
+
+    @wraps(operation)
+    @inlineCallbacks
+    def _inTxn(self, *args, **kwargs):
+        label = self.label(operation.__name__)
+        if &quot;txn&quot; in kwargs:
+            txn = kwargs[&quot;txn&quot;]
+            del kwargs[&quot;txn&quot;]
+            result = yield operation(self, txn, *args, **kwargs)
+            returnValue(result)
+        else:
+            txn = self.store.newTransaction(label=label)
+            try:
+                result = yield operation(self, txn, *args, **kwargs)
+            except Exception as ex:
+                f = Failure()
+                yield txn.abort()
+                log.error(&quot;{label} failed: {e}&quot;.format(label=label, e=str(ex)))
+                returnValue(f)
+            else:
+                yield txn.commit()
+                returnValue(result)
+
+    return _inTxn
+
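+# A hedged usage sketch (illustrative only): a method decorated with
+# inTransactionWrapper, such as CrossPodHomeSync.getSyncState() below, may
+# either manage its own transaction or join one supplied by the caller via
+# the &quot;txn&quot; keyword:
+#
+#     syncer = CrossPodHomeSync(store, &quot;some-directory-uid&quot;)
+#     state = yield syncer.getSyncState()         # wrapper creates and then
+#                                                 # commits/aborts its own txn
+#     state = yield syncer.getSyncState(txn=txn)  # joins the caller's txn,
+#                                                 # which is managed elsewhere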
+
+
+# Cross-pod synchronization of an entire calendar home
+class CrossPodHomeSync(object):
+
+    BATCH_SIZE = 50
+
+    def __init__(self, store, diruid, final=False, uselog=None):
+        &quot;&quot;&quot;
+        @param store: the data store
+        @type store: L{CommonDataStore}
+        @param diruid: directory uid of the user whose home is to be sync'd
+        @type diruid: L{str}
+        @param final: indicates whether this is in the final sync stage with the remote home
+            already disabled
+        @type final: L{bool}
+        @param uselog: additional logging written to this object
+        @type uselog: L{File}
+        &quot;&quot;&quot;
+
+        self.store = store
+        self.diruid = diruid
+        self.disabledRemote = final
+        self.uselog = uselog
+        self.record = None
+        self.homeId = None
+
+
+    def label(self, detail):
+        return &quot;Cross-pod Migration Sync for {}: {}&quot;.format(self.diruid, detail)
+
+
+    def accounting(self, logstr):
+        emitAccounting(ACCOUNTING_TYPE, self.record, &quot;{} {}\n&quot;.format(datetime.datetime.now().isoformat(), logstr), filename=ACCOUNTING_LOG)
+        if self.uselog is not None:
+            self.uselog.write(&quot;CrossPodHomeSync: {}\n&quot;.format(logstr))
+
+
+    @inlineCallbacks
+    def migrateHere(self):
+        &quot;&quot;&quot;
+        This is a full, serialized version of a data migration (minus any directory
+        update) that can be triggered via a command line tool. It is designed to
+        minimize down time for the migrating user.
+        &quot;&quot;&quot;
+
+        # Step 1 - initial full sync
+        yield self.sync()
+
+        # Step 2 - increment sync (since the initial sync may take a long time
+        # to run we should do one incremental sync before bringing down the
+        # account being migrated)
+        yield self.sync()
+
+        # Step 3 - disable remote home
+        # NB Any failure from this point on will need to be caught and
+        # handled by re-enabling the old home (and fixing any sharing state
+        # that may have been changed)
+        yield self.disableRemoteHome()
+
+        # Step 4 - final incremental sync
+        yield self.sync()
+
+        # Step 5 - final overall sync of meta-data (including sharing re-linking)
+        yield self.finalSync()
+
+        # Step 6 - enable new home
+        yield self.enableLocalHome()
+
+        # Step 7 - remove remote home
+        yield self.removeRemoteHome()
+
+        # Step 8 - say phew! TODO: Actually alert everyone else
+        pass
+
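+    # A hedged driver sketch (command line wiring assumed, not part of this
+    # module):
+    #
+    #     syncer = CrossPodHomeSync(store, diruid, uselog=sys.stdout)
+    #     yield syncer.migrateHere()
+    #
+    # with the directory update that re-points the user at this pod done
+    # separately once migrateHere() completes.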
+
+    @inlineCallbacks
+    def sync(self):
+        &quot;&quot;&quot;
+        Initiate a sync of the home. This is a simple data sync that does not
+        reconcile sharing state etc. The L{finalSync} method will do a full
+        sharing reconcile as well as disable the migration source home.
+        &quot;&quot;&quot;
+
+        yield self.loadRecord()
+        self.accounting(&quot;Starting: sync...&quot;)
+        yield self.prepareCalendarHome()
+
+        # Calendar list and calendar data
+        yield self.syncCalendarList()
+
+        # Sync home metadata such as alarms, default calendars, etc
+        yield self.syncCalendarHomeMetaData()
+
+        # Sync attachments
+        yield self.syncAttachments()
+
+        self.accounting(&quot;Completed: sync.\n&quot;)
+
+
+    @inlineCallbacks
+    def finalSync(self):
+        &quot;&quot;&quot;
+        Do the final sync up of any additional data, re-link sharing bind
+        rows, recalculate quota etc.
+        &quot;&quot;&quot;
+
+        yield self.loadRecord()
+        self.accounting(&quot;Starting: finalSync...&quot;)
+        yield self.prepareCalendarHome()
+
+        # Link attachments to resources: ATTACHMENT_CALENDAR_OBJECT table
+        yield self.linkAttachments()
+
+        # TODO: Re-write attachment URIs - not sure if we need this as reverse proxy may take care of it
+        pass
+
+        # Group attendee reconcile
+        yield self.groupAttendeeReconcile()
+
+        # Delegates reconcile
+        yield self.delegateReconcile()
+
+        # Shared collections reconcile (including group sharees)
+        yield self.sharedByCollectionsReconcile()
+        yield self.sharedToCollectionsReconcile()
+
+        # Notifications
+        yield self.notificationsReconcile()
+
+        # TODO: work items
+        pass
+
+        self.accounting(&quot;Completed: finalSync.\n&quot;)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def disableRemoteHome(self, txn):
+        &quot;&quot;&quot;
+        Mark the remote home as disabled.
+        &quot;&quot;&quot;
+
+        yield self.loadRecord()
+        self.accounting(&quot;Starting: disableRemoteHome...&quot;)
+        yield self.prepareCalendarHome()
+
+        # Calendar home
+        remote_home = yield self._remoteHome(txn)
+        yield remote_home.setStatus(_HOME_STATUS_DISABLED)
+
+        # Notification home
+        notifications = yield self._remoteNotificationsHome(txn)
+        yield notifications.setStatus(_HOME_STATUS_DISABLED)
+
+        self.disabledRemote = True
+
+        self.accounting(&quot;Completed: disableRemoteHome.\n&quot;)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def enableLocalHome(self, txn):
+        &quot;&quot;&quot;
+        Mark the local home as enabled and remove any previously existing external home.
+        &quot;&quot;&quot;
+
+        yield self.loadRecord()
+        self.accounting(&quot;Starting: enableLocalHome...&quot;)
+        yield self.prepareCalendarHome()
+
+        # Disable any local external homes
+        oldhome = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_EXTERNAL)
+        if oldhome is not None:
+            yield oldhome.setLocalStatus(_HOME_STATUS_DISABLED)
+        oldnotifications = yield txn.notificationsWithUID(self.diruid, status=_HOME_STATUS_EXTERNAL)
+        if oldnotifications:
+            yield oldnotifications.setLocalStatus(_HOME_STATUS_DISABLED)
+
+        # Enable the migrating ones
+        newhome = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
+        if newhome is not None:
+            yield newhome.setStatus(_HOME_STATUS_NORMAL)
+        newnotifications = yield txn.notificationsWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
+        if newnotifications:
+            yield newnotifications.setStatus(_HOME_STATUS_NORMAL)
+
+        # TODO: purge the old ones
+        pass
+
+        self.accounting(&quot;Completed: enableLocalHome.\n&quot;)
+
+
+    @inlineCallbacks
+    def removeRemoteHome(self):
+        &quot;&quot;&quot;
+        Remove all the old data on the remote pod.
+        &quot;&quot;&quot;
+
+        # TODO: implement API on CommonHome to purge the old data without
+        # any side-effects (scheduling, sharing etc).
+        yield self.loadRecord()
+        self.accounting(&quot;Starting: removeRemoteHome...&quot;)
+        yield self.prepareCalendarHome()
+
+        self.accounting(&quot;Completed: removeRemoteHome.\n&quot;)
+
+
+    @inlineCallbacks
+    def loadRecord(self):
+        &quot;&quot;&quot;
+        Load the directory record of the user whose home is being migrated.
+        &quot;&quot;&quot;
+
+        if self.record is None:
+            self.record = yield self.store.directoryService().recordWithUID(self.diruid)
+            if self.record is None:
+                raise DirectoryRecordNotFoundError(&quot;Cross-pod Migration Sync missing directory record for {}&quot;.format(self.diruid))
+            if self.record.thisServer():
+                raise ValueError(&quot;Cross-pod Migration Sync cannot sync with user already on this server: {}&quot;.format(self.diruid))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def prepareCalendarHome(self, txn):
+        &quot;&quot;&quot;
+        Make sure the inactive home to migrate into is present on this pod.
+        &quot;&quot;&quot;
+
+        if self.homeId is None:
+            home = yield self._localHome(txn)
+            if home is None:
+                if self.disabledRemote:
+                    self.homeId = None
+                else:
+                    home = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING, create=True)
+                    self.accounting(&quot;  Created new home collection to migrate into.&quot;)
+            self.homeId = home.id() if home is not None else None
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def syncCalendarHomeMetaData(self, txn):
+        &quot;&quot;&quot;
+        Make sure the home meta-data (alarms, default calendars) is properly sync'd
+        &quot;&quot;&quot;
+
+        self.accounting(&quot;Starting: syncCalendarHomeMetaData...&quot;)
+        remote_home = yield self._remoteHome(txn)
+        yield remote_home.readMetaData()
+
+        calendars = yield CalendarMigrationRecord.querysimple(txn, calendarHomeResourceID=self.homeId)
+        calendarIDMap = dict((item.remoteResourceID, item.localResourceID) for item in calendars)
+
+        local_home = yield self._localHome(txn)
+        yield local_home.copyMetadata(remote_home, calendarIDMap)
+
+        self.accounting(&quot;Completed: syncCalendarHomeMetaData.&quot;)
+
+
+    @inlineCallbacks
+    def _remoteHome(self, txn):
+        &quot;&quot;&quot;
+        Create a synthetic external home object that maps to the actual remote home.
+        &quot;&quot;&quot;
+
+        from txdav.caldav.datastore.sql_external import CalendarHomeExternal
+        resourceID = yield txn.store().conduit.send_home_resource_id(txn, self.record, migrating=True)
+        home = CalendarHomeExternal.makeSyntheticExternalHome(txn, self.record.uid, resourceID) if resourceID is not None else None
+        if self.disabledRemote:
+            home._migratingHome = True
+        returnValue(home)
+
+
+    @inlineCallbacks
+    def _remoteNotificationsHome(self, txn):
+        &quot;&quot;&quot;
+        Create a synthetic external home object that maps to the actual remote notifications home.
+        &quot;&quot;&quot;
+
+        notifications = yield NotificationCollectionExternal.notificationsWithUID(txn, self.diruid, create=True)
+        if self.disabledRemote:
+            notifications._migratingHome = True
+        returnValue(notifications)
+
+
+    def _localHome(self, txn):
+        &quot;&quot;&quot;
+        Get the home on this pod that will have data migrated to it.
+        &quot;&quot;&quot;
+
+        return txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
+
+
+    @inlineCallbacks
+    def syncCalendarList(self):
+        &quot;&quot;&quot;
+        Synchronize each owned calendar.
+        &quot;&quot;&quot;
+
+        self.accounting(&quot;Starting: syncCalendarList...&quot;)
+
+        # Remote sync details
+        remote_sync_state = yield self.getCalendarSyncList()
+        self.accounting(&quot;  Found {} remote calendars to sync.&quot;.format(len(remote_sync_state)))
+
+        # Get local sync details from local DB
+        local_sync_state = yield self.getSyncState()
+        self.accounting(&quot;  Found {} local calendars to sync.&quot;.format(len(local_sync_state)))
+
+        # Remove local calendars no longer on the remote side
+        yield self.purgeLocal(local_sync_state, remote_sync_state)
+
+        # Sync each calendar that matches on both sides
+        for remoteID in remote_sync_state.keys():
+            yield self.syncCalendar(remoteID, local_sync_state, remote_sync_state)
+
+        self.accounting(&quot;Completed: syncCalendarList.&quot;)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def getCalendarSyncList(self, txn):
+        &quot;&quot;&quot;
+        Get the names and sync-tokens for each remote owned calendar.
+        &quot;&quot;&quot;
+
+        # List of calendars from the remote side
+        home = yield self._remoteHome(txn)
+        if home is None:
+            returnValue(None)
+        calendars = yield home.loadChildren()
+        results = {}
+        for calendar in calendars:
+            if calendar.owned():
+                sync_token = yield calendar.syncToken()
+                results[calendar.id()] = CalendarMigrationRecord.make(
+                    calendarHomeResourceID=home.id(),
+                    remoteResourceID=calendar.id(),
+                    localResourceID=0,
+                    lastSyncToken=sync_token,
+                )
+
+        returnValue(results)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def getSyncState(self, txn):
+        &quot;&quot;&quot;
+        Get local synchronization state for the home being migrated.
+        &quot;&quot;&quot;
+        records = yield CalendarMigrationRecord.querysimple(
+            txn, calendarHomeResourceID=self.homeId
+        )
+        returnValue(dict([(record.remoteResourceID, record) for record in records]))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def updateSyncState(self, txn, stateRecord, newSyncToken):
+        &quot;&quot;&quot;
+        Update or insert an L{CalendarMigrationRecord} with the new specified sync token.
+        &quot;&quot;&quot;
+        if stateRecord.isnew():
+            stateRecord.lastSyncToken = newSyncToken
+            yield stateRecord.insert(txn)
+        else:
+            # The existing stateRecord has a stale txn, but valid column values. We have
+            # to duplicate it before we can give it a different txn.
+            stateRecord = stateRecord.duplicate()
+            stateRecord.transaction = txn
+            yield stateRecord.update(lastSyncToken=newSyncToken)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def purgeLocal(self, txn, local_sync_state, remote_sync_state):
+        &quot;&quot;&quot;
+        Remove (silently - i.e., no scheduling) local calendars that are no longer on the remote side.
+
+        @param txn: transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param local_sync_state: local sync state
+        @type local_sync_state: L{dict}
+        @param remote_sync_state: remote sync state
+        @type remote_sync_state: L{dict}
+        &quot;&quot;&quot;
+        home = yield self._localHome(txn)
+        # The sync state dicts are keyed by the remote resource id
+        for remoteID in set(local_sync_state.keys()) - set(remote_sync_state.keys()):
+            localID = local_sync_state[remoteID].localResourceID
+            calendar = yield home.childWithID(localID)
+            if calendar is not None:
+                yield calendar.purge()
+            del local_sync_state[remoteID]
+            self.accounting(&quot;  Purged calendar local-id={} that no longer exists on the remote pod.&quot;.format(localID))
+
+
+    @inlineCallbacks
+    def syncCalendar(self, remoteID, local_sync_state, remote_sync_state):
+        &quot;&quot;&quot;
+        Sync the contents of a calendar from the remote side. The local calendar may need to be created
+        on initial sync. Make use of sync tokens to avoid unnecessary work.
+
+        @param remoteID: id of the remote calendar to sync
+        @type remoteID: L{int}
+        @param local_sync_state: local sync state
+        @type local_sync_state: L{dict}
+        @param remote_sync_state: remote sync state
+        @type remote_sync_state: L{dict}
+        &quot;&quot;&quot;
+
+        self.accounting(&quot;Starting: syncCalendar.&quot;)
+
+        # See if we need to create the local one first
+        if remoteID not in local_sync_state:
+            localID = yield self.newCalendar()
+            local_sync_state[remoteID] = CalendarMigrationRecord.make(
+                calendarHomeResourceID=self.homeId,
+                remoteResourceID=remoteID,
+                localResourceID=localID,
+                lastSyncToken=None,
+            )
+            self.accounting(&quot;  Created new calendar local-id={}, remote-id={}.&quot;.format(localID, remoteID))
+        else:
+            localID = local_sync_state.get(remoteID).localResourceID
+            self.accounting(&quot;  Updating calendar local-id={}, remote-id={}.&quot;.format(localID, remoteID))
+        local_record = local_sync_state.get(remoteID)
+
+        remote_token = remote_sync_state[remoteID].lastSyncToken
+        if local_record.lastSyncToken != remote_token:
+            # Sync meta-data such as name, alarms, supported-components, transp, etc
+            yield self.syncCalendarMetaData(local_record)
+
+            # Sync object resources
+            changed, removed = yield self.findObjectsToSync(local_record)
+            self.accounting(&quot;  Calendar objects changed={}, removed={}.&quot;.format(len(changed), len(removed)))
+            yield self.purgeDeletedObjectsInBatches(local_record, removed)
+            yield self.updateChangedObjectsInBatches(local_record, changed)
+
+        yield self.updateSyncState(local_record, remote_token)
+        self.accounting(&quot;Completed: syncCalendar.&quot;)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def newCalendar(self, txn):
+        &quot;&quot;&quot;
+        Create a new local calendar to sync remote data to. We don't care about the name
+        of the calendar right now - it will be sync'd later.
+        &quot;&quot;&quot;
+
+        home = yield self._localHome(txn)
+        calendar = yield home.createChildWithName(str(uuid4()))
+        returnValue(calendar.id())
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def syncCalendarMetaData(self, txn, migrationRecord):
+        &quot;&quot;&quot;
+        Sync the metadata of a calendar from the remote side.
+
+        @param migrationRecord: current migration record
+        @type migrationRecord: L{CalendarMigrationRecord}
+        &quot;&quot;&quot;
+
+        # Remote changes
+        remote_home = yield self._remoteHome(txn)
+        remote_calendar = yield remote_home.childWithID(migrationRecord.remoteResourceID)
+        if remote_calendar is None:
+            returnValue(None)
+
+        # Copy the meta-data to the local calendar
+        local_home = yield self._localHome(txn)
+        local_calendar = yield local_home.childWithID(migrationRecord.localResourceID)
+        yield local_calendar.copyMetadata(remote_calendar)
+        self.accounting(&quot;  Copied calendar meta-data for calendar local-id={0.localResourceID}, remote-id={0.remoteResourceID}.&quot;.format(migrationRecord))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def findObjectsToSync(self, txn, migrationRecord):
+        &quot;&quot;&quot;
+        Find the set of object resources that need to be sync'd from the remote
+        side and the set that need to be removed locally. Take into account the
+        possibility that this is a partial sync and removals or additions might
+        be false positives.
+
+        @param migrationRecord: current migration record
+        @type migrationRecord: L{CalendarMigrationRecord}
+        &quot;&quot;&quot;
+
+        # Remote changes
+        remote_home = yield self._remoteHome(txn)
+        remote_calendar = yield remote_home.childWithID(migrationRecord.remoteResourceID)
+        if remote_calendar is None:
+            returnValue(None)
+        changed, deleted, _ignore_invalid = yield remote_calendar.resourceNamesSinceToken(migrationRecord.lastSyncToken)
+
+        # Get the local calendar so the changed set can be compared against it
+        local_home = yield self._localHome(txn)
+        local_calendar = yield local_home.childWithID(migrationRecord.localResourceID)
+
+        # Compare the md5 of each changed remote object resource with the local
+        # one to filter out those that don't actually need to be sync'd
+        remote_changes = yield remote_calendar.objectResourcesWithNames(changed)
+        remote_changes = dict([(calendar.name(), calendar) for calendar in remote_changes])
+
+        local_changes = yield local_calendar.objectResourcesWithNames(changed)
+        local_changes = dict([(calendar.name(), calendar) for calendar in local_changes])
+
+        actual_changes = []
+        for name, calendar in remote_changes.items():
+            if name not in local_changes or remote_changes[name].md5() != local_changes[name].md5():
+                actual_changes.append(name)
+
+        returnValue((actual_changes, deleted,))
+
+
+    @inlineCallbacks
+    def purgeDeletedObjectsInBatches(self, migrationRecord, deleted):
+        &quot;&quot;&quot;
+        Purge (silently remove) the specified object resources. This needs to
+        succeed in the case where some or all resources have already been deleted.
+        Do this in batches to keep transaction times small.
+
+        @param migrationRecord: local calendar migration record
+        @type migrationRecord: L{CalendarMigrationRecord}
+        @param deleted: list of names to purge
+        @type deleted: L{list} of L{str}
+        &quot;&quot;&quot;
+
+        remaining = list(deleted)
+        while remaining:
+            yield self.purgeBatch(migrationRecord.localResourceID, remaining[:self.BATCH_SIZE])
+            del remaining[:self.BATCH_SIZE]
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def purgeBatch(self, txn, localID, purge_names):
+        &quot;&quot;&quot;
+        Purge a bunch of object resources from the specified calendar.
+
+        @param txn: transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param localID: id of the local calendar to sync
+        @type localID: L{int}
+        @param purge_names: object resource names to purge
+        @type purge_names: L{list} of L{str}
+        &quot;&quot;&quot;
+
+        # Find the local objects that need to be purged
+        local_home = yield self._localHome(txn)
+        local_calendar = yield local_home.childWithID(localID)
+        local_objects = yield local_calendar.objectResourcesWithNames(purge_names)
+
+        for local_object in local_objects:
+            yield local_object.purge()
+            self.accounting(&quot;  Purged calendar object local-id={}.&quot;.format(local_object.id()))
+
+
+    @inlineCallbacks
+    def updateChangedObjectsInBatches(self, migrationRecord, changed):
+        &quot;&quot;&quot;
+        Update the specified object resources. This needs to succeed in the
+        case where some or all resources have already been deleted.
+        Do this in batches to keep transaction times small.
+
+        @param migrationRecord: local calendar migration record
+        @type migrationRecord: L{CalendarMigrationRecord}
+        @param changed: list of names to update
+        @type changed: L{list} of L{str}
+        &quot;&quot;&quot;
+
+        remaining = list(changed)
+        while remaining:
+            yield self.updateBatch(
+                migrationRecord.localResourceID,
+                migrationRecord.remoteResourceID,
+                remaining[:self.BATCH_SIZE],
+            )
+            del remaining[:self.BATCH_SIZE]
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def updateBatch(self, txn, localID, remoteID, remaining):
+        &quot;&quot;&quot;
+        Update a bunch of object resources from the specified remote calendar.
+
+        @param txn: transaction to use
+        @type txn: L{CommonStoreTransaction}
+        @param localID: id of the local calendar to sync
+        @type localID: L{int}
+        @param remoteID: id of the remote calendar to sync with
+        @type remoteID: L{int}
+        @param remaining: object resource names to update
+        @type remaining: L{list} of L{str}
+        &quot;&quot;&quot;
+
+        # Get remote objects
+        remote_home = yield self._remoteHome(txn)
+        remote_calendar = yield remote_home.childWithID(remoteID)
+        if remote_calendar is None:
+            returnValue(None)
+        remote_objects = yield remote_calendar.objectResourcesWithNames(remaining)
+        remote_objects = dict([(obj.name(), obj) for obj in remote_objects])
+
+        # Get local objects
+        local_home = yield self._localHome(txn)
+        local_calendar = yield local_home.childWithID(localID)
+        local_objects = yield local_calendar.objectResourcesWithNames(remaining)
+        local_objects = dict([(obj.name(), obj) for obj in local_objects])
+
+        # Sync the ones that still exist. Use txn._migrating, together with stuffing the remote md5
+        # value onto the component being stored, to ensure that the md5 value stored locally
+        # matches the remote one (which should reduce the need for a client to resync
+        # its data when it is moved from one pod to the other).
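+        # Sketch of the intent: a component fetched with remote md5 ABC123 and
+        # stored RAW with remote_data.md5 = ABC123 should leave the local copy
+        # carrying that same md5, so a client cache keyed off it (e.g. via the
+        # ETag) presumably stays valid after the move.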
+        txn._migrating = True
+        for obj_name in remote_objects.keys():
+            remote_object = remote_objects[obj_name]
+            remote_data = yield remote_object.component()
+            remote_data.md5 = remote_object.md5()
+            if obj_name in local_objects:
+                local_object = local_objects[obj_name]
+                yield local_object._setComponentInternal(remote_data, internal_state=ComponentUpdateState.RAW)
+                del local_objects[obj_name]
+                log_op = &quot;Updated&quot;
+            else:
+                local_object = yield local_calendar._createCalendarObjectWithNameInternal(obj_name, remote_data, internal_state=ComponentUpdateState.RAW)
+
+                # Maintain the mapping from the remote to local id. Note that this mapping never changes as the ids on both
+                # sides are immutable - though it may get deleted if the local object is removed during sync (via a cascade).
+                yield CalendarObjectMigrationRecord.create(
+                    txn,
+                    calendarHomeResourceID=self.homeId,
+                    remoteResourceID=remote_object.id(),
+                    localResourceID=local_object.id()
+                )
+                log_op = &quot;Created&quot;
+
+            # Sync meta-data such as schedule object state, schedule tags, access mode, etc.
+            yield local_object.copyMetadata(remote_object)
+            self.accounting(&quot;  {} calendar object local-id={}, remote-id={}.&quot;.format(log_op, local_object.id(), remote_object.id()))
+
+        # Purge the ones that remain
+        for local_object in local_objects.values():
+            yield local_object.purge()
+            self.accounting(&quot;  Purged calendar object local-id={}.&quot;.format(local_object.id()))
+
+
+    @inlineCallbacks
+    def syncAttachments(self):
+        &quot;&quot;&quot;
+        Sync attachments (both metadata and actual attachment data) for the home being migrated.
+        &quot;&quot;&quot;
+
+        self.accounting(&quot;Starting: syncAttachments...&quot;)
+
+        # Two steps - sync the table first in one txn, then sync each attachment's data
+        changed_ids, removed_ids = yield self.syncAttachmentTable()
+        self.accounting(&quot;  Attachments changed={}, removed={}&quot;.format(len(changed_ids), len(removed_ids)))
+
+        for local_id in changed_ids:
+            yield self.syncAttachmentData(local_id)
+
+        self.accounting(&quot;Completed: syncAttachments.&quot;)
+
+        returnValue((changed_ids, removed_ids,))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def syncAttachmentTable(self, txn):
+        &quot;&quot;&quot;
+        Sync the ATTACHMENT table data for the home being migrated. Return the list of local attachment ids that
+        now need their attachment data sync'd from the server.
+        &quot;&quot;&quot;
+
+        remote_home = yield self._remoteHome(txn)
+        rattachments = yield remote_home.getAllAttachments()
+        rmap = dict([(attachment.id(), attachment) for attachment in rattachments])
+
+        local_home = yield self._localHome(txn)
+        lattachments = yield local_home.getAllAttachments()
+        lmap = dict([(attachment.id(), attachment) for attachment in lattachments])
+
+        # Figure out the differences
+        records = yield AttachmentMigrationRecord.querysimple(
+            txn, calendarHomeResourceID=self.homeId
+        )
+        mapping = dict([(record.remoteResourceID, record) for record in records])
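+        # Partition the ids with set arithmetic: tracked remote ids that the
+        # remote home no longer has are removed; untracked remote ids are
+        # added; ids present in both are candidate updates.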
+
+        # Removed - remove attachment and migration state
+        removed = set(mapping.keys()) - set(rmap.keys())
+        for remove_id in removed:
+            record = mapping[remove_id]
+            att = yield ManagedAttachment.load(txn, None, None, attachmentID=record.localResourceID)
+            if att:
+                yield att.remove(adjustQuota=False)
+            else:
+                yield record.delete()
+
+        # Track which ones need attachment data sync'd over
+        data_ids = set()
+
+        # Added - add new attachment and migration state
+        added = set(rmap.keys()) - set(mapping.keys())
+        for added_id in added:
+            attachment = yield ManagedAttachment._create(txn, None, self.homeId)
+            yield AttachmentMigrationRecord.create(
+                txn,
+                calendarHomeResourceID=self.homeId,
+                remoteResourceID=added_id,
+                localResourceID=attachment.id(),
+            )
+            data_ids.add(attachment.id())
+
+        # Possible updates - check for md5 change and sync
+        updates = set(mapping.keys()) &amp; set(rmap.keys())
+        for updated_id in updates:
+            local_id = mapping[updated_id].localResourceID
+            if rmap[updated_id].md5() != lmap[local_id].md5():
+                yield lmap[local_id].copyRemote(rmap[updated_id])
+                data_ids.add(local_id)
+
+        returnValue((data_ids, removed,))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def syncAttachmentData(self, txn, local_id):
+        &quot;&quot;&quot;
+        Sync the data for one attachment in the home being migrated.
+        &quot;&quot;&quot;
+
+        remote_home = yield self._remoteHome(txn)
+        local_home = yield self._localHome(txn)
+        attachment = yield local_home.getAttachmentByID(local_id)
+        if attachment is None:
+            returnValue(None)
+
+        records = yield AttachmentMigrationRecord.querysimple(
+            txn, calendarHomeResourceID=self.homeId, localResourceID=local_id
+        )
+        if records:
+            # Read the data from the conduit
+            yield remote_home.readAttachmentData(records[0].remoteResourceID, attachment)
+            self.accounting(&quot;  Read attachment local-id={0.localResourceID}, remote-id={0.remoteResourceID}&quot;.format(records[0]))
+
+
+    @inlineCallbacks
+    def linkAttachments(self):
+        &quot;&quot;&quot;
+        Link attachments to the calendar objects they belong to.
+        &quot;&quot;&quot;
+
+        self.accounting(&quot;Starting: linkAttachments...&quot;)
+
+        # Get the map of links for the remote home
+        links = yield self.getAttachmentLinks()
+        self.accounting(&quot;  Linking {} attachments&quot;.format(len(links)))
+
+        # Get remote-&gt;local ID mappings
+        attachmentIDMap, objectIDMap = yield self.getAttachmentMappings()
+
+        # Batch setting links for the local home
+        len_links = len(links)
+        while links:
+            yield self.makeAttachmentLinks(links[:50], attachmentIDMap, objectIDMap)
+            links = links[50:]
+
+        self.accounting(&quot;Completed: linkAttachments.&quot;)
+
+        returnValue(len_links)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def getAttachmentLinks(self, txn):
+        &quot;&quot;&quot;
+        Get the remote link information.
+        &quot;&quot;&quot;
+
+        # Get the map of links for the remote home
+        remote_home = yield self._remoteHome(txn)
+        links = yield remote_home.getAttachmentLinks()
+        returnValue(links)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def getAttachmentMappings(self, txn):
+        &quot;&quot;&quot;
+        Get the remote-to-local id mappings for attachments and calendar objects.
+        &quot;&quot;&quot;
+
+        # Get migration mappings
+        records = yield AttachmentMigrationRecord.querysimple(
+            txn, calendarHomeResourceID=self.homeId
+        )
+        attachmentIDMap = dict([(record.remoteResourceID, record) for record in records])
+
+        records = yield CalendarObjectMigrationRecord.querysimple(
+            txn, calendarHomeResourceID=self.homeId
+        )
+        objectIDMap = dict([(record.remoteResourceID, record) for record in records])
+
+        returnValue((attachmentIDMap, objectIDMap,))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def makeAttachmentLinks(self, txn, links, attachmentIDMap, objectIDMap):
+        &quot;&quot;&quot;
+        Map remote links to local links.
+        &quot;&quot;&quot;
+
+        for link in links:
+            # Remote link has an invalid txn at this point so replace that first
+            link._txn = txn
+
+            # Now re-map the attachment ID and calendar_object_id to the local ones
+            link._attachmentID = attachmentIDMap[link._attachmentID].localResourceID
+            link._calendarObjectID = objectIDMap[link._calendarObjectID].localResourceID
+
+            yield link.insert()
+
+
+    @inlineCallbacks
+    def delegateReconcile(self):
+        &quot;&quot;&quot;
+        Sync the delegate assignments from the remote home to the local home. We won't use
+        a fake directory UID locally.
+        &quot;&quot;&quot;
+
+        self.accounting(&quot;Starting: delegateReconcile...&quot;)
+
+        yield self.individualDelegateReconcile()
+        yield self.groupDelegateReconcile()
+        yield self.externalDelegateReconcile()
+
+        self.accounting(&quot;Completed: delegateReconcile.&quot;)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def individualDelegateReconcile(self, txn):
+        &quot;&quot;&quot;
+        Sync the individual delegate assignments from the remote home to the local home. We won't use
+        a fake directory UID locally.
+        &quot;&quot;&quot;
+        remote_records = yield txn.dumpIndividualDelegatesExternal(self.record)
+        for record in remote_records:
+            yield record.insert(txn)
+
+        self.accounting(&quot;  Found {} individual delegates&quot;.format(len(remote_records)))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def groupDelegateReconcile(self, txn):
+        &quot;&quot;&quot;
+        Sync the group delegate assignments from the remote home to the local home. We won't use
+        a fake directory UID locally.
+        &quot;&quot;&quot;
+        remote_records = yield txn.dumpGroupDelegatesExternal(self.record)
+        for delegator, group in remote_records:
+            # We need to make sure the group exists locally first and map the groupID to the local one
+            local_group = yield txn.groupByUID(group.groupUID)
+            delegator.groupID = local_group.groupID
+            yield delegator.insert(txn)
+
+        self.accounting(&quot;  Found {} group delegates&quot;.format(len(remote_records)))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def externalDelegateReconcile(self, txn):
+        &quot;&quot;&quot;
+        Sync the external delegate assignments from the remote home to the local home. We won't use
+        a fake directory UID locally.
+        &quot;&quot;&quot;
+        remote_records = yield txn.dumpExternalDelegatesExternal(self.record)
+        for record in remote_records:
+            yield record.insert(txn)
+
+        self.accounting(&quot;  Found {} external delegates&quot;.format(len(remote_records)))
+
+
+    @inlineCallbacks
+    def groupAttendeeReconcile(self):
+        &quot;&quot;&quot;
+        Sync the remote group attendee links to the local store.
+        &quot;&quot;&quot;
+
+        self.accounting(&quot;Starting: groupAttendeeReconcile...&quot;)
+
+        # Get remote data and local mapping information
+        remote_group_attendees, objectIDMap = yield self.groupAttendeeData()
+        self.accounting(&quot;  Found {} group attendees&quot;.format(len(remote_group_attendees)))
+
+        # Map each result to a local resource (in batches)
+        number_of_links = len(remote_group_attendees)
+        while remote_group_attendees:
+            yield self.groupAttendeeProcess(remote_group_attendees[:50], objectIDMap)
+            remote_group_attendees = remote_group_attendees[50:]
+
+        self.accounting(&quot;Completed: groupAttendeeReconcile.&quot;)
+
+        returnValue(number_of_links)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def groupAttendeeData(self, txn):
+        &quot;&quot;&quot;
+        Fetch the remote group attendee records along with the remote-to-local calendar object id map.
+        &quot;&quot;&quot;
+        remote_home = yield self._remoteHome(txn)
+        remote_group_attendees = yield remote_home.getAllGroupAttendees()
+
+        # Get all remote-&gt;local object maps
+        records = yield CalendarObjectMigrationRecord.querysimple(
+            txn, calendarHomeResourceID=self.homeId
+        )
+        objectIDMap = dict([(record.remoteResourceID, record.localResourceID) for record in records])
+
+        returnValue((remote_group_attendees, objectIDMap,))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def groupAttendeeProcess(self, txn, results, objectIDMap):
+        &quot;&quot;&quot;
+        Process a batch of remote group attendee records, re-mapping each to local group and object resources.
+        &quot;&quot;&quot;
+        # Map each result to a local resource
+        for groupAttendee, group in results:
+            local_group = yield txn.groupByUID(group.groupUID)
+            groupAttendee.groupID = local_group.groupID
+            try:
+                groupAttendee.resourceID = objectIDMap[groupAttendee.resourceID]
+            except KeyError:
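+                # The referenced calendar object was not migrated (it may have
+                # been removed during the sync), so skip this attendee link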
+                continue
+            yield groupAttendee.insert(txn)
+
+
+    @inlineCallbacks
+    def notificationsReconcile(self):
+        &quot;&quot;&quot;
+        Sync all the existing L{NotificationObject} resources from the remote store.
+        &quot;&quot;&quot;
+
+        self.accounting(&quot;Starting: notificationsReconcile...&quot;)
+        records = yield self.notificationRecords()
+        self.accounting(&quot;  Found {} notifications&quot;.format(len(records)))
+
+        # Batch setting resources for the local home
+        len_records = len(records)
+        while records:
+            yield self.makeNotifications(records[:50])
+            records = records[50:]
+
+        self.accounting(&quot;Completed: notificationsReconcile.&quot;)
+
+        returnValue(len_records)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def notificationRecords(self, txn):
+        &quot;&quot;&quot;
+        Get all the existing L{NotificationObjectRecord}s from the remote store.
+        &quot;&quot;&quot;
+
+        notifications = yield self._remoteNotificationsHome(txn)
+        records = yield notifications.notificationObjectRecords()
+        for record in records:
+            # This needs to be reset when added to the local store
+            del record.resourceID
+
+            # Map the remote id to the local one.
+            record.notificationHomeResourceID = notifications.id()
+
+        returnValue(records)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def makeNotifications(self, txn, records):
+        &quot;&quot;&quot;
+        Create L{NotificationObjectRecord} records in the local store.
+        &quot;&quot;&quot;
+
+        notifications = yield NotificationCollection.notificationsWithUID(txn, self.diruid, status=_HOME_STATUS_MIGRATING, create=True)
+        for record in records:
+            # Do this via the &quot;write&quot; API so that sync revisions are updated properly, rather than just
+            # inserting the records directly.
+            notification = yield notifications.writeNotificationObject(record.notificationUID, record.notificationType, record.notificationData)
+            self.accounting(&quot;  Added notification local-id={}.&quot;.format(notification.id()))
+
+
+    @inlineCallbacks
+    def sharedByCollectionsReconcile(self):
+        &quot;&quot;&quot;
+        Sync all the collections shared by the migrating user from the remote store. We will do this one calendar at a time since
+        there could be a large number of sharees per calendar.
+
+        Here is the logic we need. Assume three pods: A, B, and C, with a user migrating from A to B. We start
+        with a set of shares (X -&gt; Y, where X is the sharer and Y the sharee) on pod A. After the sharer is
+        migrated to pod B, we need a set of bind records on pod B and must adjust the set on pod A. Note that no
+        changes are required on pod C.
+
+        Original      |  Changes                     | Changes
+        Shares        |  on B                        | on A
+        --------------|------------------------------|---------------------
+        A -&gt; A        |  B -&gt; A (new)                | B -&gt; A (modify existing)
+        A -&gt; B        |  B -&gt; B (modify existing)    | (removed)
+        A -&gt; C        |  B -&gt; C (new)                | (removed)
+        &quot;&quot;&quot;
+
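+        # Illustrative example (using the test fixture accounts): user01
+        # migrating from pod A to pod B shared a calendar with user02 (also on
+        # A) and with puser01 (on B). Afterwards pod B holds the authoritative
+        # bind records for both sharees, and pod A's records are adjusted or
+        # removed by updatedRemoteSharedByCollections() below.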
+        self.accounting(&quot;Starting: sharedByCollectionsReconcile...&quot;)
+        calendars = yield self.getSyncState()
+
+        len_records = 0
+        for calendar in calendars.values():
+            records, bindUID = yield self.sharedByCollectionRecords(calendar.remoteResourceID, calendar.localResourceID)
+            if not records:
+                continue
+            records = records.items()
+
+            self.accounting(&quot;  Found shared by calendar local-id={0.localResourceID}, remote-id={0.remoteResourceID} with {1} sharees&quot;.format(
+                calendar, len(records),
+            ))
+
+            # Batch setting resources for the local home
+            len_records += len(records)
+            while records:
+                yield self.makeSharedByCollections(records[:50], calendar.localResourceID)
+                records = records[50:]
+
+            # Get groups from remote pod
+            yield self.syncGroupSharees(calendar.remoteResourceID, calendar.localResourceID)
+
+            # Update the remote pod to switch over the shares
+            yield self.updatedRemoteSharedByCollections(calendar.remoteResourceID, bindUID)
+
+        self.accounting(&quot;Completed: sharedByCollectionsReconcile.&quot;)
+
+        returnValue(len_records)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def sharedByCollectionRecords(self, txn, remote_id, local_id):
+        &quot;&quot;&quot;
+        Get all the existing L{CalendarBindRecord}s from the remote store. Also make sure a
+        bindUID exists for the local calendar.
+        &quot;&quot;&quot;
+
+        remote_home = yield self._remoteHome(txn)
+        remote_calendar = yield remote_home.childWithID(remote_id)
+        records = yield remote_calendar.sharingBindRecords()
+
+        # Make sure the local calendar has a bindUID that remote pods can use to reference it
+        local_records = yield CalendarBindRecord.querysimple(
+            txn,
+            calendarHomeResourceID=self.homeId,
+            calendarResourceID=local_id,
+        )
+        if records and not local_records[0].bindUID:
+            yield local_records[0].update(bindUID=str(uuid4()))
+
+        returnValue((records, local_records[0].bindUID,))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def makeSharedByCollections(self, txn, records, calendar_id):
+        &quot;&quot;&quot;
+        Create L{CalendarBindRecord} records in the local store.
+        &quot;&quot;&quot;
+
+        for shareeUID, record in records:
+            shareeHome = yield txn.calendarHomeWithUID(shareeUID, create=True)
+
+            # First look for an existing record that could be present if the migrating user had
+            # previously shared with this sharee as a cross-pod share
+            oldrecord = yield CalendarBindRecord.querysimple(
+                txn,
+                calendarHomeResourceID=shareeHome.id(),
+                calendarResourceName=record.calendarResourceName,
+            )
+
+            # FIXME: need to figure out sync-token and bind revision changes
+
+            if oldrecord:
+                # Point old record to the new local calendar being shared
+                yield oldrecord[0].update(
+                    calendarResourceID=calendar_id,
+                    bindRevision=0,
+                )
+                self.accounting(&quot;    Updating existing sharee {}&quot;.format(shareeHome.uid()))
+            else:
+                # Map the record resource ids and insert a new record
+                record.calendarHomeResourceID = shareeHome.id()
+                record.calendarResourceID = calendar_id
+                record.bindRevision = 0
+                yield record.insert(txn)
+                self.accounting(&quot;    Adding new sharee {}&quot;.format(shareeHome.uid()))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def syncGroupSharees(self, txn, remote_id, local_id):
+        &quot;&quot;&quot;
+        Sync the group sharees for a remote share.
+        &quot;&quot;&quot;
+        remote_home = yield self._remoteHome(txn)
+        remote_calendar = yield remote_home.childWithID(remote_id)
+        results = yield remote_calendar.groupSharees()
+        groups = dict([(group.groupID, group.groupUID,) for group in results[&quot;groups&quot;]])
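+        # Map remote group row ids to group UIDs so each sharee row can be
+        # re-pointed at the matching local group row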
+        for share in results[&quot;sharees&quot;]:
+            local_group = yield txn.groupByUID(groups[share.groupID])
+            share.groupID = local_group.groupID
+            share.calendarID = local_id
+            yield share.insert(txn)
+            self.accounting(&quot;    Adding group sharee {}&quot;.format(local_group.groupUID))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def updatedRemoteSharedByCollections(self, txn, remote_id, bindUID):
+        &quot;&quot;&quot;
+        Update the bind records on the remote store to switch the shares over to the migrated calendar (via its bindUID).
+        &quot;&quot;&quot;
+
+        remote_home = yield self._remoteHome(txn)
+        remote_calendar = yield remote_home.childWithID(remote_id)
+        records = yield remote_calendar.migrateBindRecords(bindUID)
+        self.accounting(&quot;    Updating remote records&quot;)
+        returnValue(records)
+
+
+    @inlineCallbacks
+    def sharedToCollectionsReconcile(self):
+        &quot;&quot;&quot;
+        Sync all the collections shared to the migrating user from the remote store.
+
+        Here is the logic we need. Assume three pods: A, B, and C, with a user migrating from A to B. We start
+        with a set of shares (X -&gt; Y, where X is the sharer and Y the sharee) whose sharee is on pod A. After the
+        sharee is migrated to pod B, we need a set of bind records on pod B and must adjust the set on pod A. Note
+        that no changes are required on pod C.
+
+        Original      |  Changes                     | Changes
+        Shares        |  on B                        | on A
+        --------------|------------------------------|---------------------
+        A -&gt; A        |  A -&gt; B (new)                | A -&gt; B (modify existing)
+        B -&gt; A        |  B -&gt; B (modify existing)    | (removed)
+        C -&gt; A        |  C -&gt; B (new)                | (removed)
+        &quot;&quot;&quot;
+
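+        # Illustrative example (using the test fixture accounts): puser01 on
+        # pod B shared a calendar with user01, who is migrating from pod A to
+        # pod B. The existing cross-pod bind on A is superseded by a normal
+        # local bind on B, per the B -&gt; B row in the table above.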
+        self.accounting(&quot;Starting: sharedToCollectionsReconcile...&quot;)
+
+        records = yield self.sharedToCollectionRecords()
+        records = records.items()
+        len_records = len(records)
+        self.accounting(&quot;  Found {} shared to collections&quot;.format(len_records))
+
+        while records:
+            yield self.makeSharedToCollections(records[:50])
+            records = records[50:]
+
+        self.accounting(&quot;Completed: sharedToCollectionsReconcile.&quot;)
+
+        returnValue(len_records)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def sharedToCollectionRecords(self, txn):
+        &quot;&quot;&quot;
+        Get the names and sharer UIDs for remote shared calendars.
+        &quot;&quot;&quot;
+
+        # List of calendars from the remote side
+        home = yield self._remoteHome(txn)
+        if home is None:
+            returnValue(None)
+        results = yield home.sharedToBindRecords()
+        returnValue(results)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def makeSharedToCollections(self, txn, records):
+        &quot;&quot;&quot;
+        Create L{CalendarBindRecord} records in the local store.
+        &quot;&quot;&quot;
+
+        for sharerUID, (shareeRecord, ownerRecord, metadataRecord) in records:
+            sharerHome = yield txn.calendarHomeWithUID(sharerUID, create=True)
+
+            # We need to figure out the right thing to do based on whether the sharer is local to this pod
+            # (the one where the migrated user will be hosted) vs located on another pod
+
+            if sharerHome.normal():
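+                # The sharer's home is hosted on this pod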
+                # First look for an existing record that must be present if the migrating user had
+                # previously been shared with by this sharer
+                oldrecord = yield CalendarBindRecord.querysimple(
+                    txn,
+                    calendarResourceName=shareeRecord.calendarResourceName,
+                )
+                if len(oldrecord) == 1:
+                    # Point old record to the new local calendar home
+                    yield oldrecord[0].update(
+                        calendarHomeResourceID=self.homeId,
+                    )
+                    self.accounting(&quot;  Updated existing local sharer record {}&quot;.format(sharerHome.uid()))
+                else:
+                    raise AssertionError(&quot;An existing share must be present&quot;)
+            else:
+                # We have an external user. That sharer may have already shared the calendar with some other user
+                # on this pod, in which case there is already a CALENDAR table entry for it, and we need the
+                # resource ID from that to use in the new CALENDAR_BIND record we create. If a pre-existing share
+                # is not present, then we have to create the CALENDAR table entry and associated pieces
+
+                remote_id = shareeRecord.calendarResourceID
+
+                # Look for pre-existing share with the same external ID
+                oldrecord = yield CalendarBindRecord.querysimple(
+                    txn,
+                    calendarHomeResourceID=sharerHome.id(),
+                    bindUID=ownerRecord.bindUID,
+                )
+                if oldrecord:
+                    # Re-use the calendar already created for this external sharer
+                    calendar_id = oldrecord[0].calendarResourceID
+                    log_op = &quot;Updated&quot;
+                else:
+                    sharerView = yield sharerHome.createCollectionForExternalShare(
+                        ownerRecord.calendarResourceName,
+                        ownerRecord.bindUID,
+                        metadataRecord.supportedComponents,
+                    )
+                    calendar_id = sharerView.id()
+                    log_op = &quot;Created&quot;
+
+                shareeRecord.calendarHomeResourceID = self.homeId
+                shareeRecord.calendarResourceID = calendar_id
+                shareeRecord.bindRevision = 0
+                yield shareeRecord.insert(txn)
+                self.accounting(&quot;  {} remote sharer record {}&quot;.format(log_op, sharerHome.uid()))
+
+                yield self.updatedRemoteSharedToCollection(remote_id, txn=txn)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def updatedRemoteSharedToCollection(self, txn, remote_id):
+        &quot;&quot;&quot;
+        Update the bind records on the remote store for a collection shared to the migrated user.
+        &quot;&quot;&quot;
+
+        remote_home = yield self._remoteHome(txn)
+        remote_calendar = yield remote_home.childWithID(remote_id)
+        records = yield remote_calendar.migrateBindRecords(None)
+        self.accounting(&quot;    Updating remote records&quot;)
+        returnValue(records)
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationsync_metadatapy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/sync_metadata.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,58 +0,0 @@
</span><del>-##
-# Copyright (c) 2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from twext.enterprise.dal.record import Record, fromTable
-from txdav.common.datastore.sql_tables import schema
-from twext.enterprise.dal.syntax import Parameter, Delete
-from twisted.internet.defer import inlineCallbacks
-
-&quot;&quot;&quot;
-Module that manages store-level metadata objects used during the migration process.
-&quot;&quot;&quot;
-
-class CalendarMigrationRecord(Record, fromTable(schema.CALENDAR_MIGRATION)):
-    &quot;&quot;&quot;
-    @DynamicAttrs
-    L{Record} for L{schema.CALENDAR_MIGRATION}.
-    &quot;&quot;&quot;
-
-    @classmethod
-    @inlineCallbacks
-    def deleteremotes(cls, txn, homeid, remotes):
-        return Delete(
-            From=cls.table,
-            Where=(cls.calendarHomeResourceID == homeid).And(
-                cls.remoteResourceID.In(Parameter(&quot;remotes&quot;, len(remotes)))
-            ),
-        ).on(txn, remotes=remotes)
-
-
-
-class CalendarObjectMigrationRecord(Record, fromTable(schema.CALENDAR_OBJECT_MIGRATION)):
-    &quot;&quot;&quot;
-    @DynamicAttrs
-    L{Record} for L{schema.CALENDAR_OBJECT_MIGRATION}.
-    &quot;&quot;&quot;
-    pass
-
-
-
-class AttachmentMigrationRecord(Record, fromTable(schema.ATTACHMENT_MIGRATION)):
-    &quot;&quot;&quot;
-    @DynamicAttrs
-    L{Record} for L{schema.ATTACHMENT_MIGRATION}.
-    &quot;&quot;&quot;
-    pass
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationsync_metadatapyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationsync_metadatapy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/sync_metadata.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,58 @@
</span><ins>+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import Record, fromTable
+from txdav.common.datastore.sql_tables import schema
+from twext.enterprise.dal.syntax import Parameter, Delete
+from twisted.internet.defer import inlineCallbacks
+
+&quot;&quot;&quot;
+Module that manages store-level metadata objects used during the migration process.
+&quot;&quot;&quot;
+
+class CalendarMigrationRecord(Record, fromTable(schema.CALENDAR_MIGRATION)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.CALENDAR_MIGRATION}.
+    &quot;&quot;&quot;
+
+    @classmethod
+    @inlineCallbacks
+    def deleteremotes(cls, txn, homeid, remotes):
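+        # Bulk-delete the migration rows for the given remote ids. Note that
+        # Parameter(&quot;remotes&quot;, len(remotes)) expands to one placeholder per
+        # id for the IN clause.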
+        yield Delete(
+            From=cls.table,
+            Where=(cls.calendarHomeResourceID == homeid).And(
+                cls.remoteResourceID.In(Parameter(&quot;remotes&quot;, len(remotes)))
+            ),
+        ).on(txn, remotes=remotes)
+
+
+
+class CalendarObjectMigrationRecord(Record, fromTable(schema.CALENDAR_OBJECT_MIGRATION)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.CALENDAR_OBJECT_MIGRATION}.
+    &quot;&quot;&quot;
+    pass
+
+
+
+class AttachmentMigrationRecord(Record, fromTable(schema.ATTACHMENT_MIGRATION)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.ATTACHMENT_MIGRATION}.
+    &quot;&quot;&quot;
+    pass
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtest__init__py"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/__init__.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,15 +0,0 @@
</span><del>-##
-# Copyright (c) 2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtest__init__pyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationtest__init__py"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/__init__.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,15 @@
</span><ins>+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsaugmentsxml"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/augments.xml        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,142 +0,0 @@
</span><del>-&lt;?xml version=&quot;1.0&quot; encoding=&quot;utf-8&quot;?&gt;
-
-&lt;!--
-Copyright (c) 2009-2015 Apple Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- --&gt;
-
-&lt;!DOCTYPE augments SYSTEM &quot;../../../conf/auth/augments.dtd&quot;&gt;
-
-&lt;augments&gt;
-        &lt;record&gt;
-            &lt;uid&gt;user01&lt;/uid&gt;
-            &lt;server-id&gt;A&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;user02&lt;/uid&gt;
-            &lt;server-id&gt;A&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;user03&lt;/uid&gt;
-            &lt;server-id&gt;A&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;user04&lt;/uid&gt;
-            &lt;server-id&gt;A&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;user05&lt;/uid&gt;
-            &lt;server-id&gt;A&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;user06&lt;/uid&gt;
-            &lt;server-id&gt;A&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;user07&lt;/uid&gt;
-            &lt;server-id&gt;A&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;user08&lt;/uid&gt;
-            &lt;server-id&gt;A&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;user09&lt;/uid&gt;
-            &lt;server-id&gt;A&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;user10&lt;/uid&gt;
-            &lt;server-id&gt;A&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;puser01&lt;/uid&gt;
-            &lt;server-id&gt;B&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;puser02&lt;/uid&gt;
-            &lt;server-id&gt;B&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;puser03&lt;/uid&gt;
-            &lt;server-id&gt;B&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;puser04&lt;/uid&gt;
-            &lt;server-id&gt;B&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;puser05&lt;/uid&gt;
-            &lt;server-id&gt;B&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;puser06&lt;/uid&gt;
-            &lt;server-id&gt;B&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;puser07&lt;/uid&gt;
-            &lt;server-id&gt;B&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;puser08&lt;/uid&gt;
-            &lt;server-id&gt;B&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;puser09&lt;/uid&gt;
-            &lt;server-id&gt;B&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-        &lt;record&gt;
-            &lt;uid&gt;puser10&lt;/uid&gt;
-            &lt;server-id&gt;B&lt;/server-id&gt;
-            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
-            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
-        &lt;/record&gt;
-&lt;/augments&gt;
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsaugmentsxmlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationtestaccountsaugmentsxml"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/augments.xml) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,142 @@
</span><ins>+&lt;?xml version=&quot;1.0&quot; encoding=&quot;utf-8&quot;?&gt;
+
+&lt;!--
+Copyright (c) 2009-2015 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ --&gt;
+
+&lt;!DOCTYPE augments SYSTEM &quot;../../../conf/auth/augments.dtd&quot;&gt;
+
+&lt;augments&gt;
+        &lt;record&gt;
+            &lt;uid&gt;user01&lt;/uid&gt;
+            &lt;server-id&gt;A&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;user02&lt;/uid&gt;
+            &lt;server-id&gt;A&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;user03&lt;/uid&gt;
+            &lt;server-id&gt;A&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;user04&lt;/uid&gt;
+            &lt;server-id&gt;A&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;user05&lt;/uid&gt;
+            &lt;server-id&gt;A&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;user06&lt;/uid&gt;
+            &lt;server-id&gt;A&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;user07&lt;/uid&gt;
+            &lt;server-id&gt;A&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;user08&lt;/uid&gt;
+            &lt;server-id&gt;A&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;user09&lt;/uid&gt;
+            &lt;server-id&gt;A&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;user10&lt;/uid&gt;
+            &lt;server-id&gt;A&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;puser01&lt;/uid&gt;
+            &lt;server-id&gt;B&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;puser02&lt;/uid&gt;
+            &lt;server-id&gt;B&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;puser03&lt;/uid&gt;
+            &lt;server-id&gt;B&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;puser04&lt;/uid&gt;
+            &lt;server-id&gt;B&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;puser05&lt;/uid&gt;
+            &lt;server-id&gt;B&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;puser06&lt;/uid&gt;
+            &lt;server-id&gt;B&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;puser07&lt;/uid&gt;
+            &lt;server-id&gt;B&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;puser08&lt;/uid&gt;
+            &lt;server-id&gt;B&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;puser09&lt;/uid&gt;
+            &lt;server-id&gt;B&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+        &lt;record&gt;
+            &lt;uid&gt;puser10&lt;/uid&gt;
+            &lt;server-id&gt;B&lt;/server-id&gt;
+            &lt;enable-calendar&gt;true&lt;/enable-calendar&gt;
+            &lt;enable-addressbook&gt;true&lt;/enable-addressbook&gt;
+        &lt;/record&gt;
+&lt;/augments&gt;
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsgroupAccountsxml"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,211 +0,0 @@
</span><del>-&lt;?xml version=&quot;1.0&quot; encoding=&quot;utf-8&quot;?&gt;
-
-&lt;!--
-Copyright (c) 2006-2015 Apple Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- --&gt;
-
-&lt;!DOCTYPE accounts SYSTEM &quot;accounts.dtd&quot;&gt;
-
-&lt;directory realm=&quot;Test Realm&quot;&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;short-name&gt;user01&lt;/short-name&gt;
-            &lt;uid&gt;user01&lt;/uid&gt;
-            &lt;guid&gt;10000000-0000-0000-0000-000000000001&lt;/guid&gt;
-            &lt;password&gt;user01&lt;/password&gt;
-            &lt;full-name&gt;User 01&lt;/full-name&gt;
-            &lt;email&gt;user01@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;short-name&gt;user02&lt;/short-name&gt;
-            &lt;uid&gt;user02&lt;/uid&gt;
-            &lt;guid&gt;10000000-0000-0000-0000-000000000002&lt;/guid&gt;
-            &lt;password&gt;user02&lt;/password&gt;
-            &lt;full-name&gt;User 02&lt;/full-name&gt;
-            &lt;email&gt;user02@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;short-name&gt;user03&lt;/short-name&gt;
-            &lt;uid&gt;user03&lt;/uid&gt;
-            &lt;guid&gt;10000000-0000-0000-0000-000000000003&lt;/guid&gt;
-            &lt;password&gt;user03&lt;/password&gt;
-            &lt;full-name&gt;User 03&lt;/full-name&gt;
-            &lt;email&gt;user03@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;short-name&gt;user04&lt;/short-name&gt;
-            &lt;uid&gt;user04&lt;/uid&gt;
-            &lt;guid&gt;10000000-0000-0000-0000-000000000004&lt;/guid&gt;
-            &lt;password&gt;user04&lt;/password&gt;
-            &lt;full-name&gt;User 04&lt;/full-name&gt;
-            &lt;email&gt;user04@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;short-name&gt;user05&lt;/short-name&gt;
-            &lt;uid&gt;user05&lt;/uid&gt;
-            &lt;guid&gt;10000000-0000-0000-0000-000000000005&lt;/guid&gt;
-            &lt;password&gt;user05&lt;/password&gt;
-            &lt;full-name&gt;User 05&lt;/full-name&gt;
-            &lt;email&gt;user05@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;short-name&gt;user06&lt;/short-name&gt;
-            &lt;uid&gt;user06&lt;/uid&gt;
-            &lt;guid&gt;10000000-0000-0000-0000-000000000006&lt;/guid&gt;
-            &lt;password&gt;user06&lt;/password&gt;
-            &lt;full-name&gt;User 06&lt;/full-name&gt;
-            &lt;email&gt;user06@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;short-name&gt;user07&lt;/short-name&gt;
-            &lt;uid&gt;user07&lt;/uid&gt;
-            &lt;guid&gt;10000000-0000-0000-0000-000000000007&lt;/guid&gt;
-            &lt;password&gt;user07&lt;/password&gt;
-            &lt;full-name&gt;User 07&lt;/full-name&gt;
-            &lt;email&gt;user07@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;short-name&gt;user08&lt;/short-name&gt;
-            &lt;uid&gt;user08&lt;/uid&gt;
-            &lt;guid&gt;10000000-0000-0000-0000-000000000008&lt;/guid&gt;
-            &lt;password&gt;user08&lt;/password&gt;
-            &lt;full-name&gt;User 08&lt;/full-name&gt;
-            &lt;email&gt;user08@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;short-name&gt;user09&lt;/short-name&gt;
-            &lt;uid&gt;user09&lt;/uid&gt;
-            &lt;guid&gt;10000000-0000-0000-0000-000000000009&lt;/guid&gt;
-            &lt;password&gt;user09&lt;/password&gt;
-            &lt;full-name&gt;User 09&lt;/full-name&gt;
-            &lt;email&gt;user09@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;short-name&gt;user10&lt;/short-name&gt;
-            &lt;uid&gt;user10&lt;/uid&gt;
-            &lt;guid&gt;10000000-0000-0000-0000-000000000010&lt;/guid&gt;
-            &lt;password&gt;user10&lt;/password&gt;
-            &lt;full-name&gt;User 10&lt;/full-name&gt;
-            &lt;email&gt;user10@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;group&quot;&gt;
-            &lt;short-name&gt;group01&lt;/short-name&gt;
-            &lt;uid&gt;group01&lt;/uid&gt;
-            &lt;guid&gt;20000000-0000-0000-0000-000000000001&lt;/guid&gt;
-            &lt;full-name&gt;Group 01&lt;/full-name&gt;
-            &lt;email&gt;group01@example.com&lt;/email&gt;
-            &lt;member-uid&gt;user01&lt;/member-uid&gt;
-            &lt;member-uid&gt;puser01&lt;/member-uid&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;group&quot;&gt;
-            &lt;short-name&gt;group02&lt;/short-name&gt;
-            &lt;uid&gt;group02&lt;/uid&gt;
-            &lt;guid&gt;20000000-0000-0000-0000-000000000002&lt;/guid&gt;
-            &lt;full-name&gt;Group 02&lt;/full-name&gt;
-            &lt;email&gt;group02@example.com&lt;/email&gt;
-            &lt;member-uid&gt;user06&lt;/member-uid&gt;
-            &lt;member-uid&gt;user07&lt;/member-uid&gt;
-            &lt;member-uid&gt;user08&lt;/member-uid&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;group&quot;&gt;
-            &lt;short-name&gt;group03&lt;/short-name&gt;
-            &lt;uid&gt;group03&lt;/uid&gt;
-            &lt;guid&gt;20000000-0000-0000-0000-000000000003&lt;/guid&gt;
-            &lt;full-name&gt;Group 03&lt;/full-name&gt;
-            &lt;email&gt;group03@example.com&lt;/email&gt;
-            &lt;member-uid&gt;user07&lt;/member-uid&gt;
-            &lt;member-uid&gt;user08&lt;/member-uid&gt;
-            &lt;member-uid&gt;user09&lt;/member-uid&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;group&quot;&gt;
-            &lt;short-name&gt;group04&lt;/short-name&gt;
-            &lt;uid&gt;group04&lt;/uid&gt;
-            &lt;guid&gt;20000000-0000-0000-0000-000000000004&lt;/guid&gt;
-            &lt;full-name&gt;Group 04&lt;/full-name&gt;
-            &lt;email&gt;group04@example.com&lt;/email&gt;
-            &lt;member-uid&gt;group02&lt;/member-uid&gt;
-            &lt;member-uid&gt;group03&lt;/member-uid&gt;
-            &lt;member-uid&gt;user10&lt;/member-uid&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;uid&gt;puser01&lt;/uid&gt;
-            &lt;short-name&gt;puser01&lt;/short-name&gt;
-            &lt;password&gt;puser01&lt;/password&gt;
-            &lt;full-name&gt;Puser 01&lt;/full-name&gt;
-            &lt;email&gt;puser01@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;uid&gt;puser02&lt;/uid&gt;
-            &lt;short-name&gt;puser02&lt;/short-name&gt;
-            &lt;password&gt;puser02&lt;/password&gt;
-            &lt;full-name&gt;Puser 02&lt;/full-name&gt;
-            &lt;email&gt;puser02@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;uid&gt;puser03&lt;/uid&gt;
-            &lt;short-name&gt;puser03&lt;/short-name&gt;
-            &lt;password&gt;puser03&lt;/password&gt;
-            &lt;full-name&gt;Puser 03&lt;/full-name&gt;
-            &lt;email&gt;puser03@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;uid&gt;puser04&lt;/uid&gt;
-            &lt;short-name&gt;puser04&lt;/short-name&gt;
-            &lt;password&gt;puser04&lt;/password&gt;
-            &lt;full-name&gt;Puser 04&lt;/full-name&gt;
-            &lt;email&gt;puser04@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;uid&gt;puser05&lt;/uid&gt;
-            &lt;short-name&gt;puser05&lt;/short-name&gt;
-            &lt;password&gt;puser05&lt;/password&gt;
-            &lt;full-name&gt;Puser 05&lt;/full-name&gt;
-            &lt;email&gt;puser05@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;uid&gt;puser06&lt;/uid&gt;
-            &lt;short-name&gt;puser06&lt;/short-name&gt;
-            &lt;password&gt;puser06&lt;/password&gt;
-            &lt;full-name&gt;Puser 06&lt;/full-name&gt;
-            &lt;email&gt;puser06@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;uid&gt;puser07&lt;/uid&gt;
-            &lt;short-name&gt;puser07&lt;/short-name&gt;
-            &lt;password&gt;puser07&lt;/password&gt;
-            &lt;full-name&gt;Puser 07&lt;/full-name&gt;
-            &lt;email&gt;puser07@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;uid&gt;puser08&lt;/uid&gt;
-            &lt;short-name&gt;puser08&lt;/short-name&gt;
-            &lt;password&gt;puser08&lt;/password&gt;
-            &lt;full-name&gt;Puser 08&lt;/full-name&gt;
-            &lt;email&gt;puser08@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;uid&gt;puser09&lt;/uid&gt;
-            &lt;short-name&gt;puser09&lt;/short-name&gt;
-            &lt;password&gt;puser09&lt;/password&gt;
-            &lt;full-name&gt;Puser 09&lt;/full-name&gt;
-            &lt;email&gt;puser09@example.com&lt;/email&gt;
-        &lt;/record&gt;
-        &lt;record type=&quot;user&quot;&gt;
-            &lt;uid&gt;puser10&lt;/uid&gt;
-            &lt;short-name&gt;puser10&lt;/short-name&gt;
-            &lt;password&gt;puser10&lt;/password&gt;
-            &lt;full-name&gt;Puser 10&lt;/full-name&gt;
-            &lt;email&gt;puser10@example.com&lt;/email&gt;
-        &lt;/record&gt;
-&lt;/directory&gt;
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsgroupAccountsxmlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationtestaccountsgroupAccountsxml"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,211 @@
</span><ins>+&lt;?xml version=&quot;1.0&quot; encoding=&quot;utf-8&quot;?&gt;
+
+&lt;!--
+Copyright (c) 2006-2015 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ --&gt;
+
+&lt;!DOCTYPE accounts SYSTEM &quot;accounts.dtd&quot;&gt;
+
+&lt;directory realm=&quot;Test Realm&quot;&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;short-name&gt;user01&lt;/short-name&gt;
+            &lt;uid&gt;user01&lt;/uid&gt;
+            &lt;guid&gt;10000000-0000-0000-0000-000000000001&lt;/guid&gt;
+            &lt;password&gt;user01&lt;/password&gt;
+            &lt;full-name&gt;User 01&lt;/full-name&gt;
+            &lt;email&gt;user01@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;short-name&gt;user02&lt;/short-name&gt;
+            &lt;uid&gt;user02&lt;/uid&gt;
+            &lt;guid&gt;10000000-0000-0000-0000-000000000002&lt;/guid&gt;
+            &lt;password&gt;user02&lt;/password&gt;
+            &lt;full-name&gt;User 02&lt;/full-name&gt;
+            &lt;email&gt;user02@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;short-name&gt;user03&lt;/short-name&gt;
+            &lt;uid&gt;user03&lt;/uid&gt;
+            &lt;guid&gt;10000000-0000-0000-0000-000000000003&lt;/guid&gt;
+            &lt;password&gt;user03&lt;/password&gt;
+            &lt;full-name&gt;User 03&lt;/full-name&gt;
+            &lt;email&gt;user03@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;short-name&gt;user04&lt;/short-name&gt;
+            &lt;uid&gt;user04&lt;/uid&gt;
+            &lt;guid&gt;10000000-0000-0000-0000-000000000004&lt;/guid&gt;
+            &lt;password&gt;user04&lt;/password&gt;
+            &lt;full-name&gt;User 04&lt;/full-name&gt;
+            &lt;email&gt;user04@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;short-name&gt;user05&lt;/short-name&gt;
+            &lt;uid&gt;user05&lt;/uid&gt;
+            &lt;guid&gt;10000000-0000-0000-0000-000000000005&lt;/guid&gt;
+            &lt;password&gt;user05&lt;/password&gt;
+            &lt;full-name&gt;User 05&lt;/full-name&gt;
+            &lt;email&gt;user05@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;short-name&gt;user06&lt;/short-name&gt;
+            &lt;uid&gt;user06&lt;/uid&gt;
+            &lt;guid&gt;10000000-0000-0000-0000-000000000006&lt;/guid&gt;
+            &lt;password&gt;user06&lt;/password&gt;
+            &lt;full-name&gt;User 06&lt;/full-name&gt;
+            &lt;email&gt;user06@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;short-name&gt;user07&lt;/short-name&gt;
+            &lt;uid&gt;user07&lt;/uid&gt;
+            &lt;guid&gt;10000000-0000-0000-0000-000000000007&lt;/guid&gt;
+            &lt;password&gt;user07&lt;/password&gt;
+            &lt;full-name&gt;User 07&lt;/full-name&gt;
+            &lt;email&gt;user07@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;short-name&gt;user08&lt;/short-name&gt;
+            &lt;uid&gt;user08&lt;/uid&gt;
+            &lt;guid&gt;10000000-0000-0000-0000-000000000008&lt;/guid&gt;
+            &lt;password&gt;user08&lt;/password&gt;
+            &lt;full-name&gt;User 08&lt;/full-name&gt;
+            &lt;email&gt;user08@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;short-name&gt;user09&lt;/short-name&gt;
+            &lt;uid&gt;user09&lt;/uid&gt;
+            &lt;guid&gt;10000000-0000-0000-0000-000000000009&lt;/guid&gt;
+            &lt;password&gt;user09&lt;/password&gt;
+            &lt;full-name&gt;User 09&lt;/full-name&gt;
+            &lt;email&gt;user09@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;short-name&gt;user10&lt;/short-name&gt;
+            &lt;uid&gt;user10&lt;/uid&gt;
+            &lt;guid&gt;10000000-0000-0000-0000-000000000010&lt;/guid&gt;
+            &lt;password&gt;user10&lt;/password&gt;
+            &lt;full-name&gt;User 10&lt;/full-name&gt;
+            &lt;email&gt;user10@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;group&quot;&gt;
+            &lt;short-name&gt;group01&lt;/short-name&gt;
+            &lt;uid&gt;group01&lt;/uid&gt;
+            &lt;guid&gt;20000000-0000-0000-0000-000000000001&lt;/guid&gt;
+            &lt;full-name&gt;Group 01&lt;/full-name&gt;
+            &lt;email&gt;group01@example.com&lt;/email&gt;
+            &lt;member-uid&gt;user01&lt;/member-uid&gt;
+            &lt;member-uid&gt;puser01&lt;/member-uid&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;group&quot;&gt;
+            &lt;short-name&gt;group02&lt;/short-name&gt;
+            &lt;uid&gt;group02&lt;/uid&gt;
+            &lt;guid&gt;20000000-0000-0000-0000-000000000002&lt;/guid&gt;
+            &lt;full-name&gt;Group 02&lt;/full-name&gt;
+            &lt;email&gt;group02@example.com&lt;/email&gt;
+            &lt;member-uid&gt;user06&lt;/member-uid&gt;
+            &lt;member-uid&gt;user07&lt;/member-uid&gt;
+            &lt;member-uid&gt;user08&lt;/member-uid&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;group&quot;&gt;
+            &lt;short-name&gt;group03&lt;/short-name&gt;
+            &lt;uid&gt;group03&lt;/uid&gt;
+            &lt;guid&gt;20000000-0000-0000-0000-000000000003&lt;/guid&gt;
+            &lt;full-name&gt;Group 03&lt;/full-name&gt;
+            &lt;email&gt;group03@example.com&lt;/email&gt;
+            &lt;member-uid&gt;user07&lt;/member-uid&gt;
+            &lt;member-uid&gt;user08&lt;/member-uid&gt;
+            &lt;member-uid&gt;user09&lt;/member-uid&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;group&quot;&gt;
+            &lt;short-name&gt;group04&lt;/short-name&gt;
+            &lt;uid&gt;group04&lt;/uid&gt;
+            &lt;guid&gt;20000000-0000-0000-0000-000000000004&lt;/guid&gt;
+            &lt;full-name&gt;Group 04&lt;/full-name&gt;
+            &lt;email&gt;group04@example.com&lt;/email&gt;
+            &lt;member-uid&gt;group02&lt;/member-uid&gt;
+            &lt;member-uid&gt;group03&lt;/member-uid&gt;
+            &lt;member-uid&gt;user10&lt;/member-uid&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;uid&gt;puser01&lt;/uid&gt;
+            &lt;short-name&gt;puser01&lt;/short-name&gt;
+            &lt;password&gt;puser01&lt;/password&gt;
+            &lt;full-name&gt;Puser 01&lt;/full-name&gt;
+            &lt;email&gt;puser01@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;uid&gt;puser02&lt;/uid&gt;
+            &lt;short-name&gt;puser02&lt;/short-name&gt;
+            &lt;password&gt;puser02&lt;/password&gt;
+            &lt;full-name&gt;Puser 02&lt;/full-name&gt;
+            &lt;email&gt;puser02@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;uid&gt;puser03&lt;/uid&gt;
+            &lt;short-name&gt;puser03&lt;/short-name&gt;
+            &lt;password&gt;puser03&lt;/password&gt;
+            &lt;full-name&gt;Puser 03&lt;/full-name&gt;
+            &lt;email&gt;puser03@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;uid&gt;puser04&lt;/uid&gt;
+            &lt;short-name&gt;puser04&lt;/short-name&gt;
+            &lt;password&gt;puser04&lt;/password&gt;
+            &lt;full-name&gt;Puser 04&lt;/full-name&gt;
+            &lt;email&gt;puser04@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;uid&gt;puser05&lt;/uid&gt;
+            &lt;short-name&gt;puser05&lt;/short-name&gt;
+            &lt;password&gt;puser05&lt;/password&gt;
+            &lt;full-name&gt;Puser 05&lt;/full-name&gt;
+            &lt;email&gt;puser05@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;uid&gt;puser06&lt;/uid&gt;
+            &lt;short-name&gt;puser06&lt;/short-name&gt;
+            &lt;password&gt;puser06&lt;/password&gt;
+            &lt;full-name&gt;Puser 06&lt;/full-name&gt;
+            &lt;email&gt;puser06@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;uid&gt;puser07&lt;/uid&gt;
+            &lt;short-name&gt;puser07&lt;/short-name&gt;
+            &lt;password&gt;puser07&lt;/password&gt;
+            &lt;full-name&gt;Puser 07&lt;/full-name&gt;
+            &lt;email&gt;puser07@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;uid&gt;puser08&lt;/uid&gt;
+            &lt;short-name&gt;puser08&lt;/short-name&gt;
+            &lt;password&gt;puser08&lt;/password&gt;
+            &lt;full-name&gt;Puser 08&lt;/full-name&gt;
+            &lt;email&gt;puser08@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;uid&gt;puser09&lt;/uid&gt;
+            &lt;short-name&gt;puser09&lt;/short-name&gt;
+            &lt;password&gt;puser09&lt;/password&gt;
+            &lt;full-name&gt;Puser 09&lt;/full-name&gt;
+            &lt;email&gt;puser09@example.com&lt;/email&gt;
+        &lt;/record&gt;
+        &lt;record type=&quot;user&quot;&gt;
+            &lt;uid&gt;puser10&lt;/uid&gt;
+            &lt;short-name&gt;puser10&lt;/short-name&gt;
+            &lt;password&gt;puser10&lt;/password&gt;
+            &lt;full-name&gt;Puser 10&lt;/full-name&gt;
+            &lt;email&gt;puser10@example.com&lt;/email&gt;
+        &lt;/record&gt;
+&lt;/directory&gt;
</ins></span></pre></div>
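<p>For reference, the <code>groupAccounts.xml</code> fixture above can be enumerated with the Python standard library. The sketch below is illustrative only and is not part of this changeset; the local file path is an assumption.</p>
<pre>
# Minimal sketch: list the user/group records in the accounts fixture.
# Assumes the XML above was saved locally as groupAccounts.xml (hypothetical path).
import xml.etree.ElementTree as ET

directory = ET.parse(&quot;groupAccounts.xml&quot;).getroot()  # &lt;directory realm=&quot;Test Realm&quot;&gt;
for record in directory.findall(&quot;record&quot;):
    uid = record.findtext(&quot;uid&quot;)
    members = [m.text for m in record.findall(&quot;member-uid&quot;)]
    suffix = (&quot; -&gt; &quot; + &quot;, &quot;.join(members)) if members else &quot;&quot;
    print(record.get(&quot;type&quot;) + &quot; &quot; + uid + suffix)
</pre>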
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_home_syncpy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_home_sync.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,1307 +0,0 @@
</span><del>-##
-# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from pycalendar.datetime import DateTime
-from twext.enterprise.dal.syntax import Select
-from twext.enterprise.jobqueue import JobItem
-from twisted.internet import reactor
-from twisted.internet.defer import inlineCallbacks, returnValue
-from twisted.python.filepath import FilePath
-from twistedcaldav.config import config
-from twistedcaldav.ical import Component, normalize_iCalStr
-from txdav.caldav.datastore.sql import ManagedAttachment
-from txdav.caldav.datastore.sql_directory import GroupShareeRecord
-from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
-from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
-    AttachmentMigrationRecord
-from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
-from txdav.common.datastore.sql_directory import DelegateRecord, \
-    ExternalDelegateGroupsRecord, DelegateGroupsRecord, GroupsRecord
-from txdav.common.datastore.sql_notification import NotificationCollection
-from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_EXTERNAL, \
-    _BIND_MODE_READ, _HOME_STATUS_MIGRATING, _HOME_STATUS_NORMAL, \
-    _HOME_STATUS_DISABLED
-from txdav.common.datastore.test.util import populateCalendarsFrom
-from txdav.who.delegates import Delegates
-from txweb2.http_headers import MimeType
-from txweb2.stream import MemoryStream
-from uuid import uuid4
-import json
-
-
-class TestCrossPodHomeSync(MultiStoreConduitTest):
-    &quot;&quot;&quot;
-    Test that L{CrossPodHomeSync} works.
-    &quot;&quot;&quot;
-
-    nowYear = {&quot;now&quot;: DateTime.getToday().getYear()}
-
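-    # The iCalendar fixtures below are templates: {now:04d} is expanded to the
-    # current year via .format(**nowYear) so the recurring events always fall in
-    # the present, and newlines are rewritten to the CRLF line endings iCalendar
-    # requires.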
-    caldata1 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid1
-DTSTART:{now:04d}0102T140000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:instance
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**nowYear)
-
-    caldata1_changed = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid1
-DTSTART:{now:04d}0102T150000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:instance changed
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**nowYear)
-
-    caldata2 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid2
-DTSTART:{now:04d}0102T160000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:instance
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**nowYear)
-
-    caldata3 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid3
-DTSTART:{now:04d}0102T160000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:instance
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**nowYear)
-
-    caldata4 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid4
-DTSTART:{now:04d}0102T180000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=DAILY
-SUMMARY:instance
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**nowYear)
-
-
-    @inlineCallbacks
-    def test_remote_home(self):
-        &quot;&quot;&quot;
-        Test that a remote home can be accessed.
-        &quot;&quot;&quot;
-
-        home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        self.assertTrue(home01 is not None)
-        yield self.commitTransaction(0)
-
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        home = yield syncer._remoteHome(self.theTransactionUnderTest(1))
-        self.assertTrue(home is not None)
-        self.assertEqual(home.id(), home01.id())
-        yield self.commitTransaction(1)
-
-
-    @inlineCallbacks
-    def test_prepare_home(self):
-        &quot;&quot;&quot;
-        Test that L{prepareCalendarHome} creates a home.
-        &quot;&quot;&quot;
-
-        # No home present
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        self.assertTrue(home is None)
-        yield self.commitTransaction(1)
-
-        yield syncer.prepareCalendarHome()
-
-        # Home is present
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        self.assertTrue(home is not None)
-        children = yield home.listChildren()
-        self.assertEqual(len(children), 0)
-        yield self.commitTransaction(1)
-
-
-    @inlineCallbacks
-    def test_prepare_home_external_txn(self):
-        &quot;&quot;&quot;
-        Test that L{prepareCalendarHome} creates a home when passed an existing transaction.
-        &quot;&quot;&quot;
-
-        # No home present
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        self.assertTrue(home is None)
-        yield self.commitTransaction(1)
-
-        yield syncer.prepareCalendarHome(txn=self.theTransactionUnderTest(1))
-        yield self.commitTransaction(1)
-
-        # Home is present
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        self.assertTrue(home is not None)
-        children = yield home.listChildren()
-        self.assertEqual(len(children), 0)
-        yield self.commitTransaction(1)
-
-
-    @inlineCallbacks
-    def test_home_metadata(self):
-        &quot;&quot;&quot;
-        Test that L{syncCalendarHomeMetaData} syncs home metadata correctly.
-        &quot;&quot;&quot;
-
-        alarm_event_timed = &quot;&quot;&quot;BEGIN:VALARM
-ACTION:DISPLAY
-DESCRIPTION:alarm_event_timed
-TRIGGER:-PT10M
-END:VALARM
-&quot;&quot;&quot;
-        alarm_event_allday = &quot;&quot;&quot;BEGIN:VALARM
-ACTION:DISPLAY
-DESCRIPTION:alarm_event_allday
-TRIGGER:-PT10M
-END:VALARM
-&quot;&quot;&quot;
-        alarm_todo_timed = &quot;&quot;&quot;BEGIN:VALARM
-ACTION:DISPLAY
-DESCRIPTION:alarm_todo_timed
-TRIGGER:-PT10M
-END:VALARM
-&quot;&quot;&quot;
-        alarm_todo_allday = &quot;&quot;&quot;BEGIN:VALARM
-ACTION:DISPLAY
-DESCRIPTION:alarm_todo_allday
-TRIGGER:-PT10M
-END:VALARM
-&quot;&quot;&quot;
-        availability = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-PRODID:-//Example Inc.//Example Calendar//EN
-BEGIN:VAVAILABILITY
-UID:20061005T133225Z-00001-availability@example.com
-DTSTART:20060101T000000Z
-DTEND:20060108T000000Z
-DTSTAMP:20061005T133225Z
-ORGANIZER:mailto:bernard@example.com
-BEGIN:AVAILABLE
-UID:20061005T133225Z-00001-A-availability@example.com
-DTSTART:20060102T090000Z
-DTEND:20060102T120000Z
-DTSTAMP:20061005T133225Z
-RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
-SUMMARY:Weekdays from 9:00 to 12:00
-END:AVAILABLE
-END:VAVAILABILITY
-END:VCALENDAR
-&quot;&quot;&quot;
-
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        events0 = yield home0.createChildWithName(&quot;events&quot;)
-        yield home0.setDefaultCalendar(events0, &quot;VEVENT&quot;)
-        yield home0.setDefaultAlarm(alarm_event_timed, True, True)
-        yield home0.setDefaultAlarm(alarm_event_allday, True, False)
-        yield home0.setDefaultAlarm(alarm_todo_timed, False, True)
-        yield home0.setDefaultAlarm(alarm_todo_allday, False, False)
-        yield home0.setAvailability(Component.fromString(availability))
-        yield self.commitTransaction(0)
-
-        # Trigger sync
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.sync()
-
-        # Home is present with correct metadata
-        home1 = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        self.assertTrue(home1 is not None)
-        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
-        events1 = yield home1.childWithName(&quot;events&quot;)
-        tasks1 = yield home1.childWithName(&quot;tasks&quot;)
-        self.assertFalse(home1.isDefaultCalendar(calendar1))
-        self.assertTrue(home1.isDefaultCalendar(events1))
-        self.assertTrue(home1.isDefaultCalendar(tasks1))
-        self.assertEqual(home1.getDefaultAlarm(True, True), alarm_event_timed)
-        self.assertEqual(home1.getDefaultAlarm(True, False), alarm_event_allday)
-        self.assertEqual(home1.getDefaultAlarm(False, True), alarm_todo_timed)
-        self.assertEqual(home1.getDefaultAlarm(False, False), alarm_todo_allday)
-        self.assertEqual(normalize_iCalStr(home1.getAvailability()), normalize_iCalStr(availability))
-        yield self.commitTransaction(1)
-
-        # Make some changes
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
-        yield home0.setDefaultCalendar(calendar0, &quot;VEVENT&quot;)
-        yield home0.setDefaultAlarm(None, True, True)
-        yield home0.setDefaultAlarm(None, False, True)
-        yield self.commitTransaction(0)
-
-        # Trigger sync again
-        yield syncer.sync()
-
-        # Home is present with correct metadata
-        home1 = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        self.assertTrue(home1 is not None)
-        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
-        events1 = yield home1.childWithName(&quot;events&quot;)
-        tasks1 = yield home1.childWithName(&quot;tasks&quot;)
-        self.assertTrue(home1.isDefaultCalendar(calendar1))
-        self.assertFalse(home1.isDefaultCalendar(events1))
-        self.assertTrue(home1.isDefaultCalendar(tasks1))
-        self.assertEqual(home1.getDefaultAlarm(True, True), None)
-        self.assertEqual(home1.getDefaultAlarm(True, False), alarm_event_allday)
-        self.assertEqual(home1.getDefaultAlarm(False, True), None)
-        self.assertEqual(home1.getDefaultAlarm(False, False), alarm_todo_allday)
-        self.assertEqual(normalize_iCalStr(home1.getAvailability()), normalize_iCalStr(availability))
-        yield self.commitTransaction(1)
-
-
-    @inlineCallbacks
-    def test_get_calendar_sync_list(self):
-        &quot;&quot;&quot;
-        Test that L{getCalendarSyncList} returns the correct results.
-        &quot;&quot;&quot;
-
-        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        yield self.commitTransaction(0)
-        home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;)
-        self.assertTrue(home01 is not None)
-        calendars01 = yield home01.loadChildren()
-        results01 = {}
-        for calendar in calendars01:
-            if calendar.owned():
-                sync_token = yield calendar.syncToken()
-                results01[calendar.id()] = CalendarMigrationRecord.make(
-                    calendarHomeResourceID=home01.id(),
-                    remoteResourceID=calendar.id(),
-                    localResourceID=0,
-                    lastSyncToken=sync_token,
-                )
-
-        yield self.commitTransaction(0)
-
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        results = yield syncer.getCalendarSyncList()
-        self.assertEqual(results, results01)
-
-
-    @inlineCallbacks
-    def test_sync_calendar_initial_empty(self):
-        &quot;&quot;&quot;
-        Test that L{syncCalendar} syncs an initially non-existent local calendar with
-        an empty remote calendar.
-        &quot;&quot;&quot;
-
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
-        remote_id = calendar0.id()
-        remote_sync_token = yield calendar0.syncToken()
-        yield self.commitTransaction(0)
-
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        yield syncer.prepareCalendarHome()
-
-        # No local calendar exists yet
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        children = yield home1.listChildren()
-        self.assertEqual(len(children), 0)
-        yield self.commitTransaction(1)
-
-        # Trigger sync of the one calendar
-        local_sync_state = {}
-        remote_sync_state = {remote_id: CalendarMigrationRecord.make(
-            calendarHomeResourceID=home0.id(),
-            remoteResourceID=remote_id,
-            localResourceID=0,
-            lastSyncToken=remote_sync_token,
-        )}
-        yield syncer.syncCalendar(
-            remote_id,
-            local_sync_state,
-            remote_sync_state,
-        )
-        self.assertEqual(len(local_sync_state), 1)
-        self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
-
-        # Local calendar exists
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
-        self.assertTrue(calendar1 is not None)
-        yield self.commitTransaction(1)
-
-
-    @inlineCallbacks
-    def test_sync_calendar_initial_with_data(self):
-        &quot;&quot;&quot;
-        Test that L{syncCalendar} syncs an initially non-existent local calendar with
-        a remote calendar containing data. Also check that a change to one event is
-        synced on a second pass.
-        &quot;&quot;&quot;
-
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
-        o1 = yield calendar0.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.caldata1))
-        o2 = yield calendar0.createCalendarObjectWithName(&quot;2.ics&quot;, Component.fromString(self.caldata2))
-        o3 = yield calendar0.createCalendarObjectWithName(&quot;3.ics&quot;, Component.fromString(self.caldata3))
-        remote_id = calendar0.id()
-        mapping0 = dict([(o.name(), o.id()) for o in (o1, o2, o3)])
-        yield self.commitTransaction(0)
-
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        yield syncer.prepareCalendarHome()
-
-        # No local calendar exists yet
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
-        self.assertTrue(calendar1 is None)
-        yield self.commitTransaction(1)
-
-        # Trigger sync of the one calendar
-        local_sync_state = {}
-        remote_sync_state = yield syncer.getCalendarSyncList()
-        yield syncer.syncCalendar(
-            remote_id,
-            local_sync_state,
-            remote_sync_state,
-        )
-        self.assertEqual(len(local_sync_state), 1)
-        self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
-
-        @inlineCallbacks
-        def _checkCalendarObjectMigrationState(home, mapping1):
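-            # Verify that the CALENDAR_OBJECT_MIGRATION table maps each remote
-            # calendar object ID to its corresponding local object ID for this home.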
-            com = schema.CALENDAR_OBJECT_MIGRATION
-            mappings = yield Select(
-                columns=[com.REMOTE_RESOURCE_ID, com.LOCAL_RESOURCE_ID],
-                From=com,
-                Where=(com.CALENDAR_HOME_RESOURCE_ID == home.id())
-            ).on(self.theTransactionUnderTest(1))
-            expected_mappings = dict([(mapping0[name], mapping1[name]) for name in mapping0.keys()])
-            self.assertEqual(dict(mappings), expected_mappings)
-
-
-        # Local calendar exists
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
-        self.assertTrue(calendar1 is not None)
-        children = yield calendar1.objectResources()
-        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;2.ics&quot;, &quot;3.ics&quot;,)))
-        mapping1 = dict([(o.name(), o.id()) for o in children])
-        yield _checkCalendarObjectMigrationState(home1, mapping1)
-        yield self.commitTransaction(1)
-
-        # Change one resource
-        object0 = yield self.calendarObjectUnderTest(
-            txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;
-        )
-        yield object0.setComponent(Component.fromString(self.caldata1_changed))
-        yield self.commitTransaction(0)
-
-        remote_sync_state = yield syncer.getCalendarSyncList()
-        yield syncer.syncCalendar(
-            remote_id,
-            local_sync_state,
-            remote_sync_state,
-        )
-
-        object1 = yield self.calendarObjectUnderTest(
-            txn=self.theTransactionUnderTest(1), home=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;
-        )
-        caldata = yield object1.component()
-        self.assertEqual(normalize_iCalStr(caldata), normalize_iCalStr(self.caldata1_changed))
-        yield self.commitTransaction(1)
-
-        # Remove one resource
-        object0 = yield self.calendarObjectUnderTest(
-            txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;2.ics&quot;
-        )
-        yield object0.remove()
-        del mapping0[&quot;2.ics&quot;]
-        yield self.commitTransaction(0)
-
-        remote_sync_state = yield syncer.getCalendarSyncList()
-        yield syncer.syncCalendar(
-            remote_id,
-            local_sync_state,
-            remote_sync_state,
-        )
-
-        calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING, name=&quot;calendar&quot;)
-        children = yield calendar1.objectResources()
-        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;3.ics&quot;,)))
-        mapping1 = dict([(o.name(), o.id()) for o in children])
-        yield _checkCalendarObjectMigrationState(home1, mapping1)
-        yield self.commitTransaction(1)
-
-        # Add one resource
-        calendar0 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, name=&quot;calendar&quot;)
-        o4 = yield calendar0.createCalendarObjectWithName(&quot;4.ics&quot;, Component.fromString(self.caldata4))
-        mapping0[o4.name()] = o4.id()
-        yield self.commitTransaction(0)
-
-        remote_sync_state = yield syncer.getCalendarSyncList()
-        yield syncer.syncCalendar(
-            remote_id,
-            local_sync_state,
-            remote_sync_state,
-        )
-
-        calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING, name=&quot;calendar&quot;)
-        children = yield calendar1.objectResources()
-        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;3.ics&quot;, &quot;4.ics&quot;)))
-        mapping1 = dict([(o.name(), o.id()) for o in children])
-        yield _checkCalendarObjectMigrationState(home1, mapping1)
-        yield self.commitTransaction(1)
-
-
-    @inlineCallbacks
-    def test_sync_calendars_add_remove(self):
-        &quot;&quot;&quot;
-        Test that L{syncCalendarList} syncs the full set of calendars in the home,
-        picking up calendars added to or removed from the remote home between syncs.
-        &quot;&quot;&quot;
-
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        children0 = yield home0.loadChildren()
-        details0 = dict([(child.id(), child.name()) for child in children0])
-        yield self.commitTransaction(0)
-
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        yield syncer.prepareCalendarHome()
-
-        # No local calendar exists yet
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        children1 = yield home1.loadChildren()
-        self.assertEqual(len(children1), 0)
-        yield self.commitTransaction(1)
-
-        # Trigger sync
-        yield syncer.syncCalendarList()
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        children1 = yield home1.loadChildren()
-        details1 = dict([(child.id(), child.name()) for child in children1])
-        self.assertEqual(set(details1.values()), set(details0.values()))
-        yield self.commitTransaction(1)
-
-        # Add a calendar
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        newcalendar0 = yield home0.createCalendarWithName(&quot;new-calendar&quot;)
-        details0[newcalendar0.id()] = newcalendar0.name()
-        yield self.commitTransaction(0)
-
-        # Trigger sync
-        yield syncer.syncCalendarList()
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        children1 = yield home1.loadChildren()
-        details1 = dict([(child.id(), child.name()) for child in children1])
-        self.assertTrue(&quot;new-calendar&quot; in details1.values())
-        self.assertEqual(set(details1.values()), set(details0.values()))
-        yield self.commitTransaction(1)
-
-        # Remove a calendar
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        calendar0 = yield home0.childWithName(&quot;new-calendar&quot;)
-        del details0[calendar0.id()]
-        yield calendar0.remove()
-        yield self.commitTransaction(0)
-
-        # Trigger sync
-        yield syncer.syncCalendarList()
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        children1 = yield home1.loadChildren()
-        details1 = dict([(child.id(), child.name()) for child in children1])
-        self.assertTrue(&quot;new-calendar&quot; not in details1.values())
-        self.assertEqual(set(details1.values()), set(details0.values()))
-        yield self.commitTransaction(1)
-
-
-    @inlineCallbacks
-    def test_sync_attachments_add_remove(self):
-        &quot;&quot;&quot;
-        Test that L{syncAttachments} syncs attachment data, then an update to the data,
-        and finally a removal of the data.
-        &quot;&quot;&quot;
-
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
-        yield calendar0.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.caldata1))
-        yield calendar0.createCalendarObjectWithName(&quot;2.ics&quot;, Component.fromString(self.caldata2))
-        yield calendar0.createCalendarObjectWithName(&quot;3.ics&quot;, Component.fromString(self.caldata3))
-        remote_id = calendar0.id()
-        mapping0 = dict()
-        yield self.commitTransaction(0)
-
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        yield syncer.prepareCalendarHome()
-
-        # Trigger sync of the one calendar
-        local_sync_state = {}
-        remote_sync_state = yield syncer.getCalendarSyncList()
-        yield syncer.syncCalendar(
-            remote_id,
-            local_sync_state,
-            remote_sync_state,
-        )
-        self.assertEqual(len(local_sync_state), 1)
-        self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
-
-        @inlineCallbacks
-        def _mapLocalIDToRemote(remote_id):
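-            # Find the migration record whose remote ID matches and return its
-            # local ID; the for/else returns None when nothing matches (a match
-            # exits immediately because returnValue() raises).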
-            records = yield AttachmentMigrationRecord.all(self.theTransactionUnderTest(1))
-            yield self.commitTransaction(1)
-            for record in records:
-                if record.remoteResourceID == remote_id:
-                    returnValue(record.localResourceID)
-            else:
-                returnValue(None)
-
-        # Sync attachments
-        changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set())
-        self.assertEqual(removed, set())
-
-        @inlineCallbacks
-        def _checkAttachmentObjectMigrationState(home, mapping1):
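-            # Verify that the ATTACHMENT_MIGRATION table maps each remote
-            # attachment ID to its corresponding local attachment ID.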
-            am = schema.ATTACHMENT_MIGRATION
-            mappings = yield Select(
-                columns=[am.REMOTE_RESOURCE_ID, am.LOCAL_RESOURCE_ID],
-                From=am,
-                Where=(am.CALENDAR_HOME_RESOURCE_ID == home.id())
-            ).on(self.theTransactionUnderTest(1))
-            expected_mappings = dict([(mapping0[name], mapping1[name]) for name in mapping0.keys()])
-            self.assertEqual(dict(mappings), expected_mappings)
-
-
-        # Local calendar exists
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
-        self.assertTrue(calendar1 is not None)
-        children = yield calendar1.objectResources()
-        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;2.ics&quot;, &quot;3.ics&quot;,)))
-
-        attachments = yield home1.getAllAttachments()
-        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
-        yield _checkAttachmentObjectMigrationState(home1, mapping1)
-        yield self.commitTransaction(1)
-
-        # Add one attachment
-        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
-        attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text #1.&quot;))
-        id0_1 = attachment.id()
-        md50_1 = attachment.md5()
-        managedid0_1 = attachment.managedID()
-        mapping0[md50_1] = id0_1
-        yield self.commitTransaction(0)
-
-        # Sync attachments
-        changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1)),)))
-        self.assertEqual(removed, set())
-
-        # Validate changes
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        attachments = yield home1.getAllAttachments()
-        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
-        yield _checkAttachmentObjectMigrationState(home1, mapping1)
-
-        # Add another attachment
-        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;2.ics&quot;)
-        attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test2.txt&quot;, MemoryStream(&quot;Here is some text #2.&quot;))
-        id0_2 = attachment.id()
-        md50_2 = attachment.md5()
-        mapping0[md50_2] = id0_2
-        yield self.commitTransaction(0)
-
-        # Sync attachments
-        changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_2)),)))
-        self.assertEqual(removed, set())
-
-        # Validate changes
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        attachments = yield home1.getAllAttachments()
-        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
-        yield _checkAttachmentObjectMigrationState(home1, mapping1)
-
-        # Change original attachment (this is actually a remove and a create all in one)
-        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
-        attachment, _ignore_location = yield object1.updateAttachment(managedid0_1, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text #1 - changed.&quot;))
-        del mapping0[md50_1]
-        id0_1_changed = attachment.id()
-        md50_1_changed = attachment.md5()
-        managedid0_1_changed = attachment.managedID()
-        mapping0[md50_1_changed] = id0_1_changed
-        yield self.commitTransaction(0)
-
-        # Sync attachments
-        changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1_changed)),)))
-        self.assertEqual(removed, set((id0_1,)))
-
-        # Validate changes
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        attachments = yield home1.getAllAttachments()
-        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
-        yield _checkAttachmentObjectMigrationState(home1, mapping1)
-
-        # Add original to a different resource
-        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
-        component = yield object1.componentForUser()
-        attach = component.mainComponent().getProperty(&quot;ATTACH&quot;)
-
-        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;3.ics&quot;)
-        component = yield object1.componentForUser()
-        attach = component.mainComponent().addProperty(attach)
-        yield object1.setComponent(component)
-        yield self.commitTransaction(0)
-
-        # Sync attachments
-        changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set())
-        self.assertEqual(removed, set())
-
-        # Validate changes
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        attachments = yield home1.getAllAttachments()
-        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
-        yield _checkAttachmentObjectMigrationState(home1, mapping1)
-
-        # Change original attachment in original resource (this creates a new one and does not remove the old)
-        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
-        attachment, _ignore_location = yield object1.updateAttachment(managedid0_1_changed, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text #1 - changed again.&quot;))
-        id0_1_changed_again = attachment.id()
-        md50_1_changed_again = attachment.md5()
-        mapping0[md50_1_changed_again] = id0_1_changed_again
-        yield self.commitTransaction(0)
-
-        # Sync attachments
-        changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1_changed_again)),)))
-        self.assertEqual(removed, set())
-
-        # Validate changes
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        attachments = yield home1.getAllAttachments()
-        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
-        yield _checkAttachmentObjectMigrationState(home1, mapping1)
-
-
-    @inlineCallbacks
-    def test_link_attachments(self):
-        &quot;&quot;&quot;
-        Test that L{linkAttachments} links attachment data to the associated calendar object.
-        &quot;&quot;&quot;
-
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
-        object0_1 = yield calendar0.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.caldata1))
-        object0_2 = yield calendar0.createCalendarObjectWithName(&quot;2.ics&quot;, Component.fromString(self.caldata2))
-        yield calendar0.createCalendarObjectWithName(&quot;3.ics&quot;, Component.fromString(self.caldata3))
-        remote_id = calendar0.id()
-
-        attachment, _ignore_location = yield object0_1.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text #1.&quot;))
-        id0_1 = attachment.id()
-        md50_1 = attachment.md5()
-        managedid0_1 = attachment.managedID()
-        pathID0_1 = ManagedAttachment.lastSegmentOfUriPath(managedid0_1, attachment.name())
-
-        attachment, _ignore_location = yield object0_2.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test2.txt&quot;, MemoryStream(&quot;Here is some text #2.&quot;))
-        id0_2 = attachment.id()
-        md50_2 = attachment.md5()
-        managedid0_2 = attachment.managedID()
-        pathID0_2 = ManagedAttachment.lastSegmentOfUriPath(managedid0_2, attachment.name())
-
-        yield self.commitTransaction(0)
-
-        # Add original to a different resource
-        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
-        component = yield object1.componentForUser()
-        attach = component.mainComponent().getProperty(&quot;ATTACH&quot;)
-
-        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;3.ics&quot;)
-        component = yield object1.componentForUser()
-        attach = component.mainComponent().addProperty(attach)
-        yield object1.setComponent(component)
-        yield self.commitTransaction(0)
-
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        yield syncer.prepareCalendarHome()
-
-        # Trigger sync of the one calendar
-        local_sync_state = {}
-        remote_sync_state = yield syncer.getCalendarSyncList()
-        yield syncer.syncCalendar(
-            remote_id,
-            local_sync_state,
-            remote_sync_state,
-        )
-        self.assertEqual(len(local_sync_state), 1)
-        self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
-
-        # Sync attachments
-        changed, removed = yield syncer.syncAttachments()
-
-        @inlineCallbacks
-        def _mapLocalIDToRemote(remote_id):
-            records = yield AttachmentMigrationRecord.all(self.theTransactionUnderTest(1))
-            yield self.commitTransaction(1)
-            for record in records:
-                if record.remoteResourceID == remote_id:
-                    returnValue(record.localResourceID)
-            else:
-                returnValue(None)
-
-        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1)), (yield _mapLocalIDToRemote(id0_2)),)))
-        self.assertEqual(removed, set())
-
-        # Link attachments (after home is disabled)
-        yield syncer.disableRemoteHome()
-        len_links = yield syncer.linkAttachments()
-        self.assertEqual(len_links, 3)
-
-        # Local calendar exists
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
-        self.assertTrue(calendar1 is not None)
-        children = yield calendar1.objectResources()
-        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;2.ics&quot;, &quot;3.ics&quot;,)))
-
-        # Make sure calendar object is associated with attachment
-        object1 = yield calendar1.objectResourceWithName(&quot;1.ics&quot;)
-        attachments = yield object1.managedAttachmentList()
-        self.assertEqual(attachments, [pathID0_1, ])
-
-        attachment = yield object1.attachmentWithManagedID(managedid0_1)
-        self.assertTrue(attachment is not None)
-        self.assertEqual(attachment.md5(), md50_1)
-
-        # Make sure calendar object is associated with attachment
-        object1 = yield calendar1.objectResourceWithName(&quot;2.ics&quot;)
-        attachments = yield object1.managedAttachmentList()
-        self.assertEqual(attachments, [pathID0_2, ])
-
-        attachment = yield object1.attachmentWithManagedID(managedid0_2)
-        self.assertTrue(attachment is not None)
-        self.assertEqual(attachment.md5(), md50_2)
-
-        # Make sure calendar object is associated with attachment
-        object1 = yield calendar1.objectResourceWithName(&quot;3.ics&quot;)
-        attachments = yield object1.managedAttachmentList()
-        self.assertEqual(attachments, [pathID0_1, ])
-
-        attachment = yield object1.attachmentWithManagedID(managedid0_1)
-        self.assertTrue(attachment is not None)
-        self.assertEqual(attachment.md5(), md50_1)
-
-
-    @inlineCallbacks
-    def test_delegate_reconcile(self):
-        &quot;&quot;&quot;
-        Test that L{delegateReconcile} copies over the full set of delegates and caches the associated groups.
-        &quot;&quot;&quot;
-
-        # Create remote home
-        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        yield self.commitTransaction(0)
-
-        # Add some delegates
-        txn = self.theTransactionUnderTest(0)
-        record01 = yield txn.directoryService().recordWithUID(u&quot;user01&quot;)
-        record02 = yield txn.directoryService().recordWithUID(u&quot;user02&quot;)
-        record03 = yield txn.directoryService().recordWithUID(u&quot;user03&quot;)
-
-        group01 = yield txn.directoryService().recordWithUID(u&quot;__top_group_1__&quot;)
-        group02 = yield txn.directoryService().recordWithUID(u&quot;right_coast&quot;)
-
-        # Add user02 and user03 as individual delegates
-        yield Delegates.addDelegate(txn, record01, record02, True)
-        yield Delegates.addDelegate(txn, record01, record03, False)
-
-        # Add group delegates
-        yield Delegates.addDelegate(txn, record01, group01, True)
-        yield Delegates.addDelegate(txn, record01, group02, False)
-
-        # Add external delegates
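-        # (read external group UID &quot;external1&quot;, write external group UID &quot;external2&quot;)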
-        yield txn.assignExternalDelegates(u&quot;user01&quot;, None, None, u&quot;external1&quot;, u&quot;external2&quot;)
-
-        yield self.commitTransaction(0)
-
-
-        # Initially no local delegates
-        txn = self.theTransactionUnderTest(1)
-        delegates = yield txn.dumpIndividualDelegatesLocal(u&quot;user01&quot;)
-        self.assertEqual(len(delegates), 0)
-        delegates = yield txn.dumpGroupDelegatesLocal(u&quot;user01&quot;)
-        self.assertEqual(len(delegates), 0)
-        externals = yield txn.dumpExternalDelegatesLocal(u&quot;user01&quot;)
-        self.assertEqual(len(externals), 0)
-        yield self.commitTransaction(1)
-
-        # Sync from remote side
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        yield syncer.disableRemoteHome()
-        yield syncer.delegateReconcile()
-
-        # Now have local delegates
-        txn = self.theTransactionUnderTest(1)
-
-        delegates = yield txn.dumpIndividualDelegatesLocal(u&quot;user01&quot;)
-        self.assertEqual(
-            set(delegates),
-            set((
-                DelegateRecord.make(delegator=&quot;user01&quot;, delegate=&quot;user02&quot;, readWrite=1),
-                DelegateRecord.make(delegator=&quot;user01&quot;, delegate=&quot;user03&quot;, readWrite=0),
-            )),
-        )
-
-        delegateGroups = yield txn.dumpGroupDelegatesLocal(u&quot;user01&quot;)
-        group_top = yield txn.groupByUID(u&quot;__top_group_1__&quot;)
-        group_right = yield txn.groupByUID(u&quot;right_coast&quot;)
-        self.assertEqual(
-            set([item[0] for item in delegateGroups]),
-            set((
-                DelegateGroupsRecord.make(delegator=&quot;user01&quot;, groupID=group_top.groupID, readWrite=1, isExternal=False),
-                DelegateGroupsRecord.make(delegator=&quot;user01&quot;, groupID=group_right.groupID, readWrite=0, isExternal=False),
-            )),
-        )
-
-        externals = yield txn.dumpExternalDelegatesLocal(u&quot;user01&quot;)
-        self.assertEqual(
-            set(externals),
-            set((
-                ExternalDelegateGroupsRecord.make(
-                    delegator=&quot;user01&quot;,
-                    groupUIDRead=&quot;external1&quot;,
-                    groupUIDWrite=&quot;external2&quot;,
-                ),
-            )),
-        )
-
-        yield self.commitTransaction(1)
-
-
-    @inlineCallbacks
-    def test_notifications_reconcile(self):
-        &quot;&quot;&quot;
-        Test that L{notificationsReconcile} copies over the full set of notifications.
-        &quot;&quot;&quot;
-
-        # Create remote home - and add some fake notifications
-        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        notifications = yield self.theTransactionUnderTest(0).notificationsWithUID(&quot;user01&quot;, create=True)
-        uid1 = str(uuid4())
-        obj1 = yield notifications.writeNotificationObject(uid1, &quot;type1&quot;, &quot;data1&quot;)
-        id1 = obj1.id()
-        uid2 = str(uuid4())
-        obj2 = yield notifications.writeNotificationObject(uid2, &quot;type2&quot;, &quot;data2&quot;)
-        id2 = obj2.id()
-        yield self.commitTransaction(0)
-
-        # Sync from remote side
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        yield syncer.prepareCalendarHome()
-        yield syncer.disableRemoteHome()
-        changes = yield syncer.notificationsReconcile()
-        self.assertEqual(changes, 2)
-
-        # Now have local notifications
-        notifications = yield NotificationCollection.notificationsWithUID(
-            self.theTransactionUnderTest(1),
-            &quot;user01&quot;,
-            status=_HOME_STATUS_MIGRATING,
-        )
-        results = yield notifications.notificationObjects()
-        self.assertEqual(len(results), 2)
-        for result in results:
-            for test_uid, test_id, test_type, test_data in ((uid1, id1, &quot;type1&quot;, &quot;data1&quot;,), (uid2, id2, &quot;type2&quot;, &quot;data2&quot;,),):
-                if result.uid() == test_uid:
-                    self.assertNotEqual(result.id(), test_id)
-                    self.assertEqual(json.loads(result.notificationType()), test_type)
-                    data = yield result.notificationData()
-                    self.assertEqual(json.loads(data), test_data)
-                    break
-            else:
-                self.fail(&quot;Notification uid {} not found&quot;.format(result.uid()))
-        yield self.commitTransaction(1)
-
-
-    @inlineCallbacks
-    def test_disable_remote_home(self):
-        &quot;&quot;&quot;
-        Test that L{disableRemoteHome} changes the remote status and prevents a normal state
-        home from being created.
-        &quot;&quot;&quot;
-
-        # Create remote home - and add some fake notifications
-        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        yield self.theTransactionUnderTest(0).notificationsWithUID(&quot;user01&quot;, create=True)
-        yield self.commitTransaction(0)
-
-        # Sync from remote side
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        yield syncer.prepareCalendarHome()
-        yield syncer.disableRemoteHome()
-
-        # It is disabled
-        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;)
-        self.assertTrue(home is None)
-        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_NORMAL)
-        self.assertTrue(home is None)
-        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_DISABLED)
-        self.assertTrue(home is not None)
-        yield self.commitTransaction(0)
-
-
-
-class TestSharingSync(MultiStoreConduitTest):
-    &quot;&quot;&quot;
-    Test that L{CrossPodHomeSync} sharing sync works.
-    &quot;&quot;&quot;
-
-    @inlineCallbacks
-    def setUp(self):
-        self.accounts = FilePath(__file__).sibling(&quot;accounts&quot;).child(&quot;groupAccounts.xml&quot;)
-        self.augments = FilePath(__file__).sibling(&quot;accounts&quot;).child(&quot;augments.xml&quot;)
-        yield super(TestSharingSync, self).setUp()
-        yield self.populate()
-
-
-    def configure(self):
-        super(TestSharingSync, self).configure()
-        config.Sharing.Enabled = True
-        config.Sharing.Calendars.Enabled = True
-        config.Sharing.Calendars.Groups.Enabled = True
-        config.Sharing.Calendars.Groups.ReconciliationDelaySeconds = 0
-
-
-    @inlineCallbacks
-    def populate(self):
-        yield populateCalendarsFrom(self.requirements, self.theStoreUnderTest(0))
-
-    requirements = {
-        &quot;user01&quot; : None,
-        &quot;user02&quot; : None,
-        &quot;user06&quot; : None,
-        &quot;user07&quot; : None,
-        &quot;user08&quot; : None,
-        &quot;user09&quot; : None,
-        &quot;user10&quot; : None,
-    }
-
-
-    @inlineCallbacks
-    def _createShare(self, shareFrom, shareTo, accept=True):
-        # Invite
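-        # The test accounts follow the convention that names starting with
-        # &quot;p&quot; are hosted on pod 1 (store index 1); all others are on pod 0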
-        txnindex = 1 if shareFrom[0] == &quot;p&quot; else 0
-        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareFrom, create=True)
-        calendar = yield home.childWithName(&quot;calendar&quot;)
-        shareeView = yield calendar.inviteUIDToShare(shareTo, _BIND_MODE_READ, &quot;summary&quot;)
-        yield self.commitTransaction(txnindex)
-
-        # Accept
-        if accept:
-            inviteUID = shareeView.shareUID()
-            txnindex = 1 if shareTo[0] == &quot;p&quot; else 0
-            shareeHome = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareTo)
-            shareeView = yield shareeHome.acceptShare(inviteUID)
-            sharedName = shareeView.name()
-            yield self.commitTransaction(txnindex)
-        else:
-            sharedName = None
-
-        returnValue(sharedName)
-
-
-    @inlineCallbacks
-    def test_shared_collections_reconcile(self):
-        &quot;&quot;&quot;
-        Test that L{sharedByCollectionsReconcile} and L{sharedToCollectionsReconcile} copy over the full set of shared collections.
-        &quot;&quot;&quot;
-
-        # Create home
-        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        yield self.commitTransaction(0)
-
-        # Shared by migrating user
-        shared_name_02 = yield self._createShare(&quot;user01&quot;, &quot;user02&quot;)
-        shared_name_03 = yield self._createShare(&quot;user01&quot;, &quot;puser03&quot;)
-
-        # Shared to migrating user
-        shared_name_04 = yield self._createShare(&quot;user04&quot;, &quot;user01&quot;)
-        shared_name_05 = yield self._createShare(&quot;puser05&quot;, &quot;user01&quot;)
-
-        # Sync from remote side
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        yield syncer.sync()
-        yield syncer.disableRemoteHome()
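-        # Expect two shared-by invites (user02 and puser03) and two shared-to
-        # calendars (user04 and puser05) to be reconciled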
-        changes = yield syncer.sharedByCollectionsReconcile()
-        self.assertEqual(changes, 2)
-        changes = yield syncer.sharedToCollectionsReconcile()
-        self.assertEqual(changes, 2)
-
-        # Local calendar exists with shares
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
-        invites1 = yield calendar1.sharingInvites()
-        self.assertEqual(len(invites1), 2)
-        self.assertEqual(set([invite.uid for invite in invites1]), set((shared_name_02, shared_name_03,)))
-        yield self.commitTransaction(1)
-
-        # Remote sharee can access it
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user02&quot;)
-        calendar0 = yield home0.childWithName(shared_name_02)
-        self.assertTrue(calendar0 is not None)
-
-        # Local sharee can access it
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;puser03&quot;)
-        calendar1 = yield home1.childWithName(shared_name_03)
-        self.assertTrue(calendar1 is not None)
-
-        # Local shared calendars exist
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        calendar1 = yield home1.childWithName(shared_name_04)
-        self.assertTrue(calendar1 is not None)
-        calendar1 = yield home1.childWithName(shared_name_05)
-        self.assertTrue(calendar1 is not None)
-        yield self.commitTransaction(1)
-
-        # Sharers see migrated user as sharee
-        externalHome0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_EXTERNAL)
-        calendar0 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user04&quot;, name=&quot;calendar&quot;)
-        invites = yield calendar0.allInvitations()
-        self.assertEqual(len(invites), 1)
-        self.assertEqual(invites[0].shareeUID, &quot;user01&quot;)
-        self.assertEqual(invites[0].shareeHomeID, externalHome0.id())
-        yield self.commitTransaction(0)
-
-        shareeHome1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;puser05&quot;, name=&quot;calendar&quot;)
-        invites = yield calendar1.allInvitations()
-        self.assertEqual(len(invites), 1)
-        self.assertEqual(invites[0].shareeUID, &quot;user01&quot;)
-        self.assertEqual(invites[0].shareeHomeID, shareeHome1.id())
-        yield self.commitTransaction(1)
-
-
-    @inlineCallbacks
-    def test_group_shared_collections_reconcile(self):
-        &quot;&quot;&quot;
-        Test that L{sharedByCollectionsReconcile} copies over group shares and caches the associated group sharee records.
-        &quot;&quot;&quot;
-
-        # Create home
-        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        yield self.commitTransaction(0)
-
-        # Shared by migrating user
-        yield self._createShare(&quot;user01&quot;, &quot;group02&quot;, accept=False)
-
-        # Sync from remote side
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.loadRecord()
-        yield syncer.sync()
-        yield syncer.disableRemoteHome()
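-        # The single group invite is expanded to individual sharee invites
-        # (three for group02's members), with no shared-to changes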
-        changes = yield syncer.sharedByCollectionsReconcile()
-        self.assertEqual(changes, 3)
-        changes = yield syncer.sharedToCollectionsReconcile()
-        self.assertEqual(changes, 0)
-
-        # Local calendar exists with shares
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
-        invites1 = yield calendar1.sharingInvites()
-        self.assertEqual(len(invites1), 3)
-        sharee = yield GroupShareeRecord.querysimple(self.theTransactionUnderTest(1), calendarID=calendar1.id())
-        self.assertEqual(len(sharee), 1)
-        group = yield GroupsRecord.querysimple(self.theTransactionUnderTest(1), groupID=sharee[0].groupID)
-        self.assertEqual(len(group), 1)
-        self.assertEqual(group[0].groupUID, &quot;group02&quot;)
-        yield self.commitTransaction(1)
-
-
-
-class TestGroupAttendeeSync(MultiStoreConduitTest):
-    &quot;&quot;&quot;
-    GroupAttendeeReconciliation tests
-    &quot;&quot;&quot;
-
-    now = {&quot;now1&quot;: DateTime.getToday().getYear() + 1}
-
-    groupdata1 = &quot;&quot;&quot;BEGIN:VCALENDAR
-CALSCALE:GREGORIAN
-PRODID:-//Example Inc.//Example Calendar//EN
-VERSION:2.0
-BEGIN:VEVENT
-DTSTAMP:20051222T205953Z
-CREATED:20060101T150000Z
-DTSTART:{now1:04d}0101T100000Z
-DURATION:PT1H
-SUMMARY:event 1
-UID:event1@ninevah.local
-END:VEVENT
-END:VCALENDAR&quot;&quot;&quot;.format(**now)
-
-    groupdata2 = &quot;&quot;&quot;BEGIN:VCALENDAR
-CALSCALE:GREGORIAN
-PRODID:-//Example Inc.//Example Calendar//EN
-VERSION:2.0
-BEGIN:VEVENT
-DTSTAMP:20051222T205953Z
-CREATED:20060101T150000Z
-DTSTART:{now1:04d}0101T100000Z
-DURATION:PT1H
-SUMMARY:event 2
-UID:event2@ninevah.local
-ORGANIZER:mailto:user01@example.com
-ATTENDEE:mailto:user01@example.com
-ATTENDEE:mailto:group02@example.com
-END:VEVENT
-END:VCALENDAR&quot;&quot;&quot;.format(**now)
-
-    groupdata3 = &quot;&quot;&quot;BEGIN:VCALENDAR
-CALSCALE:GREGORIAN
-PRODID:-//Example Inc.//Example Calendar//EN
-VERSION:2.0
-BEGIN:VEVENT
-DTSTAMP:20051222T205953Z
-CREATED:20060101T150000Z
-DTSTART:{now1:04d}0101T100000Z
-DURATION:PT1H
-SUMMARY:event 3
-UID:event3@ninevah.local
-ORGANIZER:mailto:user01@example.com
-ATTENDEE:mailto:user01@example.com
-ATTENDEE:mailto:group04@example.com
-END:VEVENT
-END:VCALENDAR&quot;&quot;&quot;.format(**now)
-
-    @inlineCallbacks
-    def setUp(self):
-        self.accounts = FilePath(__file__).sibling(&quot;accounts&quot;).child(&quot;groupAccounts.xml&quot;)
-        yield super(TestGroupAttendeeSync, self).setUp()
-        yield self.populate()
-
-
-    def configure(self):
-        super(TestGroupAttendeeSync, self).configure()
-        config.GroupAttendees.Enabled = True
-        config.GroupAttendees.ReconciliationDelaySeconds = 0
-        config.GroupAttendees.AutoUpdateSecondsFromNow = 0
-
-
-    @inlineCallbacks
-    def populate(self):
-        yield populateCalendarsFrom(self.requirements, self.theStoreUnderTest(0))
-
-    requirements = {
-        &quot;user01&quot; : None,
-        &quot;user02&quot; : None,
-        &quot;user06&quot; : None,
-        &quot;user07&quot; : None,
-        &quot;user08&quot; : None,
-        &quot;user09&quot; : None,
-        &quot;user10&quot; : None,
-    }
-
-    @inlineCallbacks
-    def test_group_attendees(self):
-        &quot;&quot;&quot;
-        Test that L{groupAttendeeReconcile} links groups to the associated calendar objects.
-        &quot;&quot;&quot;
-
-        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
-        yield calendar0.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.groupdata1))
-        yield calendar0.createCalendarObjectWithName(&quot;2.ics&quot;, Component.fromString(self.groupdata2))
-        yield calendar0.createCalendarObjectWithName(&quot;3.ics&quot;, Component.fromString(self.groupdata3))
-        yield self.commitTransaction(0)
-
-        yield JobItem.waitEmpty(self.theStoreUnderTest(0).newTransaction, reactor, 60.0)
-
-        # Trigger sync
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.sync()
-
-        # Link groups
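-        # 2.ics (group02) and 3.ics (group04) each carry one group attendee;
-        # 1.ics has none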
-        len_links = yield syncer.groupAttendeeReconcile()
-        self.assertEqual(len_links, 2)
-
-        # Local calendar exists
-        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
-        self.assertTrue(calendar1 is not None)
-        children = yield calendar1.objectResources()
-        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;2.ics&quot;, &quot;3.ics&quot;,)))
-
-        object2 = yield calendar1.objectResourceWithName(&quot;2.ics&quot;)
-        record = (yield object2.groupEventLinks()).values()[0]
-        group02 = yield self.theTransactionUnderTest(1).groupByUID(u&quot;group02&quot;)
-        self.assertEqual(record.groupID, group02.groupID)
-        self.assertEqual(record.membershipHash, group02.membershipHash)
-
-        object3 = yield calendar1.objectResourceWithName(&quot;3.ics&quot;)
-        record = (yield object3.groupEventLinks()).values()[0]
-        group04 = yield self.theTransactionUnderTest(1).groupByUID(u&quot;group04&quot;)
-        self.assertEqual(record.groupID, group04.groupID)
-        self.assertEqual(record.membershipHash, group04.membershipHash)
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_home_syncpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationtesttest_home_syncpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_home_sync.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,1307 @@
</span><ins>+##
+# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from pycalendar.datetime import DateTime
+from twext.enterprise.dal.syntax import Select
+from twext.enterprise.jobqueue import JobItem
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.filepath import FilePath
+from twistedcaldav.config import config
+from twistedcaldav.ical import Component, normalize_iCalStr
+from txdav.caldav.datastore.sql import ManagedAttachment
+from txdav.caldav.datastore.sql_directory import GroupShareeRecord
+from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
+from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
+    AttachmentMigrationRecord
+from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
+from txdav.common.datastore.sql_directory import DelegateRecord, \
+    ExternalDelegateGroupsRecord, DelegateGroupsRecord, GroupsRecord
+from txdav.common.datastore.sql_notification import NotificationCollection
+from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_EXTERNAL, \
+    _BIND_MODE_READ, _HOME_STATUS_MIGRATING, _HOME_STATUS_NORMAL, \
+    _HOME_STATUS_DISABLED
+from txdav.common.datastore.test.util import populateCalendarsFrom
+from txdav.who.delegates import Delegates
+from txweb2.http_headers import MimeType
+from txweb2.stream import MemoryStream
+from uuid import uuid4
+import json
+
+
+class TestCrossPodHomeSync(MultiStoreConduitTest):
+    &quot;&quot;&quot;
+    Test that L{CrossPodHomeSync} works.
+    &quot;&quot;&quot;
+
+    nowYear = {&quot;now&quot;: DateTime.getToday().getYear()}
+
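+    # iCalendar test data: CRLF line endings are required by the spec, and the
+    # DTSTART year is pinned to the current year via the nowYear mapping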
+    caldata1 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid1
+DTSTART:{now:04d}0102T140000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:instance
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**nowYear)
+
+    caldata1_changed = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid1
+DTSTART:{now:04d}0102T150000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:instance changed
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**nowYear)
+
+    caldata2 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid2
+DTSTART:{now:04d}0102T160000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:instance
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**nowYear)
+
+    caldata3 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid3
+DTSTART:{now:04d}0102T160000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:instance
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**nowYear)
+
+    caldata4 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid4
+DTSTART:{now:04d}0102T180000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=DAILY
+SUMMARY:instance
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**nowYear)
+
+
+    @inlineCallbacks
+    def test_remote_home(self):
+        &quot;&quot;&quot;
+        Test that a remote home can be accessed.
+        &quot;&quot;&quot;
+
+        home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        self.assertTrue(home01 is not None)
+        yield self.commitTransaction(0)
+
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        home = yield syncer._remoteHome(self.theTransactionUnderTest(1))
+        self.assertTrue(home is not None)
+        self.assertEqual(home.id(), home01.id())
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_prepare_home(self):
+        &quot;&quot;&quot;
+        Test that L{prepareCalendarHome} creates a home.
+        &quot;&quot;&quot;
+
+        # No home present
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        self.assertTrue(home is None)
+        yield self.commitTransaction(1)
+
+        yield syncer.prepareCalendarHome()
+
+        # Home is present
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        self.assertTrue(home is not None)
+        children = yield home.listChildren()
+        self.assertEqual(len(children), 0)
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_prepare_home_external_txn(self):
+        &quot;&quot;&quot;
+        Test that L{prepareCalendarHome} creates a home when passed an existing transaction.
+        &quot;&quot;&quot;
+
+        # No home present
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        self.assertTrue(home is None)
+        yield self.commitTransaction(1)
+
+        yield syncer.prepareCalendarHome(txn=self.theTransactionUnderTest(1))
+        yield self.commitTransaction(1)
+
+        # Home is present
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        self.assertTrue(home is not None)
+        children = yield home.listChildren()
+        self.assertEqual(len(children), 0)
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_home_metadata(self):
+        &quot;&quot;&quot;
+        Test that L{syncCalendarHomeMetaData} syncs home metadata correctly.
+        &quot;&quot;&quot;
+
+        alarm_event_timed = &quot;&quot;&quot;BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_event_timed
+TRIGGER:-PT10M
+END:VALARM
+&quot;&quot;&quot;
+        alarm_event_allday = &quot;&quot;&quot;BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_event_allday
+TRIGGER:-PT10M
+END:VALARM
+&quot;&quot;&quot;
+        alarm_todo_timed = &quot;&quot;&quot;BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_todo_timed
+TRIGGER:-PT10M
+END:VALARM
+&quot;&quot;&quot;
+        alarm_todo_allday = &quot;&quot;&quot;BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_todo_allday
+TRIGGER:-PT10M
+END:VALARM
+&quot;&quot;&quot;
+        availability = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+PRODID:-//Example Inc.//Example Calendar//EN
+BEGIN:VAVAILABILITY
+UID:20061005T133225Z-00001-availability@example.com
+DTSTART:20060101T000000Z
+DTEND:20060108T000000Z
+DTSTAMP:20061005T133225Z
+ORGANIZER:mailto:bernard@example.com
+BEGIN:AVAILABLE
+UID:20061005T133225Z-00001-A-availability@example.com
+DTSTART:20060102T090000Z
+DTEND:20060102T120000Z
+DTSTAMP:20061005T133225Z
+RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
+SUMMARY:Weekdays from 9:00 to 12:00
+END:AVAILABLE
+END:VAVAILABILITY
+END:VCALENDAR
+&quot;&quot;&quot;
+
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        events0 = yield home0.createChildWithName(&quot;events&quot;)
+        yield home0.setDefaultCalendar(events0, &quot;VEVENT&quot;)
+        yield home0.setDefaultAlarm(alarm_event_timed, True, True)
+        yield home0.setDefaultAlarm(alarm_event_allday, True, False)
+        yield home0.setDefaultAlarm(alarm_todo_timed, False, True)
+        yield home0.setDefaultAlarm(alarm_todo_allday, False, False)
+        yield home0.setAvailability(Component.fromString(availability))
+        yield self.commitTransaction(0)
+
+        # Trigger sync
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.sync()
+
+        # Home is present with correct metadata
+        home1 = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        self.assertTrue(home1 is not None)
+        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
+        events1 = yield home1.childWithName(&quot;events&quot;)
+        tasks1 = yield home1.childWithName(&quot;tasks&quot;)
+        self.assertFalse(home1.isDefaultCalendar(calendar1))
+        self.assertTrue(home1.isDefaultCalendar(events1))
+        self.assertTrue(home1.isDefaultCalendar(tasks1))
+        self.assertEqual(home1.getDefaultAlarm(True, True), alarm_event_timed)
+        self.assertEqual(home1.getDefaultAlarm(True, False), alarm_event_allday)
+        self.assertEqual(home1.getDefaultAlarm(False, True), alarm_todo_timed)
+        self.assertEqual(home1.getDefaultAlarm(False, False), alarm_todo_allday)
+        self.assertEqual(normalize_iCalStr(home1.getAvailability()), normalize_iCalStr(availability))
+        yield self.commitTransaction(1)
+
+        # Make some changes
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
+        yield home0.setDefaultCalendar(calendar0, &quot;VEVENT&quot;)
+        yield home0.setDefaultAlarm(None, True, True)
+        yield home0.setDefaultAlarm(None, False, True)
+        yield self.commitTransaction(0)
+
+        # Trigger sync again
+        yield syncer.sync()
+
+        # Home is present with correct metadata
+        home1 = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        self.assertTrue(home1 is not None)
+        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
+        events1 = yield home1.childWithName(&quot;events&quot;)
+        tasks1 = yield home1.childWithName(&quot;tasks&quot;)
+        self.assertTrue(home1.isDefaultCalendar(calendar1))
+        self.assertFalse(home1.isDefaultCalendar(events1))
+        self.assertTrue(home1.isDefaultCalendar(tasks1))
+        self.assertEqual(home1.getDefaultAlarm(True, True), None)
+        self.assertEqual(home1.getDefaultAlarm(True, False), alarm_event_allday)
+        self.assertEqual(home1.getDefaultAlarm(False, True), None)
+        self.assertEqual(home1.getDefaultAlarm(False, False), alarm_todo_allday)
+        self.assertEqual(normalize_iCalStr(home1.getAvailability()), normalize_iCalStr(availability))
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_get_calendar_sync_list(self):
+        &quot;&quot;&quot;
+        Test that L{getCalendarSyncList} returns the correct results.
+        &quot;&quot;&quot;
+
+        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        yield self.commitTransaction(0)
+        home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;)
+        self.assertTrue(home01 is not None)
+        calendars01 = yield home01.loadChildren()
+        results01 = {}
+        for calendar in calendars01:
+            if calendar.owned():
+                sync_token = yield calendar.syncToken()
+                results01[calendar.id()] = CalendarMigrationRecord.make(
+                    calendarHomeResourceID=home01.id(),
+                    remoteResourceID=calendar.id(),
+                    localResourceID=0,
+                    lastSyncToken=sync_token,
+                )
+
+        yield self.commitTransaction(0)
+
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        results = yield syncer.getCalendarSyncList()
+        self.assertEqual(results, results01)
+
+
+    @inlineCallbacks
+    def test_sync_calendar_initial_empty(self):
+        &quot;&quot;&quot;
+        Test that L{syncCalendar} syncs an initially non-existent local calendar with
+        an empty remote calendar.
+        &quot;&quot;&quot;
+
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
+        remote_id = calendar0.id()
+        remote_sync_token = yield calendar0.syncToken()
+        yield self.commitTransaction(0)
+
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        yield syncer.prepareCalendarHome()
+
+        # No local calendar exists yet
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        children = yield home1.listChildren()
+        self.assertEqual(len(children), 0)
+        yield self.commitTransaction(1)
+
+        # Trigger sync of the one calendar
+        local_sync_state = {}
+        remote_sync_state = {remote_id: CalendarMigrationRecord.make(
+            calendarHomeResourceID=home0.id(),
+            remoteResourceID=remote_id,
+            localResourceID=0,
+            lastSyncToken=remote_sync_token,
+        )}
+        yield syncer.syncCalendar(
+            remote_id,
+            local_sync_state,
+            remote_sync_state,
+        )
+        self.assertEqual(len(local_sync_state), 1)
+        self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
+
+        # Local calendar exists
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
+        self.assertTrue(calendar1 is not None)
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_sync_calendar_initial_with_data(self):
+        &quot;&quot;&quot;
+        Test that L{syncCalendar} syncs an initially non-existent local calendar with
+        a remote calendar containing data, and that subsequent modifications,
+        removals, and additions of events are picked up by later syncs.
+        &quot;&quot;&quot;
+
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
+        o1 = yield calendar0.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.caldata1))
+        o2 = yield calendar0.createCalendarObjectWithName(&quot;2.ics&quot;, Component.fromString(self.caldata2))
+        o3 = yield calendar0.createCalendarObjectWithName(&quot;3.ics&quot;, Component.fromString(self.caldata3))
+        remote_id = calendar0.id()
+        mapping0 = dict([(o.name(), o.id()) for o in (o1, o2, o3)])
+        yield self.commitTransaction(0)
+
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        yield syncer.prepareCalendarHome()
+
+        # No local calendar exists yet
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
+        self.assertTrue(calendar1 is None)
+        yield self.commitTransaction(1)
+
+        # Trigger sync of the one calendar
+        local_sync_state = {}
+        remote_sync_state = yield syncer.getCalendarSyncList()
+        yield syncer.syncCalendar(
+            remote_id,
+            local_sync_state,
+            remote_sync_state,
+        )
+        self.assertEqual(len(local_sync_state), 1)
+        self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
+
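+        # Helper: verify the CALENDAR_OBJECT_MIGRATION table maps each remote
+        # object id to the id of its migrated local counterpart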
+        @inlineCallbacks
+        def _checkCalendarObjectMigrationState(home, mapping1):
+            com = schema.CALENDAR_OBJECT_MIGRATION
+            mappings = yield Select(
+                columns=[com.REMOTE_RESOURCE_ID, com.LOCAL_RESOURCE_ID],
+                From=com,
+                Where=(com.CALENDAR_HOME_RESOURCE_ID == home.id())
+            ).on(self.theTransactionUnderTest(1))
+            expected_mappings = dict([(mapping0[name], mapping1[name]) for name in mapping0.keys()])
+            self.assertEqual(dict(mappings), expected_mappings)
+
+
+        # Local calendar exists
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
+        self.assertTrue(calendar1 is not None)
+        children = yield calendar1.objectResources()
+        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;2.ics&quot;, &quot;3.ics&quot;,)))
+        mapping1 = dict([(o.name(), o.id()) for o in children])
+        yield _checkCalendarObjectMigrationState(home1, mapping1)
+        yield self.commitTransaction(1)
+
+        # Change one resource
+        object0 = yield self.calendarObjectUnderTest(
+            txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;
+        )
+        yield object0.setComponent(Component.fromString(self.caldata1_changed))
+        yield self.commitTransaction(0)
+
+        remote_sync_state = yield syncer.getCalendarSyncList()
+        yield syncer.syncCalendar(
+            remote_id,
+            local_sync_state,
+            remote_sync_state,
+        )
+
+        object1 = yield self.calendarObjectUnderTest(
+            txn=self.theTransactionUnderTest(1), home=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;
+        )
+        caldata = yield object1.component()
+        self.assertEqual(normalize_iCalStr(caldata), normalize_iCalStr(self.caldata1_changed))
+        yield self.commitTransaction(1)
+
+        # Remove one resource
+        object0 = yield self.calendarObjectUnderTest(
+            txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;2.ics&quot;
+        )
+        yield object0.remove()
+        del mapping0[&quot;2.ics&quot;]
+        yield self.commitTransaction(0)
+
+        remote_sync_state = yield syncer.getCalendarSyncList()
+        yield syncer.syncCalendar(
+            remote_id,
+            local_sync_state,
+            remote_sync_state,
+        )
+
+        calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING, name=&quot;calendar&quot;)
+        children = yield calendar1.objectResources()
+        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;3.ics&quot;,)))
+        mapping1 = dict([(o.name(), o.id()) for o in children])
+        yield _checkCalendarObjectMigrationState(home1, mapping1)
+        yield self.commitTransaction(1)
+
+        # Add one resource
+        calendar0 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, name=&quot;calendar&quot;)
+        o4 = yield calendar0.createCalendarObjectWithName(&quot;4.ics&quot;, Component.fromString(self.caldata4))
+        mapping0[o4.name()] = o4.id()
+        yield self.commitTransaction(0)
+
+        remote_sync_state = yield syncer.getCalendarSyncList()
+        yield syncer.syncCalendar(
+            remote_id,
+            local_sync_state,
+            remote_sync_state,
+        )
+
+        calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING, name=&quot;calendar&quot;)
+        children = yield calendar1.objectResources()
+        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;3.ics&quot;, &quot;4.ics&quot;)))
+        mapping1 = dict([(o.name(), o.id()) for o in children])
+        yield _checkCalendarObjectMigrationState(home1, mapping1)
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_sync_calendars_add_remove(self):
+        &quot;&quot;&quot;
+        Test that L{syncCalendarList} syncs the full set of calendars in the
+        home, including calendars subsequently added to or removed from the
+        remote home.
+        &quot;&quot;&quot;
+
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        children0 = yield home0.loadChildren()
+        details0 = dict([(child.id(), child.name()) for child in children0])
+        yield self.commitTransaction(0)
+
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        yield syncer.prepareCalendarHome()
+
+        # No local calendar exists yet
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        children1 = yield home1.loadChildren()
+        self.assertEqual(len(children1), 0)
+        yield self.commitTransaction(1)
+
+        # Trigger sync
+        yield syncer.syncCalendarList()
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        children1 = yield home1.loadChildren()
+        details1 = dict([(child.id(), child.name()) for child in children1])
+        self.assertEqual(set(details1.values()), set(details0.values()))
+        yield self.commitTransaction(1)
+
+        # Add a calendar
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        newcalendar0 = yield home0.createCalendarWithName(&quot;new-calendar&quot;)
+        details0[newcalendar0.id()] = newcalendar0.name()
+        yield self.commitTransaction(0)
+
+        # Trigger sync
+        yield syncer.syncCalendarList()
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        children1 = yield home1.loadChildren()
+        details1 = dict([(child.id(), child.name()) for child in children1])
+        self.assertTrue(&quot;new-calendar&quot; in details1.values())
+        self.assertEqual(set(details1.values()), set(details0.values()))
+        yield self.commitTransaction(1)
+
+        # Remove a calendar
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        calendar0 = yield home0.childWithName(&quot;new-calendar&quot;)
+        del details0[calendar0.id()]
+        yield calendar0.remove()
+        yield self.commitTransaction(0)
+
+        # Trigger sync
+        yield syncer.syncCalendarList()
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        children1 = yield home1.loadChildren()
+        details1 = dict([(child.id(), child.name()) for child in children1])
+        self.assertTrue(&quot;new-calendar&quot; not in details1.values())
+        self.assertEqual(set(details1.values()), set(details0.values()))
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_sync_attachments_add_remove(self):
+        &quot;&quot;&quot;
+        Test that L{syncAttachments} syncs attachment data, then an update to the data,
+        and finally a removal of the data.
+        &quot;&quot;&quot;
+
+
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
+        yield calendar0.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.caldata1))
+        yield calendar0.createCalendarObjectWithName(&quot;2.ics&quot;, Component.fromString(self.caldata2))
+        yield calendar0.createCalendarObjectWithName(&quot;3.ics&quot;, Component.fromString(self.caldata3))
+        remote_id = calendar0.id()
+        mapping0 = dict()
+        yield self.commitTransaction(0)
+
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        yield syncer.prepareCalendarHome()
+
+        # Trigger sync of the one calendar
+        local_sync_state = {}
+        remote_sync_state = yield syncer.getCalendarSyncList()
+        yield syncer.syncCalendar(
+            remote_id,
+            local_sync_state,
+            remote_sync_state,
+        )
+        self.assertEqual(len(local_sync_state), 1)
+        self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
+
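+        # Helper: look up the local attachment id recorded for a remote
+        # attachment id; the for/else returns None when no record matches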
+        @inlineCallbacks
+        def _mapLocalIDToRemote(remote_id):
+            records = yield AttachmentMigrationRecord.all(self.theTransactionUnderTest(1))
+            yield self.commitTransaction(1)
+            for record in records:
+                if record.remoteResourceID == remote_id:
+                    returnValue(record.localResourceID)
+            else:
+                returnValue(None)
+
+        # Sync attachments
+        changed, removed = yield syncer.syncAttachments()
+        self.assertEqual(changed, set())
+        self.assertEqual(removed, set())
+
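+        # Helper: verify the ATTACHMENT_MIGRATION table maps each remote
+        # attachment id to the id of its migrated local counterpart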
+        @inlineCallbacks
+        def _checkAttachmentObjectMigrationState(home, mapping1):
+            am = schema.ATTACHMENT_MIGRATION
+            mappings = yield Select(
+                columns=[am.REMOTE_RESOURCE_ID, am.LOCAL_RESOURCE_ID],
+                From=am,
+                Where=(am.CALENDAR_HOME_RESOURCE_ID == home.id())
+            ).on(self.theTransactionUnderTest(1))
+            expected_mappings = dict([(mapping0[name], mapping1[name]) for name in mapping0.keys()])
+            self.assertEqual(dict(mappings), expected_mappings)
+
+
+        # Local calendar exists
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
+        self.assertTrue(calendar1 is not None)
+        children = yield calendar1.objectResources()
+        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;2.ics&quot;, &quot;3.ics&quot;,)))
+
+        attachments = yield home1.getAllAttachments()
+        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+        yield _checkAttachmentObjectMigrationState(home1, mapping1)
+        yield self.commitTransaction(1)
+
+        # Add one attachment
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
+        attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text #1.&quot;))
+        id0_1 = attachment.id()
+        md50_1 = attachment.md5()
+        managedid0_1 = attachment.managedID()
+        mapping0[md50_1] = id0_1
+        yield self.commitTransaction(0)
+
+        # Sync attachments
+        changed, removed = yield syncer.syncAttachments()
+        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1)),)))
+        self.assertEqual(removed, set())
+
+        # Validate changes
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        attachments = yield home1.getAllAttachments()
+        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+        yield _checkAttachmentObjectMigrationState(home1, mapping1)
+
+        # Add another attachment
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;2.ics&quot;)
+        attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test2.txt&quot;, MemoryStream(&quot;Here is some text #2.&quot;))
+        id0_2 = attachment.id()
+        md50_2 = attachment.md5()
+        mapping0[md50_2] = id0_2
+        yield self.commitTransaction(0)
+
+        # Sync attachments
+        changed, removed = yield syncer.syncAttachments()
+        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_2)),)))
+        self.assertEqual(removed, set())
+
+        # Validate changes
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        attachments = yield home1.getAllAttachments()
+        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+        yield _checkAttachmentObjectMigrationState(home1, mapping1)
+
+        # Change original attachment (this is actually a remove and a create all in one)
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
+        attachment, _ignore_location = yield object1.updateAttachment(managedid0_1, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text #1 - changed.&quot;))
+        del mapping0[md50_1]
+        id0_1_changed = attachment.id()
+        md50_1_changed = attachment.md5()
+        managedid0_1_changed = attachment.managedID()
+        mapping0[md50_1_changed] = id0_1_changed
+        yield self.commitTransaction(0)
+
+        # Sync attachments
+        changed, removed = yield syncer.syncAttachments()
+        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1_changed)),)))
+        self.assertEqual(removed, set((id0_1,)))
+
+        # Validate changes
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        attachments = yield home1.getAllAttachments()
+        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+        yield _checkAttachmentObjectMigrationState(home1, mapping1)
+
+        # Add original to a different resource
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
+        component = yield object1.componentForUser()
+        attach = component.mainComponent().getProperty(&quot;ATTACH&quot;)
+
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;3.ics&quot;)
+        component = yield object1.componentForUser()
+        attach = component.mainComponent().addProperty(attach)
+        yield object1.setComponent(component)
+        yield self.commitTransaction(0)
+
+        # Sync attachments
+        changed, removed = yield syncer.syncAttachments()
+        self.assertEqual(changed, set())
+        self.assertEqual(removed, set())
+
+        # Validate changes
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        attachments = yield home1.getAllAttachments()
+        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+        yield _checkAttachmentObjectMigrationState(home1, mapping1)
+
+        # Change original attachment in original resource (this creates a new one and does not remove the old)
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
+        attachment, _ignore_location = yield object1.updateAttachment(managedid0_1_changed, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text #1 - changed again.&quot;))
+        id0_1_changed_again = attachment.id()
+        md50_1_changed_again = attachment.md5()
+        mapping0[md50_1_changed_again] = id0_1_changed_again
+        yield self.commitTransaction(0)
+
+        # Sync attachments
+        changed, removed = yield syncer.syncAttachments()
+        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1_changed_again)),)))
+        self.assertEqual(removed, set())
+
+        # Validate changes
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        attachments = yield home1.getAllAttachments()
+        mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+        yield _checkAttachmentObjectMigrationState(home1, mapping1)
+
+
+    @inlineCallbacks
+    def test_link_attachments(self):
+        &quot;&quot;&quot;
+        Test that L{linkAttachments} links attachment data to the associated calendar objects.
+        &quot;&quot;&quot;
+
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
+        object0_1 = yield calendar0.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.caldata1))
+        object0_2 = yield calendar0.createCalendarObjectWithName(&quot;2.ics&quot;, Component.fromString(self.caldata2))
+        yield calendar0.createCalendarObjectWithName(&quot;3.ics&quot;, Component.fromString(self.caldata3))
+        remote_id = calendar0.id()
+
+        attachment, _ignore_location = yield object0_1.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text #1.&quot;))
+        id0_1 = attachment.id()
+        md50_1 = attachment.md5()
+        managedid0_1 = attachment.managedID()
+        pathID0_1 = ManagedAttachment.lastSegmentOfUriPath(managedid0_1, attachment.name())
+
+        attachment, _ignore_location = yield object0_2.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test2.txt&quot;, MemoryStream(&quot;Here is some text #2.&quot;))
+        id0_2 = attachment.id()
+        md50_2 = attachment.md5()
+        managedid0_2 = attachment.managedID()
+        pathID0_2 = ManagedAttachment.lastSegmentOfUriPath(managedid0_2, attachment.name())
+
+        yield self.commitTransaction(0)
+
+        # Add original to a different resource
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
+        component = yield object1.componentForUser()
+        attach = component.mainComponent().getProperty(&quot;ATTACH&quot;)
+
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;3.ics&quot;)
+        component = yield object1.componentForUser()
+        attach = component.mainComponent().addProperty(attach)
+        yield object1.setComponent(component)
+        yield self.commitTransaction(0)
+
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        yield syncer.prepareCalendarHome()
+
+        # Trigger sync of the one calendar
+        local_sync_state = {}
+        remote_sync_state = yield syncer.getCalendarSyncList()
+        yield syncer.syncCalendar(
+            remote_id,
+            local_sync_state,
+            remote_sync_state,
+        )
+        self.assertEqual(len(local_sync_state), 1)
+        self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
+
+        # Sync attachments
+        changed, removed = yield syncer.syncAttachments()
+
+        @inlineCallbacks
+        def _mapLocalIDToRemote(remote_id):
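+            # Map a remote attachment ID to the local ID of its migrated copy via the migration records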
+            records = yield AttachmentMigrationRecord.all(self.theTransactionUnderTest(1))
+            yield self.commitTransaction(1)
+            for record in records:
+                if record.remoteResourceID == remote_id:
+                    returnValue(record.localResourceID)
+            returnValue(None)
+
+        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1)), (yield _mapLocalIDToRemote(id0_2)),)))
+        self.assertEqual(removed, set())
+
+        # Link attachments (after home is disabled)
+        yield syncer.disableRemoteHome()
+        len_links = yield syncer.linkAttachments()
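+        # Three links expected: one attachment each on 1.ics and 2.ics, plus attachment #1 reused on 3.ics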
+        self.assertEqual(len_links, 3)
+
+        # Local calendar exists
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
+        self.assertTrue(calendar1 is not None)
+        children = yield calendar1.objectResources()
+        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;2.ics&quot;, &quot;3.ics&quot;,)))
+
+        # Make sure calendar object is associated with attachment
+        object1 = yield calendar1.objectResourceWithName(&quot;1.ics&quot;)
+        attachments = yield object1.managedAttachmentList()
+        self.assertEqual(attachments, [pathID0_1, ])
+
+        attachment = yield object1.attachmentWithManagedID(managedid0_1)
+        self.assertTrue(attachment is not None)
+        self.assertEqual(attachment.md5(), md50_1)
+
+        # Make sure calendar object is associated with attachment
+        object1 = yield calendar1.objectResourceWithName(&quot;2.ics&quot;)
+        attachments = yield object1.managedAttachmentList()
+        self.assertEqual(attachments, [pathID0_2, ])
+
+        attachment = yield object1.attachmentWithManagedID(managedid0_2)
+        self.assertTrue(attachment is not None)
+        self.assertEqual(attachment.md5(), md50_2)
+
+        # Make sure calendar object is associated with attachment
+        object1 = yield calendar1.objectResourceWithName(&quot;3.ics&quot;)
+        attachments = yield object1.managedAttachmentList()
+        self.assertEqual(attachments, [pathID0_1, ])
+
+        attachment = yield object1.attachmentWithManagedID(managedid0_1)
+        self.assertTrue(attachment is not None)
+        self.assertEqual(attachment.md5(), md50_1)
+
+
+    @inlineCallbacks
+    def test_delegate_reconcile(self):
+        &quot;&quot;&quot;
+        Test that L{delegateReconcile} copies over the full set of delegates and caches the associated groups.
+        &quot;&quot;&quot;
+
+        # Create remote home
+        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        yield self.commitTransaction(0)
+
+        # Add some delegates
+        txn = self.theTransactionUnderTest(0)
+        record01 = yield txn.directoryService().recordWithUID(u&quot;user01&quot;)
+        record02 = yield txn.directoryService().recordWithUID(u&quot;user02&quot;)
+        record03 = yield txn.directoryService().recordWithUID(u&quot;user03&quot;)
+
+        group01 = yield txn.directoryService().recordWithUID(u&quot;__top_group_1__&quot;)
+        group02 = yield txn.directoryService().recordWithUID(u&quot;right_coast&quot;)
+
+        # Add user02 and user03 as individual delegates
+        yield Delegates.addDelegate(txn, record01, record02, True)
+        yield Delegates.addDelegate(txn, record01, record03, False)
+
+        # Add group delegates
+        yield Delegates.addDelegate(txn, record01, group01, True)
+        yield Delegates.addDelegate(txn, record01, group02, False)
+
+        # Add external delegates
+        yield txn.assignExternalDelegates(u&quot;user01&quot;, None, None, u&quot;external1&quot;, u&quot;external2&quot;)
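+        # (external1 is the read-only group UID, external2 the read-write group UID)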
+
+        yield self.commitTransaction(0)
+
+        # Initially no local delegates
+        txn = self.theTransactionUnderTest(1)
+        delegates = yield txn.dumpIndividualDelegatesLocal(u&quot;user01&quot;)
+        self.assertEqual(len(delegates), 0)
+        delegates = yield txn.dumpGroupDelegatesLocal(u&quot;user01&quot;)
+        self.assertEqual(len(delegates), 0)
+        externals = yield txn.dumpExternalDelegatesLocal(u&quot;user01&quot;)
+        self.assertEqual(len(externals), 0)
+        yield self.commitTransaction(1)
+
+        # Sync from remote side
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        yield syncer.disableRemoteHome()
+        yield syncer.delegateReconcile()
+
+        # Now have local delegates
+        txn = self.theTransactionUnderTest(1)
+
+        delegates = yield txn.dumpIndividualDelegatesLocal(u&quot;user01&quot;)
+        self.assertEqual(
+            set(delegates),
+            set((
+                DelegateRecord.make(delegator=&quot;user01&quot;, delegate=&quot;user02&quot;, readWrite=1),
+                DelegateRecord.make(delegator=&quot;user01&quot;, delegate=&quot;user03&quot;, readWrite=0),
+            )),
+        )
+
+        delegateGroups = yield txn.dumpGroupDelegatesLocal(u&quot;user01&quot;)
+        group_top = yield txn.groupByUID(u&quot;__top_group_1__&quot;)
+        group_right = yield txn.groupByUID(u&quot;right_coast&quot;)
+        self.assertEqual(
+            set([item[0] for item in delegateGroups]),
+            set((
+                DelegateGroupsRecord.make(delegator=&quot;user01&quot;, groupID=group_top.groupID, readWrite=1, isExternal=False),
+                DelegateGroupsRecord.make(delegator=&quot;user01&quot;, groupID=group_right.groupID, readWrite=0, isExternal=False),
+            )),
+        )
+
+        externals = yield txn.dumpExternalDelegatesLocal(u&quot;user01&quot;)
+        self.assertEqual(
+            set(externals),
+            set((
+                ExternalDelegateGroupsRecord.make(
+                    delegator=&quot;user01&quot;,
+                    groupUIDRead=&quot;external1&quot;,
+                    groupUIDWrite=&quot;external2&quot;,
+                ),
+            )),
+        )
+
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_notifications_reconcile(self):
+        &quot;&quot;&quot;
+        Test that L{notificationsReconcile} copies over the full set of notification objects.
+        &quot;&quot;&quot;
+
+        # Create remote home and add some fake notifications
+        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        notifications = yield self.theTransactionUnderTest(0).notificationsWithUID(&quot;user01&quot;, create=True)
+        uid1 = str(uuid4())
+        obj1 = yield notifications.writeNotificationObject(uid1, &quot;type1&quot;, &quot;data1&quot;)
+        id1 = obj1.id()
+        uid2 = str(uuid4())
+        obj2 = yield notifications.writeNotificationObject(uid2, &quot;type2&quot;, &quot;data2&quot;)
+        id2 = obj2.id()
+        yield self.commitTransaction(0)
+
+        # Sync from remote side
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        yield syncer.prepareCalendarHome()
+        yield syncer.disableRemoteHome()
+        changes = yield syncer.notificationsReconcile()
+        self.assertEqual(changes, 2)
+
+        # Now have local notifications
+        notifications = yield NotificationCollection.notificationsWithUID(
+            self.theTransactionUnderTest(1),
+            &quot;user01&quot;,
+            status=_HOME_STATUS_MIGRATING,
+        )
+        results = yield notifications.notificationObjects()
+        self.assertEqual(len(results), 2)
+        for result in results:
+            for test_uid, test_id, test_type, test_data in ((uid1, id1, &quot;type1&quot;, &quot;data1&quot;,), (uid2, id2, &quot;type2&quot;, &quot;data2&quot;,),):
+                if result.uid() == test_uid:
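+                    # Migrated notifications are re-created locally, so the resource ID differs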
+                    self.assertNotEqual(result.id(), test_id)
+                    self.assertEqual(json.loads(result.notificationType()), test_type)
+                    data = yield result.notificationData()
+                    self.assertEqual(json.loads(data), test_data)
+                    break
+            else:
+                self.fail(&quot;Notification uid {} not found&quot;.format(result.uid()))
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_disable_remote_home(self):
+        &quot;&quot;&quot;
+        Test that L{disableRemoteHome} changes the remote home status to disabled and
+        prevents the home from being accessed as a normal-status home.
+        &quot;&quot;&quot;
+
+        # Create remote home and its notification collection
+        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        yield self.theTransactionUnderTest(0).notificationsWithUID(&quot;user01&quot;, create=True)
+        yield self.commitTransaction(0)
+
+        # Sync from remote side
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        yield syncer.prepareCalendarHome()
+        yield syncer.disableRemoteHome()
+
+        # It is disabled
+        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;)
+        self.assertTrue(home is None)
+        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_NORMAL)
+        self.assertTrue(home is None)
+        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_DISABLED)
+        self.assertTrue(home is not None)
+        yield self.commitTransaction(0)
+
+
+
+class TestSharingSync(MultiStoreConduitTest):
+    &quot;&quot;&quot;
+    Test that L{CrossPodHomeSync} sharing sync works.
+    &quot;&quot;&quot;
+
+    @inlineCallbacks
+    def setUp(self):
+        self.accounts = FilePath(__file__).sibling(&quot;accounts&quot;).child(&quot;groupAccounts.xml&quot;)
+        self.augments = FilePath(__file__).sibling(&quot;accounts&quot;).child(&quot;augments.xml&quot;)
+        yield super(TestSharingSync, self).setUp()
+        yield self.populate()
+
+
+    def configure(self):
+        super(TestSharingSync, self).configure()
+        config.Sharing.Enabled = True
+        config.Sharing.Calendars.Enabled = True
+        config.Sharing.Calendars.Groups.Enabled = True
+        config.Sharing.Calendars.Groups.ReconciliationDelaySeconds = 0
+
+
+    @inlineCallbacks
+    def populate(self):
+        yield populateCalendarsFrom(self.requirements, self.theStoreUnderTest(0))
+
+    requirements = {
+        &quot;user01&quot; : None,
+        &quot;user02&quot; : None,
+        &quot;user06&quot; : None,
+        &quot;user07&quot; : None,
+        &quot;user08&quot; : None,
+        &quot;user09&quot; : None,
+        &quot;user10&quot; : None,
+    }
+
+
+    @inlineCallbacks
+    def _createShare(self, shareFrom, shareTo, accept=True):
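+        # Accounts whose names start with &quot;p&quot; live on pod 1; all others live on pod 0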
+        # Invite
+        txnindex = 1 if shareFrom[0] == &quot;p&quot; else 0
+        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareFrom, create=True)
+        calendar = yield home.childWithName(&quot;calendar&quot;)
+        shareeView = yield calendar.inviteUIDToShare(shareTo, _BIND_MODE_READ, &quot;summary&quot;)
+        yield self.commitTransaction(txnindex)
+
+        # Accept
+        if accept:
+            inviteUID = shareeView.shareUID()
+            txnindex = 1 if shareTo[0] == &quot;p&quot; else 0
+            shareeHome = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareTo)
+            shareeView = yield shareeHome.acceptShare(inviteUID)
+            sharedName = shareeView.name()
+            yield self.commitTransaction(txnindex)
+        else:
+            sharedName = None
+
+        returnValue(sharedName)
+
+
+    @inlineCallbacks
+    def test_shared_collections_reconcile(self):
+        &quot;&quot;&quot;
+        Test that L{sharedByCollectionsReconcile} and L{sharedToCollectionsReconcile} copy over the full set of shared collections.
+        &quot;&quot;&quot;
+
+        # Create home
+        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        yield self.commitTransaction(0)
+
+        # Shared by migrating user
+        shared_name_02 = yield self._createShare(&quot;user01&quot;, &quot;user02&quot;)
+        shared_name_03 = yield self._createShare(&quot;user01&quot;, &quot;puser03&quot;)
+
+        # Shared to migrating user
+        shared_name_04 = yield self._createShare(&quot;user04&quot;, &quot;user01&quot;)
+        shared_name_05 = yield self._createShare(&quot;puser05&quot;, &quot;user01&quot;)
+
+        # Sync from remote side
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        yield syncer.sync()
+        yield syncer.disableRemoteHome()
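+        # Two shared-by (user02, puser03) and two shared-to (user04, puser05) collections expected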
+        changes = yield syncer.sharedByCollectionsReconcile()
+        self.assertEqual(changes, 2)
+        changes = yield syncer.sharedToCollectionsReconcile()
+        self.assertEqual(changes, 2)
+
+        # Local calendar exists with shares
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
+        invites1 = yield calendar1.sharingInvites()
+        self.assertEqual(len(invites1), 2)
+        self.assertEqual(set([invite.uid for invite in invites1]), set((shared_name_02, shared_name_03,)))
+        yield self.commitTransaction(1)
+
+        # Remote sharee can access it
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user02&quot;)
+        calendar0 = yield home0.childWithName(shared_name_02)
+        self.assertTrue(calendar0 is not None)
+
+        # Local sharee can access it
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;puser03&quot;)
+        calendar1 = yield home1.childWithName(shared_name_03)
+        self.assertTrue(calendar1 is not None)
+
+        # Local shared calendars exist
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        calendar1 = yield home1.childWithName(shared_name_04)
+        self.assertTrue(calendar1 is not None)
+        calendar1 = yield home1.childWithName(shared_name_05)
+        self.assertTrue(calendar1 is not None)
+        yield self.commitTransaction(1)
+
+        # Sharers see migrated user as sharee
+        externalHome0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_EXTERNAL)
+        calendar0 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user04&quot;, name=&quot;calendar&quot;)
+        invites = yield calendar0.allInvitations()
+        self.assertEqual(len(invites), 1)
+        self.assertEqual(invites[0].shareeUID, &quot;user01&quot;)
+        self.assertEqual(invites[0].shareeHomeID, externalHome0.id())
+        yield self.commitTransaction(0)
+
+        shareeHome1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;puser05&quot;, name=&quot;calendar&quot;)
+        invites = yield calendar1.allInvitations()
+        self.assertEqual(len(invites), 1)
+        self.assertEqual(invites[0].shareeUID, &quot;user01&quot;)
+        self.assertEqual(invites[0].shareeHomeID, shareeHome1.id())
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_group_shared_collections_reconcile(self):
+        &quot;&quot;&quot;
+        Test that L{sharedByCollectionsReconcile} copies over collections shared to a group, including the associated group sharee records.
+        &quot;&quot;&quot;
+
+        # Create home
+        yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        yield self.commitTransaction(0)
+
+        # Shared by migrating user
+        yield self._createShare(&quot;user01&quot;, &quot;group02&quot;, accept=False)
+
+        # Sync from remote side
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.loadRecord()
+        yield syncer.sync()
+        yield syncer.disableRemoteHome()
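+        # The single invite to group02 fans out to one invite per group member (three members in the test accounts)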
+        changes = yield syncer.sharedByCollectionsReconcile()
+        self.assertEqual(changes, 3)
+        changes = yield syncer.sharedToCollectionsReconcile()
+        self.assertEqual(changes, 0)
+
+        # Local calendar exists with shares
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
+        invites1 = yield calendar1.sharingInvites()
+        self.assertEqual(len(invites1), 3)
+        sharee = yield GroupShareeRecord.querysimple(self.theTransactionUnderTest(1), calendarID=calendar1.id())
+        self.assertEqual(len(sharee), 1)
+        group = yield GroupsRecord.querysimple(self.theTransactionUnderTest(1), groupID=sharee[0].groupID)
+        self.assertEqual(len(group), 1)
+        self.assertEqual(group[0].groupUID, &quot;group02&quot;)
+        yield self.commitTransaction(1)
+
+
+
+class TestGroupAttendeeSync(MultiStoreConduitTest):
+    &quot;&quot;&quot;
+    Test that L{CrossPodHomeSync} group attendee reconciliation works.
+    &quot;&quot;&quot;
+
+    now = {&quot;now1&quot;: DateTime.getToday().getYear() + 1}
+
+    groupdata1 = &quot;&quot;&quot;BEGIN:VCALENDAR
+CALSCALE:GREGORIAN
+PRODID:-//Example Inc.//Example Calendar//EN
+VERSION:2.0
+BEGIN:VEVENT
+DTSTAMP:20051222T205953Z
+CREATED:20060101T150000Z
+DTSTART:{now1:04d}0101T100000Z
+DURATION:PT1H
+SUMMARY:event 1
+UID:event1@ninevah.local
+END:VEVENT
+END:VCALENDAR&quot;&quot;&quot;.format(**now)
+
+    groupdata2 = &quot;&quot;&quot;BEGIN:VCALENDAR
+CALSCALE:GREGORIAN
+PRODID:-//Example Inc.//Example Calendar//EN
+VERSION:2.0
+BEGIN:VEVENT
+DTSTAMP:20051222T205953Z
+CREATED:20060101T150000Z
+DTSTART:{now1:04d}0101T100000Z
+DURATION:PT1H
+SUMMARY:event 2
+UID:event2@ninevah.local
+ORGANIZER:mailto:user01@example.com
+ATTENDEE:mailto:user01@example.com
+ATTENDEE:mailto:group02@example.com
+END:VEVENT
+END:VCALENDAR&quot;&quot;&quot;.format(**now)
+
+    groupdata3 = &quot;&quot;&quot;BEGIN:VCALENDAR
+CALSCALE:GREGORIAN
+PRODID:-//Example Inc.//Example Calendar//EN
+VERSION:2.0
+BEGIN:VEVENT
+DTSTAMP:20051222T205953Z
+CREATED:20060101T150000Z
+DTSTART:{now1:04d}0101T100000Z
+DURATION:PT1H
+SUMMARY:event 3
+UID:event3@ninevah.local
+ORGANIZER:mailto:user01@example.com
+ATTENDEE:mailto:user01@example.com
+ATTENDEE:mailto:group04@example.com
+END:VEVENT
+END:VCALENDAR&quot;&quot;&quot;.format(**now)
+
+    @inlineCallbacks
+    def setUp(self):
+        self.accounts = FilePath(__file__).sibling(&quot;accounts&quot;).child(&quot;groupAccounts.xml&quot;)
+        yield super(TestGroupAttendeeSync, self).setUp()
+        yield self.populate()
+
+
+    def configure(self):
+        super(TestGroupAttendeeSync, self).configure()
+        config.GroupAttendees.Enabled = True
+        config.GroupAttendees.ReconciliationDelaySeconds = 0
+        config.GroupAttendees.AutoUpdateSecondsFromNow = 0
+
+
+    @inlineCallbacks
+    def populate(self):
+        yield populateCalendarsFrom(self.requirements, self.theStoreUnderTest(0))
+
+    requirements = {
+        &quot;user01&quot; : None,
+        &quot;user02&quot; : None,
+        &quot;user06&quot; : None,
+        &quot;user07&quot; : None,
+        &quot;user08&quot; : None,
+        &quot;user09&quot; : None,
+        &quot;user10&quot; : None,
+    }
+
+
+    @inlineCallbacks
+    def test_group_attendees(self):
+        &quot;&quot;&quot;
+        Test that L{groupAttendeeReconcile} links groups to the associated calendar objects.
+        &quot;&quot;&quot;
+
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        calendar0 = yield home0.childWithName(&quot;calendar&quot;)
+        yield calendar0.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.groupdata1))
+        yield calendar0.createCalendarObjectWithName(&quot;2.ics&quot;, Component.fromString(self.groupdata2))
+        yield calendar0.createCalendarObjectWithName(&quot;3.ics&quot;, Component.fromString(self.groupdata3))
+        yield self.commitTransaction(0)
+
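+        # Wait for the group attendee reconciliation jobs on pod 0 to complete before syncing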
+        yield JobItem.waitEmpty(self.theStoreUnderTest(0).newTransaction, reactor, 60.0)
+
+        # Trigger sync
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.sync()
+
+        # Link groups
+        len_links = yield syncer.groupAttendeeReconcile()
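+        # Two links expected: 2.ics -> group02 and 3.ics -> group04 (1.ics has no group attendee)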
+        self.assertEqual(len_links, 2)
+
+        # Local calendar exists
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        calendar1 = yield home1.childWithName(&quot;calendar&quot;)
+        self.assertTrue(calendar1 is not None)
+        children = yield calendar1.objectResources()
+        self.assertEqual(set([child.name() for child in children]), set((&quot;1.ics&quot;, &quot;2.ics&quot;, &quot;3.ics&quot;,)))
+
+        object2 = yield calendar1.objectResourceWithName(&quot;2.ics&quot;)
+        record = (yield object2.groupEventLinks()).values()[0]
+        group02 = yield self.theTransactionUnderTest(1).groupByUID(u&quot;group02&quot;)
+        self.assertEqual(record.groupID, group02.groupID)
+        self.assertEqual(record.membershipHash, group02.membershipHash)
+
+        object3 = yield calendar1.objectResourceWithName(&quot;3.ics&quot;)
+        record = (yield object3.groupEventLinks()).values()[0]
+        group04 = yield self.theTransactionUnderTest(1).groupByUID(u&quot;group04&quot;)
+        self.assertEqual(record.groupID, group04.groupID)
+        self.assertEqual(record.membershipHash, group04.membershipHash)
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_migrationpy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_migration.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,693 +0,0 @@
</span><del>-##
-# Copyright (c) 2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from pycalendar.datetime import DateTime
-from twisted.internet.defer import inlineCallbacks, returnValue
-from twisted.python.filepath import FilePath
-from twistedcaldav.config import config
-from twistedcaldav.ical import Component
-from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
-from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
-from txdav.common.datastore.sql_tables import _BIND_MODE_READ, \
-    _HOME_STATUS_DISABLED, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL, \
-    _HOME_STATUS_MIGRATING
-from txdav.common.datastore.test.util import populateCalendarsFrom
-from txdav.who.delegates import Delegates
-from txweb2.http_headers import MimeType
-from txweb2.stream import MemoryStream
-from txdav.caldav.datastore.scheduling.ischedule.delivery import IScheduleRequest
-from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
-from txweb2.dav.test.util import SimpleRequest
-from txdav.caldav.datastore.test.common import CaptureProtocol
-
-
-class TestCompleteMigrationCycle(MultiStoreConduitTest):
-    &quot;&quot;&quot;
-    Test that a full migration cycle using L{CrossPodHomeSync} works.
-    &quot;&quot;&quot;
-
-    def __init__(self, methodName='runTest'):
-        super(TestCompleteMigrationCycle, self).__init__(methodName)
-        self.stash = {}
-
-
-    @inlineCallbacks
-    def setUp(self):
-        @inlineCallbacks
-        def _fakeSubmitRequest(iself, ssl, host, port, request):
-            pod = (port - 8008) / 100
-            inbox = IScheduleInboxResource(self.site.resource, self.theStoreUnderTest(pod), podding=True)
-            response = yield inbox.http_POST(SimpleRequest(
-                self.site,
-                &quot;POST&quot;,
-                &quot;http://{host}:{port}/podding&quot;.format(host=host, port=port),
-                request.headers,
-                request.stream.mem,
-            ))
-            returnValue(response)
-
-
-        self.patch(IScheduleRequest, &quot;_submitRequest&quot;, _fakeSubmitRequest)
-        self.accounts = FilePath(__file__).sibling(&quot;accounts&quot;).child(&quot;groupAccounts.xml&quot;)
-        self.augments = FilePath(__file__).sibling(&quot;accounts&quot;).child(&quot;augments.xml&quot;)
-        yield super(TestCompleteMigrationCycle, self).setUp()
-        yield self.populate()
-
-
-    def configure(self):
-        super(TestCompleteMigrationCycle, self).configure()
-        config.GroupAttendees.Enabled = True
-        config.GroupAttendees.ReconciliationDelaySeconds = 0
-        config.GroupAttendees.AutoUpdateSecondsFromNow = 0
-        config.AccountingCategories.migration = True
-        config.AccountingPrincipals = [&quot;*&quot;]
-
-
-    @inlineCallbacks
-    def populate(self):
-        yield populateCalendarsFrom(self.requirements0, self.theStoreUnderTest(0))
-        yield populateCalendarsFrom(self.requirements1, self.theStoreUnderTest(1))
-
-    requirements0 = {
-        &quot;user01&quot; : None,
-        &quot;user02&quot; : None,
-        &quot;user03&quot; : None,
-        &quot;user04&quot; : None,
-        &quot;user05&quot; : None,
-        &quot;user06&quot; : None,
-        &quot;user07&quot; : None,
-        &quot;user08&quot; : None,
-        &quot;user09&quot; : None,
-        &quot;user10&quot; : None,
-    }
-
-    requirements1 = {
-        &quot;puser01&quot; : None,
-        &quot;puser02&quot; : None,
-        &quot;puser03&quot; : None,
-        &quot;puser04&quot; : None,
-        &quot;puser05&quot; : None,
-        &quot;puser06&quot; : None,
-        &quot;puser07&quot; : None,
-        &quot;puser08&quot; : None,
-        &quot;puser09&quot; : None,
-        &quot;puser10&quot; : None,
-    }
-
-
-    @inlineCallbacks
-    def _createShare(self, shareFrom, shareTo, accept=True):
-        # Invite
-        txnindex = 1 if shareFrom[0] == &quot;p&quot; else 0
-        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareFrom, create=True)
-        calendar = yield home.childWithName(&quot;calendar&quot;)
-        shareeView = yield calendar.inviteUIDToShare(shareTo, _BIND_MODE_READ, &quot;summary&quot;)
-        yield self.commitTransaction(txnindex)
-
-        # Accept
-        if accept:
-            inviteUID = shareeView.shareUID()
-            txnindex = 1 if shareTo[0] == &quot;p&quot; else 0
-            shareeHome = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareTo)
-            shareeView = yield shareeHome.acceptShare(inviteUID)
-            sharedName = shareeView.name()
-            yield self.commitTransaction(txnindex)
-        else:
-            sharedName = None
-
-        returnValue(sharedName)
-
-
-    def attachmentToString(self, attachment):
-        &quot;&quot;&quot;
-        Convenience to convert an L{IAttachment} to a string.
-
-        @param attachment: an L{IAttachment} provider to convert into a string.
-
-        @return: a L{Deferred} that fires with the contents of the attachment.
-
-        @rtype: L{Deferred} firing C{bytes}
-        &quot;&quot;&quot;
-        capture = CaptureProtocol()
-        attachment.retrieve(capture)
-        return capture.deferred
-
-
-    now = {
-        &quot;now&quot;: DateTime.getToday().getYear(),
-        &quot;now1&quot;: DateTime.getToday().getYear() + 1,
-    }
-
-    data01_1 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data01_1
-DTSTART:{now1:04d}0102T140000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:data01_1
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
-
-    data01_1_changed = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data01_1
-DTSTART:{now1:04d}0102T140000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:data01_1_changed
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
-
-    data01_2 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data01_2
-DTSTART:{now1:04d}0102T160000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:data01_2
-ORGANIZER:mailto:user01@example.com
-ATTENDEE:mailto:user01@example.com
-ATTENDEE:mailto:user02@example.com
-ATTENDEE:mailto:puser02@example.com
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
-
-    data01_3 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data01_3
-DTSTART:{now1:04d}0102T180000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:data01_3
-ORGANIZER:mailto:user01@example.com
-ATTENDEE:mailto:user01@example.com
-ATTENDEE:mailto:group02@example.com
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
-
-    data02_1 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data02_1
-DTSTART:{now1:04d}0103T140000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:data02_1
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
-
-    data02_2 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data02_2
-DTSTART:{now1:04d}0103T160000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:data02_2
-ORGANIZER:mailto:user02@example.com
-ATTENDEE:mailto:user02@example.com
-ATTENDEE:mailto:user01@example.com
-ATTENDEE:mailto:puser02@example.com
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
-
-    data02_3 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data02_3
-DTSTART:{now1:04d}0103T180000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:data02_3
-ORGANIZER:mailto:user02@example.com
-ATTENDEE:mailto:user02@example.com
-ATTENDEE:mailto:group01@example.com
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
-
-    datap02_1 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_datap02_1
-DTSTART:{now1:04d}0103T140000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:datap02_1
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
-
-    datap02_2 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_datap02_2
-DTSTART:{now1:04d}0103T160000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:datap02_2
-ORGANIZER:mailto:puser02@example.com
-ATTENDEE:mailto:puser02@example.com
-ATTENDEE:mailto:user01@example.com
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
-
-    datap02_3 = &quot;&quot;&quot;BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_datap02_3
-DTSTART:{now1:04d}0103T180000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:datap02_3
-ORGANIZER:mailto:puser02@example.com
-ATTENDEE:mailto:puser02@example.com
-ATTENDEE:mailto:group01@example.com
-END:VEVENT
-END:VCALENDAR
-&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
-
-
-    @inlineCallbacks
-    def preCheck(self):
-        &quot;&quot;&quot;
-        Checks prior to starting any tests
-        &quot;&quot;&quot;
-
-        for i in range(self.numberOfStores):
-            txn = self.theTransactionUnderTest(i)
-            record = yield txn.directoryService().recordWithUID(u&quot;user01&quot;)
-            self.assertEqual(record.serviceNodeUID, &quot;A&quot;)
-            self.assertEqual(record.thisServer(), i == 0)
-            record = yield txn.directoryService().recordWithUID(u&quot;user02&quot;)
-            self.assertEqual(record.serviceNodeUID, &quot;A&quot;)
-            self.assertEqual(record.thisServer(), i == 0)
-            record = yield txn.directoryService().recordWithUID(u&quot;puser02&quot;)
-            self.assertEqual(record.serviceNodeUID, &quot;B&quot;)
-            self.assertEqual(record.thisServer(), i == 1)
-            yield self.commitTransaction(i)
-
-
-    @inlineCallbacks
-    def initialState(self):
-        &quot;&quot;&quot;
-        Setup the server with an initial set of data
-
-        user01 - migrating user
-        user02 - has a calendar shared with user01
-        user03 - shared to by user01
-
-        puser01 - user on other pod
-        puser02 - has a calendar shared with user01
-        puser03 - shared to by user01
-        &quot;&quot;&quot;
-
-        # Data for user01
-        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
-        calendar = yield home.childWithName(&quot;calendar&quot;)
-        yield calendar.createCalendarObjectWithName(&quot;01_1.ics&quot;, Component.fromString(self.data01_1))
-        yield calendar.createCalendarObjectWithName(&quot;01_2.ics&quot;, Component.fromString(self.data01_2))
-        obj3 = yield calendar.createCalendarObjectWithName(&quot;01_3.ics&quot;, Component.fromString(self.data01_3))
-        attachment, _ignore_location = yield obj3.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text #1.&quot;))
-        self.stash[&quot;user01_attachment_id&quot;] = attachment.id()
-        self.stash[&quot;user01_attachment_md5&quot;] = attachment.md5()
-        self.stash[&quot;user01_attachment_mid&quot;] = attachment.managedID()
-        yield self.commitTransaction(0)
-
-        # Data for user02
-        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user02&quot;, create=True)
-        calendar = yield home.childWithName(&quot;calendar&quot;)
-        yield calendar.createCalendarObjectWithName(&quot;02_1.ics&quot;, Component.fromString(self.data02_1))
-        yield calendar.createCalendarObjectWithName(&quot;02_2.ics&quot;, Component.fromString(self.data02_2))
-        yield calendar.createCalendarObjectWithName(&quot;02_3.ics&quot;, Component.fromString(self.data02_3))
-        yield self.commitTransaction(0)
-
-        # Data for puser02
-        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;puser02&quot;, create=True)
-        calendar = yield home.childWithName(&quot;calendar&quot;)
-        yield calendar.createCalendarObjectWithName(&quot;p02_1.ics&quot;, Component.fromString(self.datap02_1))
-        yield calendar.createCalendarObjectWithName(&quot;p02_2.ics&quot;, Component.fromString(self.datap02_2))
-        yield calendar.createCalendarObjectWithName(&quot;p02_3.ics&quot;, Component.fromString(self.datap02_3))
-        yield self.commitTransaction(1)
-
-        # Share calendars
-        self.stash[&quot;sharename_user01_to_user03&quot;] = yield self._createShare(&quot;user01&quot;, &quot;user03&quot;)
-        self.stash[&quot;sharename_user01_to_puser03&quot;] = yield self._createShare(&quot;user01&quot;, &quot;puser03&quot;)
-        self.stash[&quot;sharename_user02_to_user01&quot;] = yield self._createShare(&quot;user02&quot;, &quot;user01&quot;)
-        self.stash[&quot;sharename_puser02_to_user01&quot;] = yield self._createShare(&quot;puser02&quot;, &quot;user01&quot;)
-
-        # Add some delegates
-        txn = self.theTransactionUnderTest(0)
-        record01 = yield txn.directoryService().recordWithUID(u&quot;user01&quot;)
-        record02 = yield txn.directoryService().recordWithUID(u&quot;user02&quot;)
-        record03 = yield txn.directoryService().recordWithUID(u&quot;user03&quot;)
-        precord01 = yield txn.directoryService().recordWithUID(u&quot;puser01&quot;)
-
-        group02 = yield txn.directoryService().recordWithUID(u&quot;group02&quot;)
-        group03 = yield txn.directoryService().recordWithUID(u&quot;group03&quot;)
-
-        # Add user02 and user03 as individual delegates
-        yield Delegates.addDelegate(txn, record01, record02, True)
-        yield Delegates.addDelegate(txn, record01, record03, False)
-        yield Delegates.addDelegate(txn, record01, precord01, False)
-
-        # Add group delegates
-        yield Delegates.addDelegate(txn, record01, group02, True)
-        yield Delegates.addDelegate(txn, record01, group03, False)
-
-        # Add external delegates
-        yield txn.assignExternalDelegates(u&quot;user01&quot;, None, None, u&quot;external1&quot;, u&quot;external2&quot;)
-
-        yield self.commitTransaction(0)
-
-        yield self.waitAllEmpty()
-
-
-    @inlineCallbacks
-    def secondState(self):
-        &quot;&quot;&quot;
-        Setup the server with data changes appearing after the first sync
-        &quot;&quot;&quot;
-        txn = self.theTransactionUnderTest(0)
-        obj = yield self.calendarObjectUnderTest(txn, name=&quot;01_1.ics&quot;, calendar_name=&quot;calendar&quot;, home=&quot;user01&quot;)
-        yield obj.setComponent(self.data01_1_changed)
-
-        obj = yield self.calendarObjectUnderTest(txn, name=&quot;02_2.ics&quot;, calendar_name=&quot;calendar&quot;, home=&quot;user02&quot;)
-        attachment, _ignore_location = yield obj.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test_02.txt&quot;, MemoryStream(&quot;Here is some text #02.&quot;))
-        self.stash[&quot;user02_attachment_id&quot;] = attachment.id()
-        self.stash[&quot;user02_attachment_md5&quot;] = attachment.md5()
-        self.stash[&quot;user02_attachment_mid&quot;] = attachment.managedID()
-
-        yield self.commitTransaction(0)
-
-        yield self.waitAllEmpty()
-
-
-    @inlineCallbacks
-    def finalState(self):
-        &quot;&quot;&quot;
-        Setup the server with data changes appearing before the final sync
-        &quot;&quot;&quot;
-        txn = self.theTransactionUnderTest(1)
-        obj = yield self.calendarObjectUnderTest(txn, name=&quot;p02_2.ics&quot;, calendar_name=&quot;calendar&quot;, home=&quot;puser02&quot;)
-        attachment, _ignore_location = yield obj.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test_p02.txt&quot;, MemoryStream(&quot;Here is some text #p02.&quot;))
-        self.stash[&quot;puser02_attachment_id&quot;] = attachment.id()
-        self.stash[&quot;puser02_attachment_mid&quot;] = attachment.managedID()
-        self.stash[&quot;puser02_attachment_md5&quot;] = attachment.md5()
-
-        yield self.commitTransaction(1)
-
-        yield self.waitAllEmpty()
-
-
-    @inlineCallbacks
-    def switchAccounts(self):
-        &quot;&quot;&quot;
-        Switch the migrated user accounts to point to the new pod
-        &quot;&quot;&quot;
-
-        for i in range(self.numberOfStores):
-            txn = self.theTransactionUnderTest(i)
-            record = yield txn.directoryService().recordWithUID(u&quot;user01&quot;)
-            yield self.changeRecord(record, txn.directoryService().fieldName.serviceNodeUID, u&quot;B&quot;, directory=txn.directoryService())
-            yield self.commitTransaction(i)
-
-        for i in range(self.numberOfStores):
-            txn = self.theTransactionUnderTest(i)
-            record = yield txn.directoryService().recordWithUID(u&quot;user01&quot;)
-            self.assertEqual(record.serviceNodeUID, &quot;B&quot;)
-            self.assertEqual(record.thisServer(), i == 1)
-            record = yield txn.directoryService().recordWithUID(u&quot;user02&quot;)
-            self.assertEqual(record.serviceNodeUID, &quot;A&quot;)
-            self.assertEqual(record.thisServer(), i == 0)
-            record = yield txn.directoryService().recordWithUID(u&quot;puser02&quot;)
-            self.assertEqual(record.serviceNodeUID, &quot;B&quot;)
-            self.assertEqual(record.thisServer(), i == 1)
-            yield self.commitTransaction(i)
-
-
-    @inlineCallbacks
-    def postCheck(self):
-        &quot;&quot;&quot;
-        Checks after migration is done
-        &quot;&quot;&quot;
-
-        # Check that the home has been moved
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;)
-        self.assertTrue(home.external())
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_NORMAL)
-        self.assertTrue(home is None)
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_EXTERNAL)
-        self.assertTrue(home is not None)
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_DISABLED)
-        self.assertTrue(home is not None)
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        self.assertTrue(home is None)
-        yield self.commitTransaction(0)
-
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;)
-        self.assertTrue(home.normal())
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_NORMAL)
-        self.assertTrue(home is not None)
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_EXTERNAL)
-        self.assertTrue(home is None)
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_DISABLED)
-        self.assertTrue(home is not None)
-        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
-        self.assertTrue(home is None)
-        yield self.commitTransaction(1)
-
-        # Check that the notifications have been moved
-        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_NORMAL)
-        self.assertTrue(notifications is None)
-        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_EXTERNAL)
-        self.assertTrue(notifications is None)
-        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_DISABLED)
-        self.assertTrue(notifications is not None)
-        yield self.commitTransaction(0)
-
-        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_NORMAL)
-        self.assertTrue(notifications is not None)
-        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_EXTERNAL)
-        self.assertTrue(notifications is None)
-        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_DISABLED)
-        self.assertTrue(notifications is not None)
-        yield self.commitTransaction(1)
-
-        # New pod data
-        homes = {}
-        homes[&quot;user01&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;)
-        homes[&quot;user02&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user02&quot;)
-        self.assertTrue(homes[&quot;user02&quot;].external())
-        homes[&quot;user03&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user03&quot;)
-        self.assertTrue(homes[&quot;user03&quot;].external())
-        homes[&quot;puser01&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;puser01&quot;)
-        self.assertTrue(homes[&quot;puser01&quot;].normal())
-        homes[&quot;puser02&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;puser02&quot;)
-        self.assertTrue(homes[&quot;puser02&quot;].normal())
-        homes[&quot;puser03&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;puser03&quot;)
-        self.assertTrue(homes[&quot;puser03&quot;].normal())
-
-        # Check calendar data on new pod
-        calendars = yield homes[&quot;user01&quot;].loadChildren()
-        calnames = dict([(calendar.name(), calendar) for calendar in calendars])
-        self.assertEqual(
-            set(calnames.keys()),
-            set((&quot;calendar&quot;, &quot;tasks&quot;, &quot;inbox&quot;, self.stash[&quot;sharename_user02_to_user01&quot;], self.stash[&quot;sharename_puser02_to_user01&quot;],))
-        )
-
-        # Check shared-by user01 on new pod
-        shared = calnames[&quot;calendar&quot;]
-        invitations = yield shared.sharingInvites()
-        by_sharee = dict([(invitation.shareeUID, invitation) for invitation in invitations])
-        self.assertEqual(len(invitations), 2)
-        self.assertEqual(set(by_sharee.keys()), set((&quot;user03&quot;, &quot;puser03&quot;,)))
-        self.assertEqual(by_sharee[&quot;user03&quot;].shareeHomeID, homes[&quot;user03&quot;].id())
-        self.assertEqual(by_sharee[&quot;puser03&quot;].shareeHomeID, homes[&quot;puser03&quot;].id())
-
-        # Check shared-to user01 on new pod
-        shared = calnames[self.stash[&quot;sharename_user02_to_user01&quot;]]
-        self.assertEqual(shared.ownerHome().uid(), &quot;user02&quot;)
-        self.assertEqual(shared.ownerHome().id(), homes[&quot;user02&quot;].id())
-
-        shared = calnames[self.stash[&quot;sharename_puser02_to_user01&quot;]]
-        self.assertEqual(shared.ownerHome().uid(), &quot;puser02&quot;)
-        self.assertEqual(shared.ownerHome().id(), homes[&quot;puser02&quot;].id())
-
-        shared = yield homes[&quot;puser02&quot;].calendarWithName(&quot;calendar&quot;)
-        invitations = yield shared.sharingInvites()
-        self.assertEqual(len(invitations), 1)
-        self.assertEqual(invitations[0].shareeHomeID, homes[&quot;user01&quot;].id())
-
-        yield self.commitTransaction(1)
-
-        # Old pod data
-        homes = {}
-        homes[&quot;user01&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;)
-        homes[&quot;user02&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user02&quot;)
-        self.assertTrue(homes[&quot;user02&quot;].normal())
-        homes[&quot;user03&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user03&quot;)
-        self.assertTrue(homes[&quot;user03&quot;].normal())
-        homes[&quot;puser01&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;puser01&quot;)
-        self.assertTrue(homes[&quot;puser01&quot;] is None)
-        homes[&quot;puser02&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;puser02&quot;)
-        self.assertTrue(homes[&quot;puser02&quot;].external())
-        homes[&quot;puser03&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;puser03&quot;)
-        self.assertTrue(homes[&quot;puser03&quot;].external())
-
-        # Check shared-by user01 on old pod
-        shared = yield homes[&quot;user03&quot;].calendarWithName(self.stash[&quot;sharename_user01_to_user03&quot;])
-        self.assertEqual(shared.ownerHome().uid(), &quot;user01&quot;)
-        self.assertEqual(shared.ownerHome().id(), homes[&quot;user01&quot;].id())
-
-        # Check shared-to user01 on old pod
-        shared = yield homes[&quot;user02&quot;].calendarWithName(&quot;calendar&quot;)
-        invitations = yield shared.sharingInvites()
-        self.assertEqual(len(invitations), 1)
-        self.assertEqual(invitations[0].shareeHomeID, homes[&quot;user01&quot;].id())
-
-        yield self.commitTransaction(0)
-
-        # Delegates on each pod
-        for pod in range(self.numberOfStores):
-            txn = self.theTransactionUnderTest(pod)
-            records = {}
-            for ctr in range(10):
-                uid = u&quot;user{:02d}&quot;.format(ctr + 1)
-                records[uid] = yield txn.directoryService().recordWithUID(uid)
-            for ctr in range(10):
-                uid = u&quot;puser{:02d}&quot;.format(ctr + 1)
-                records[uid] = yield txn.directoryService().recordWithUID(uid)
-            for ctr in range(10):
-                uid = u&quot;group{:02d}&quot;.format(ctr + 1)
-                records[uid] = yield txn.directoryService().recordWithUID(uid)
-
-            delegates = yield Delegates.delegatesOf(txn, records[&quot;user01&quot;], True, False)
-            self.assertTrue(records[&quot;user02&quot;] in delegates)
-            self.assertTrue(records[&quot;group02&quot;] in delegates)
-            delegates = yield Delegates.delegatesOf(txn, records[&quot;user01&quot;], True, True)
-            self.assertTrue(records[&quot;user02&quot;] in delegates)
-            self.assertTrue(records[&quot;user06&quot;] in delegates)
-            self.assertTrue(records[&quot;user07&quot;] in delegates)
-            self.assertTrue(records[&quot;user08&quot;] in delegates)
-
-            delegates = yield Delegates.delegatesOf(txn, records[&quot;user01&quot;], False, False)
-            self.assertTrue(records[&quot;user03&quot;] in delegates)
-            self.assertTrue(records[&quot;group03&quot;] in delegates)
-            self.assertTrue(records[&quot;puser01&quot;] in delegates)
-            delegates = yield Delegates.delegatesOf(txn, records[&quot;user01&quot;], False, True)
-            self.assertTrue(records[&quot;user03&quot;] in delegates)
-            self.assertTrue(records[&quot;user07&quot;] in delegates)
-            self.assertTrue(records[&quot;user08&quot;] in delegates)
-            self.assertTrue(records[&quot;user09&quot;] in delegates)
-            self.assertTrue(records[&quot;puser01&quot;] in delegates)
-
-        # Attachments
-        obj = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;01_3.ics&quot;, calendar_name=&quot;calendar&quot;, home=&quot;user01&quot;)
-        attachment = yield obj.attachmentWithManagedID(self.stash[&quot;user01_attachment_mid&quot;])
-        self.assertTrue(attachment is not None)
-        self.assertEqual(attachment.md5(), self.stash[&quot;user01_attachment_md5&quot;])
-        data = yield self.attachmentToString(attachment)
-        self.assertEqual(data, &quot;Here is some text #1.&quot;)
-
-
-    @inlineCallbacks
-    def test_migration(self):
-        &quot;&quot;&quot;
-        Full migration cycle.
-        &quot;&quot;&quot;
-
-        yield self.preCheck()
-
-        # Step 1. Live full sync
-        yield self.initialState()
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.sync()
-
-        # Step 2. Live incremental sync
-        yield self.secondState()
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.sync()
-
-        # Step 3. Disable home after final changes
-        yield self.finalState()
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
-        yield syncer.disableRemoteHome()
-
-        # Step 4. Final incremental sync
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;, final=True)
-        yield syncer.sync()
-
-        # Step 5. Final reconcile sync
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;, final=True)
-        yield syncer.finalSync()
-
-        # Step 6. Enable new home
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;, final=True)
-        yield syncer.enableLocalHome()
-
-        # Step 7. Remove old home
-        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;, final=True)
-        yield syncer.removeRemoteHome()
-
-        yield self.switchAccounts()
-
-        yield self.postCheck()
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_migrationpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationtesttest_migrationpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_migration.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,693 @@
</span><ins>+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from pycalendar.datetime import DateTime
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.filepath import FilePath
+from twistedcaldav.config import config
+from twistedcaldav.ical import Component
+from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
+from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
+from txdav.common.datastore.sql_tables import _BIND_MODE_READ, \
+    _HOME_STATUS_DISABLED, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL, \
+    _HOME_STATUS_MIGRATING
+from txdav.common.datastore.test.util import populateCalendarsFrom
+from txdav.who.delegates import Delegates
+from txweb2.http_headers import MimeType
+from txweb2.stream import MemoryStream
+from txdav.caldav.datastore.scheduling.ischedule.delivery import IScheduleRequest
+from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
+from txweb2.dav.test.util import SimpleRequest
+from txdav.caldav.datastore.test.common import CaptureProtocol
+
+
+class TestCompleteMigrationCycle(MultiStoreConduitTest):
+    &quot;&quot;&quot;
+    Test that a full migration cycle using L{CrossPodHomeSync} works.
+    &quot;&quot;&quot;
+
+    def __init__(self, methodName='runTest'):
+        super(TestCompleteMigrationCycle, self).__init__(methodName)
+        self.stash = {}
+
+
+    @inlineCallbacks
+    def setUp(self):
+        @inlineCallbacks
+        def _fakeSubmitRequest(iself, ssl, host, port, request):
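+            # Dispatch the faked HTTP request to the right store: the base pod
+            # listens on 8008, so (port - 8008) / 100 recovers the pod index
+            # (e.g. 8108 yields pod 1; integer division under Python 2).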
+            pod = (port - 8008) / 100
+            inbox = IScheduleInboxResource(self.site.resource, self.theStoreUnderTest(pod), podding=True)
+            response = yield inbox.http_POST(SimpleRequest(
+                self.site,
+                &quot;POST&quot;,
+                &quot;http://{host}:{port}/podding&quot;.format(host=host, port=port),
+                request.headers,
+                request.stream.mem,
+            ))
+            returnValue(response)
+
+
+        self.patch(IScheduleRequest, &quot;_submitRequest&quot;, _fakeSubmitRequest)
+        self.accounts = FilePath(__file__).sibling(&quot;accounts&quot;).child(&quot;groupAccounts.xml&quot;)
+        self.augments = FilePath(__file__).sibling(&quot;accounts&quot;).child(&quot;augments.xml&quot;)
+        yield super(TestCompleteMigrationCycle, self).setUp()
+        yield self.populate()
+
+
+    def configure(self):
+        super(TestCompleteMigrationCycle, self).configure()
+        config.GroupAttendees.Enabled = True
+        config.GroupAttendees.ReconciliationDelaySeconds = 0
+        config.GroupAttendees.AutoUpdateSecondsFromNow = 0
+        config.AccountingCategories.migration = True
+        config.AccountingPrincipals = [&quot;*&quot;]
+
+
+    @inlineCallbacks
+    def populate(self):
+        yield populateCalendarsFrom(self.requirements0, self.theStoreUnderTest(0))
+        yield populateCalendarsFrom(self.requirements1, self.theStoreUnderTest(1))
+
+    requirements0 = {
+        &quot;user01&quot; : None,
+        &quot;user02&quot; : None,
+        &quot;user03&quot; : None,
+        &quot;user04&quot; : None,
+        &quot;user05&quot; : None,
+        &quot;user06&quot; : None,
+        &quot;user07&quot; : None,
+        &quot;user08&quot; : None,
+        &quot;user09&quot; : None,
+        &quot;user10&quot; : None,
+    }
+
+    requirements1 = {
+        &quot;puser01&quot; : None,
+        &quot;puser02&quot; : None,
+        &quot;puser03&quot; : None,
+        &quot;puser04&quot; : None,
+        &quot;puser05&quot; : None,
+        &quot;puser06&quot; : None,
+        &quot;puser07&quot; : None,
+        &quot;puser08&quot; : None,
+        &quot;puser09&quot; : None,
+        &quot;puser10&quot; : None,
+    }
+
+
+    @inlineCallbacks
+    def _createShare(self, shareFrom, shareTo, accept=True):
+        # Invite
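+        # &quot;puser&quot; accounts live on pod 1 (store 1), plain &quot;user&quot; accounts on pod 0.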
+        txnindex = 1 if shareFrom[0] == &quot;p&quot; else 0
+        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareFrom, create=True)
+        calendar = yield home.childWithName(&quot;calendar&quot;)
+        shareeView = yield calendar.inviteUIDToShare(shareTo, _BIND_MODE_READ, &quot;summary&quot;)
+        yield self.commitTransaction(txnindex)
+
+        # Accept
+        if accept:
+            inviteUID = shareeView.shareUID()
+            txnindex = 1 if shareTo[0] == &quot;p&quot; else 0
+            shareeHome = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareTo)
+            shareeView = yield shareeHome.acceptShare(inviteUID)
+            sharedName = shareeView.name()
+            yield self.commitTransaction(txnindex)
+        else:
+            sharedName = None
+
+        returnValue(sharedName)
+
+
+    def attachmentToString(self, attachment):
+        &quot;&quot;&quot;
+        Convenience to convert an L{IAttachment} to a string.
+
+        @param attachment: an L{IAttachment} provider to convert into a string.
+
+        @return: a L{Deferred} that fires with the contents of the attachment.
+
+        @rtype: L{Deferred} firing C{bytes}
+        &quot;&quot;&quot;
+        capture = CaptureProtocol()
+        attachment.retrieve(capture)
+        return capture.deferred
+
+
+    now = {
+        &quot;now&quot;: DateTime.getToday().getYear(),
+        &quot;now1&quot;: DateTime.getToday().getYear() + 1,
+    }
+
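+    # The two year values are substituted into the iCalendar fixtures below via
+    # .format(**now) so the events always fall in the current and next calendar
+    # years; the .replace(&quot;\n&quot;, &quot;\r\n&quot;) on each fixture produces the CRLF
+    # line endings iCalendar requires.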
+    data01_1 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data01_1
+DTSTART:{now1:04d}0102T140000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:data01_1
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
+
+    data01_1_changed = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data01_1
+DTSTART:{now1:04d}0102T140000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:data01_1_changed
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
+
+    data01_2 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data01_2
+DTSTART:{now1:04d}0102T160000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:data01_2
+ORGANIZER:mailto:user01@example.com
+ATTENDEE:mailto:user01@example.com
+ATTENDEE:mailto:user02@example.com
+ATTENDEE:mailto:puser02@example.com
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
+
+    data01_3 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data01_3
+DTSTART:{now1:04d}0102T180000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:data01_3
+ORGANIZER:mailto:user01@example.com
+ATTENDEE:mailto:user01@example.com
+ATTENDEE:mailto:group02@example.com
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
+
+    data02_1 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data02_1
+DTSTART:{now1:04d}0103T140000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:data02_1
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
+
+    data02_2 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data02_2
+DTSTART:{now1:04d}0103T160000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:data02_2
+ORGANIZER:mailto:user02@example.com
+ATTENDEE:mailto:user02@example.com
+ATTENDEE:mailto:user01@example.com
+ATTENDEE:mailto:puser02@example.com
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
+
+    data02_3 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data02_3
+DTSTART:{now1:04d}0103T180000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:data02_3
+ORGANIZER:mailto:user02@example.com
+ATTENDEE:mailto:user02@example.com
+ATTENDEE:mailto:group01@example.com
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
+
+    datap02_1 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_datap02_1
+DTSTART:{now1:04d}0103T140000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:datap02_1
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
+
+    datap02_2 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_datap02_2
+DTSTART:{now1:04d}0103T160000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:datap02_2
+ORGANIZER:mailto:puser02@example.com
+ATTENDEE:mailto:puser02@example.com
+ATTENDEE:mailto:user01@example.com
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
+
+    datap02_3 = &quot;&quot;&quot;BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_datap02_3
+DTSTART:{now1:04d}0103T180000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:datap02_3
+ORGANIZER:mailto:puser02@example.com
+ATTENDEE:mailto:puser02@example.com
+ATTENDEE:mailto:group01@example.com
+END:VEVENT
+END:VCALENDAR
+&quot;&quot;&quot;.replace(&quot;\n&quot;, &quot;\r\n&quot;).format(**now)
+
+
+    @inlineCallbacks
+    def preCheck(self):
+        &quot;&quot;&quot;
+        Checks to run prior to starting any tests.
+        &quot;&quot;&quot;
+
+        for i in range(self.numberOfStores):
+            txn = self.theTransactionUnderTest(i)
+            record = yield txn.directoryService().recordWithUID(u&quot;user01&quot;)
+            self.assertEqual(record.serviceNodeUID, &quot;A&quot;)
+            self.assertEqual(record.thisServer(), i == 0)
+            record = yield txn.directoryService().recordWithUID(u&quot;user02&quot;)
+            self.assertEqual(record.serviceNodeUID, &quot;A&quot;)
+            self.assertEqual(record.thisServer(), i == 0)
+            record = yield txn.directoryService().recordWithUID(u&quot;puser02&quot;)
+            self.assertEqual(record.serviceNodeUID, &quot;B&quot;)
+            self.assertEqual(record.thisServer(), i == 1)
+            yield self.commitTransaction(i)
+
+
+    @inlineCallbacks
+    def initialState(self):
+        &quot;&quot;&quot;
+        Set up the server with an initial set of data.
+
+        user01 - the user being migrated
+        user02 - shares a calendar with user01
+        user03 - has a calendar shared to it by user01
+
+        puser01 - user on the other pod
+        puser02 - shares a calendar with user01
+        puser03 - has a calendar shared to it by user01
+        &quot;&quot;&quot;
+
+        # Data for user01
+        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user01&quot;, create=True)
+        calendar = yield home.childWithName(&quot;calendar&quot;)
+        yield calendar.createCalendarObjectWithName(&quot;01_1.ics&quot;, Component.fromString(self.data01_1))
+        yield calendar.createCalendarObjectWithName(&quot;01_2.ics&quot;, Component.fromString(self.data01_2))
+        obj3 = yield calendar.createCalendarObjectWithName(&quot;01_3.ics&quot;, Component.fromString(self.data01_3))
+        attachment, _ignore_location = yield obj3.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text #1.&quot;))
+        self.stash[&quot;user01_attachment_id&quot;] = attachment.id()
+        self.stash[&quot;user01_attachment_md5&quot;] = attachment.md5()
+        self.stash[&quot;user01_attachment_mid&quot;] = attachment.managedID()
+        yield self.commitTransaction(0)
+
+        # Data for user02
+        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=&quot;user02&quot;, create=True)
+        calendar = yield home.childWithName(&quot;calendar&quot;)
+        yield calendar.createCalendarObjectWithName(&quot;02_1.ics&quot;, Component.fromString(self.data02_1))
+        yield calendar.createCalendarObjectWithName(&quot;02_2.ics&quot;, Component.fromString(self.data02_2))
+        yield calendar.createCalendarObjectWithName(&quot;02_3.ics&quot;, Component.fromString(self.data02_3))
+        yield self.commitTransaction(0)
+
+        # Data for puser02
+        home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;puser02&quot;, create=True)
+        calendar = yield home.childWithName(&quot;calendar&quot;)
+        yield calendar.createCalendarObjectWithName(&quot;p02_1.ics&quot;, Component.fromString(self.datap02_1))
+        yield calendar.createCalendarObjectWithName(&quot;p02_2.ics&quot;, Component.fromString(self.datap02_2))
+        yield calendar.createCalendarObjectWithName(&quot;p02_3.ics&quot;, Component.fromString(self.datap02_3))
+        yield self.commitTransaction(1)
+
+        # Share calendars
+        self.stash[&quot;sharename_user01_to_user03&quot;] = yield self._createShare(&quot;user01&quot;, &quot;user03&quot;)
+        self.stash[&quot;sharename_user01_to_puser03&quot;] = yield self._createShare(&quot;user01&quot;, &quot;puser03&quot;)
+        self.stash[&quot;sharename_user02_to_user01&quot;] = yield self._createShare(&quot;user02&quot;, &quot;user01&quot;)
+        self.stash[&quot;sharename_puser02_to_user01&quot;] = yield self._createShare(&quot;puser02&quot;, &quot;user01&quot;)
+
+        # Add some delegates
+        txn = self.theTransactionUnderTest(0)
+        record01 = yield txn.directoryService().recordWithUID(u&quot;user01&quot;)
+        record02 = yield txn.directoryService().recordWithUID(u&quot;user02&quot;)
+        record03 = yield txn.directoryService().recordWithUID(u&quot;user03&quot;)
+        precord01 = yield txn.directoryService().recordWithUID(u&quot;puser01&quot;)
+
+        group02 = yield txn.directoryService().recordWithUID(u&quot;group02&quot;)
+        group03 = yield txn.directoryService().recordWithUID(u&quot;group03&quot;)
+
+        # Add user02 and user03 as individual delegates
+        yield Delegates.addDelegate(txn, record01, record02, True)
+        yield Delegates.addDelegate(txn, record01, record03, False)
+        yield Delegates.addDelegate(txn, record01, precord01, False)
+
+        # Add group delegates
+        yield Delegates.addDelegate(txn, record01, group02, True)
+        yield Delegates.addDelegate(txn, record01, group03, False)
+
+        # Add external delegates
+        yield txn.assignExternalDelegates(u&quot;user01&quot;, None, None, u&quot;external1&quot;, u&quot;external2&quot;)
+
+        yield self.commitTransaction(0)
+
+        yield self.waitAllEmpty()
+
+
+    @inlineCallbacks
+    def secondState(self):
+        &quot;&quot;&quot;
+        Set up the server with data changes that appear after the first sync.
+        &quot;&quot;&quot;
+        txn = self.theTransactionUnderTest(0)
+        obj = yield self.calendarObjectUnderTest(txn, name=&quot;01_1.ics&quot;, calendar_name=&quot;calendar&quot;, home=&quot;user01&quot;)
+        yield obj.setComponent(self.data01_1_changed)
+
+        obj = yield self.calendarObjectUnderTest(txn, name=&quot;02_2.ics&quot;, calendar_name=&quot;calendar&quot;, home=&quot;user02&quot;)
+        attachment, _ignore_location = yield obj.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test_02.txt&quot;, MemoryStream(&quot;Here is some text #02.&quot;))
+        self.stash[&quot;user02_attachment_id&quot;] = attachment.id()
+        self.stash[&quot;user02_attachment_md5&quot;] = attachment.md5()
+        self.stash[&quot;user02_attachment_mid&quot;] = attachment.managedID()
+
+        yield self.commitTransaction(0)
+
+        yield self.waitAllEmpty()
+
+
+    @inlineCallbacks
+    def finalState(self):
+        &quot;&quot;&quot;
+        Set up the server with data changes that appear before the final sync.
+        &quot;&quot;&quot;
+        txn = self.theTransactionUnderTest(1)
+        obj = yield self.calendarObjectUnderTest(txn, name=&quot;p02_2.ics&quot;, calendar_name=&quot;calendar&quot;, home=&quot;puser02&quot;)
+        attachment, _ignore_location = yield obj.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test_p02.txt&quot;, MemoryStream(&quot;Here is some text #p02.&quot;))
+        self.stash[&quot;puser02_attachment_id&quot;] = attachment.id()
+        self.stash[&quot;puser02_attachment_mid&quot;] = attachment.managedID()
+        self.stash[&quot;puser02_attachment_md5&quot;] = attachment.md5()
+
+        yield self.commitTransaction(1)
+
+        yield self.waitAllEmpty()
+
+
+    @inlineCallbacks
+    def switchAccounts(self):
+        &quot;&quot;&quot;
+        Switch the migrated user accounts to point to the new pod
+        &quot;&quot;&quot;
+
+        for i in range(self.numberOfStores):
+            txn = self.theTransactionUnderTest(i)
+            record = yield txn.directoryService().recordWithUID(u&quot;user01&quot;)
+            yield self.changeRecord(record, txn.directoryService().fieldName.serviceNodeUID, u&quot;B&quot;, directory=txn.directoryService())
+            yield self.commitTransaction(i)
+
+        for i in range(self.numberOfStores):
+            txn = self.theTransactionUnderTest(i)
+            record = yield txn.directoryService().recordWithUID(u&quot;user01&quot;)
+            self.assertEqual(record.serviceNodeUID, &quot;B&quot;)
+            self.assertEqual(record.thisServer(), i == 1)
+            record = yield txn.directoryService().recordWithUID(u&quot;user02&quot;)
+            self.assertEqual(record.serviceNodeUID, &quot;A&quot;)
+            self.assertEqual(record.thisServer(), i == 0)
+            record = yield txn.directoryService().recordWithUID(u&quot;puser02&quot;)
+            self.assertEqual(record.serviceNodeUID, &quot;B&quot;)
+            self.assertEqual(record.thisServer(), i == 1)
+            yield self.commitTransaction(i)
+
+
+    @inlineCallbacks
+    def postCheck(self):
+        &quot;&quot;&quot;
+        Checks to run after migration is done.
+        &quot;&quot;&quot;
+
+        # Check that the home has been moved
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;)
+        self.assertTrue(home.external())
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_NORMAL)
+        self.assertTrue(home is None)
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_EXTERNAL)
+        self.assertTrue(home is not None)
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_DISABLED)
+        self.assertTrue(home is not None)
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        self.assertTrue(home is None)
+        yield self.commitTransaction(0)
+
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;)
+        self.assertTrue(home.normal())
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_NORMAL)
+        self.assertTrue(home is not None)
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_EXTERNAL)
+        self.assertTrue(home is None)
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_DISABLED)
+        self.assertTrue(home is not None)
+        home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_MIGRATING)
+        self.assertTrue(home is None)
+        yield self.commitTransaction(1)
+
+        # Check that the notifications have been moved
+        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_NORMAL)
+        self.assertTrue(notifications is None)
+        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_EXTERNAL)
+        self.assertTrue(notifications is None)
+        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;, status=_HOME_STATUS_DISABLED)
+        self.assertTrue(notifications is not None)
+        yield self.commitTransaction(0)
+
+        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_NORMAL)
+        self.assertTrue(notifications is not None)
+        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_EXTERNAL)
+        self.assertTrue(notifications is None)
+        notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;, status=_HOME_STATUS_DISABLED)
+        self.assertTrue(notifications is not None)
+        yield self.commitTransaction(1)
+
+        # New pod data
+        homes = {}
+        homes[&quot;user01&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user01&quot;)
+        homes[&quot;user02&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user02&quot;)
+        self.assertTrue(homes[&quot;user02&quot;].external())
+        homes[&quot;user03&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;user03&quot;)
+        self.assertTrue(homes[&quot;user03&quot;].external())
+        homes[&quot;puser01&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;puser01&quot;)
+        self.assertTrue(homes[&quot;puser01&quot;].normal())
+        homes[&quot;puser02&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;puser02&quot;)
+        self.assertTrue(homes[&quot;puser02&quot;].normal())
+        homes[&quot;puser03&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=&quot;puser03&quot;)
+        self.assertTrue(homes[&quot;puser03&quot;].normal())
+
+        # Check calendar data on new pod
+        calendars = yield homes[&quot;user01&quot;].loadChildren()
+        calnames = dict([(calendar.name(), calendar) for calendar in calendars])
+        self.assertEqual(
+            set(calnames.keys()),
+            set((&quot;calendar&quot;, &quot;tasks&quot;, &quot;inbox&quot;, self.stash[&quot;sharename_user02_to_user01&quot;], self.stash[&quot;sharename_puser02_to_user01&quot;],))
+        )
+
+        # Check shared-by user01 on new pod
+        shared = calnames[&quot;calendar&quot;]
+        invitations = yield shared.sharingInvites()
+        by_sharee = dict([(invitation.shareeUID, invitation) for invitation in invitations])
+        self.assertEqual(len(invitations), 2)
+        self.assertEqual(set(by_sharee.keys()), set((&quot;user03&quot;, &quot;puser03&quot;,)))
+        self.assertEqual(by_sharee[&quot;user03&quot;].shareeHomeID, homes[&quot;user03&quot;].id())
+        self.assertEqual(by_sharee[&quot;puser03&quot;].shareeHomeID, homes[&quot;puser03&quot;].id())
+
+        # Check shared-to user01 on new pod
+        shared = calnames[self.stash[&quot;sharename_user02_to_user01&quot;]]
+        self.assertEqual(shared.ownerHome().uid(), &quot;user02&quot;)
+        self.assertEqual(shared.ownerHome().id(), homes[&quot;user02&quot;].id())
+
+        shared = calnames[self.stash[&quot;sharename_puser02_to_user01&quot;]]
+        self.assertEqual(shared.ownerHome().uid(), &quot;puser02&quot;)
+        self.assertEqual(shared.ownerHome().id(), homes[&quot;puser02&quot;].id())
+
+        shared = yield homes[&quot;puser02&quot;].calendarWithName(&quot;calendar&quot;)
+        invitations = yield shared.sharingInvites()
+        self.assertEqual(len(invitations), 1)
+        self.assertEqual(invitations[0].shareeHomeID, homes[&quot;user01&quot;].id())
+
+        yield self.commitTransaction(1)
+
+        # Old pod data
+        homes = {}
+        homes[&quot;user01&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user01&quot;)
+        homes[&quot;user02&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user02&quot;)
+        self.assertTrue(homes[&quot;user02&quot;].normal())
+        homes[&quot;user03&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;user03&quot;)
+        self.assertTrue(homes[&quot;user03&quot;].normal())
+        homes[&quot;puser01&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;puser01&quot;)
+        self.assertTrue(homes[&quot;puser01&quot;] is None)
+        homes[&quot;puser02&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;puser02&quot;)
+        self.assertTrue(homes[&quot;puser02&quot;].external())
+        homes[&quot;puser03&quot;] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name=&quot;puser03&quot;)
+        self.assertTrue(homes[&quot;puser03&quot;].external())
+
+        # Check shared-by user01 on old pod
+        shared = yield homes[&quot;user03&quot;].calendarWithName(self.stash[&quot;sharename_user01_to_user03&quot;])
+        self.assertEqual(shared.ownerHome().uid(), &quot;user01&quot;)
+        self.assertEqual(shared.ownerHome().id(), homes[&quot;user01&quot;].id())
+
+        # Check shared-to user01 on old pod
+        shared = yield homes[&quot;user02&quot;].calendarWithName(&quot;calendar&quot;)
+        invitations = yield shared.sharingInvites()
+        self.assertEqual(len(invitations), 1)
+        self.assertEqual(invitations[0].shareeHomeID, homes[&quot;user01&quot;].id())
+
+        yield self.commitTransaction(0)
+
+        # Delegates on each pod
+        for pod in range(self.numberOfStores):
+            txn = self.theTransactionUnderTest(pod)
+            records = {}
+            for ctr in range(10):
+                uid = u&quot;user{:02d}&quot;.format(ctr + 1)
+                records[uid] = yield txn.directoryService().recordWithUID(uid)
+            for ctr in range(10):
+                uid = u&quot;puser{:02d}&quot;.format(ctr + 1)
+                records[uid] = yield txn.directoryService().recordWithUID(uid)
+            for ctr in range(10):
+                uid = u&quot;group{:02d}&quot;.format(ctr + 1)
+                records[uid] = yield txn.directoryService().recordWithUID(uid)
+
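+            # The two boolean arguments to Delegates.delegatesOf() are readWrite
+            # and expanded: read-write vs read-only delegation, and whether group
+            # delegates are expanded into their individual members.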
+            delegates = yield Delegates.delegatesOf(txn, records[&quot;user01&quot;], True, False)
+            self.assertTrue(records[&quot;user02&quot;] in delegates)
+            self.assertTrue(records[&quot;group02&quot;] in delegates)
+            delegates = yield Delegates.delegatesOf(txn, records[&quot;user01&quot;], True, True)
+            self.assertTrue(records[&quot;user02&quot;] in delegates)
+            self.assertTrue(records[&quot;user06&quot;] in delegates)
+            self.assertTrue(records[&quot;user07&quot;] in delegates)
+            self.assertTrue(records[&quot;user08&quot;] in delegates)
+
+            delegates = yield Delegates.delegatesOf(txn, records[&quot;user01&quot;], False, False)
+            self.assertTrue(records[&quot;user03&quot;] in delegates)
+            self.assertTrue(records[&quot;group03&quot;] in delegates)
+            self.assertTrue(records[&quot;puser01&quot;] in delegates)
+            delegates = yield Delegates.delegatesOf(txn, records[&quot;user01&quot;], False, True)
+            self.assertTrue(records[&quot;user03&quot;] in delegates)
+            self.assertTrue(records[&quot;user07&quot;] in delegates)
+            self.assertTrue(records[&quot;user08&quot;] in delegates)
+            self.assertTrue(records[&quot;user09&quot;] in delegates)
+            self.assertTrue(records[&quot;puser01&quot;] in delegates)
+
+        # Attachments
+        obj = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;01_3.ics&quot;, calendar_name=&quot;calendar&quot;, home=&quot;user01&quot;)
+        attachment = yield obj.attachmentWithManagedID(self.stash[&quot;user01_attachment_mid&quot;])
+        self.assertTrue(attachment is not None)
+        self.assertEqual(attachment.md5(), self.stash[&quot;user01_attachment_md5&quot;])
+        data = yield self.attachmentToString(attachment)
+        self.assertEqual(data, &quot;Here is some text #1.&quot;)
+
+
+    @inlineCallbacks
+    def test_migration(self):
+        &quot;&quot;&quot;
+        Full migration cycle.
+        &quot;&quot;&quot;
+
+        yield self.preCheck()
+
+        # Step 1. Live full sync
+        yield self.initialState()
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.sync()
+
+        # Step 2. Live incremental sync
+        yield self.secondState()
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.sync()
+
+        # Step 3. Disable home after final changes
+        yield self.finalState()
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;)
+        yield syncer.disableRemoteHome()
+
+        # Step 4. Final incremental sync
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;, final=True)
+        yield syncer.sync()
+
+        # Step 5. Final reconcile sync
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;, final=True)
+        yield syncer.finalSync()
+
+        # Step 6. Enable new home
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;, final=True)
+        yield syncer.enableLocalHome()
+
+        # Step 7. Remove old home
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), &quot;user01&quot;, final=True)
+        yield syncer.removeRemoteHome()
+
+        yield self.switchAccounts()
+
+        yield self.postCheck()
</ins></span></pre></div>
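<p>For readers skimming this changeset, the seven steps exercised by <code>test_migration</code> above reduce to the following driver sequence. This is a minimal sketch, not shipped code: it assumes a store object for the destination pod is already available, and it mirrors the test in creating a fresh <code>CrossPodHomeSync</code> for each step.</p>
<pre>from twisted.internet.defer import inlineCallbacks
from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync

@inlineCallbacks
def migrateHome(destinationStore, uid):
    # Live syncs: repeatable while the user is still active on the old pod.
    yield CrossPodHomeSync(destinationStore, uid).sync()
    # Cut-over: disable the old (remote) home, then run the final incremental
    # sync, the reconcile pass, enable the new (local) home, and finally
    # remove the old home.
    yield CrossPodHomeSync(destinationStore, uid).disableRemoteHome()
    yield CrossPodHomeSync(destinationStore, uid, final=True).sync()
    yield CrossPodHomeSync(destinationStore, uid, final=True).finalSync()
    yield CrossPodHomeSync(destinationStore, uid, final=True).enableLocalHome()
    yield CrossPodHomeSync(destinationStore, uid, final=True).removeRemoteHome()
</pre>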
<a id="CalendarServertrunktxdavcommondatastorepoddingrequestpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/request.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/request.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/request.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -23,7 +23,7 @@
</span><span class="cx"> from txweb2.client.http import HTTPClientProtocol, ClientRequest
</span><span class="cx"> from txweb2.dav.util import allDataFromStream
</span><span class="cx"> from txweb2.http_headers import Headers, MimeType
</span><del>-from txweb2.stream import MemoryStream
</del><ins>+from txweb2.stream import MemoryStream, readStream
</ins><span class="cx"> 
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><span class="cx"> from twisted.internet.protocol import Factory
</span><span class="lines">@@ -50,11 +50,12 @@
</span><span class="cx">     case the JSON data is sent in an HTTP header.
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-    def __init__(self, server, data, stream=None, stream_type=None):
</del><ins>+    def __init__(self, server, data, stream=None, stream_type=None, writeStream=None):
</ins><span class="cx">         self.server = server
</span><span class="cx">         self.data = json.dumps(data)
</span><span class="cx">         self.stream = stream
</span><span class="cx">         self.streamType = stream_type
</span><ins>+        self.writeStream = writeStream
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="lines">@@ -72,7 +73,28 @@
</span><span class="cx">                 self.loggedResponse = yield self.logResponse(response)
</span><span class="cx">                 emitAccounting(&quot;xPod&quot;, &quot;&quot;, self.loggedRequest + &quot;\n&quot; + self.loggedResponse, &quot;POST&quot;)
</span><span class="cx"> 
</span><del>-            if response.code in (responsecode.OK, responsecode.BAD_REQUEST,):
</del><ins>+            if response.code == responsecode.OK:
+                if self.writeStream is None:
+                    data = (yield allDataFromStream(response.stream))
+                    data = json.loads(data)
+                else:
+                    yield readStream(response.stream, self.writeStream.write)
+                    content_type = response.headers.getHeader(&quot;content-type&quot;)
+                    if content_type is None:
+                        content_type = MimeType(&quot;application&quot;, &quot;octet-stream&quot;)
+                    content_disposition = response.headers.getHeader(&quot;content-disposition&quot;)
+                    if content_disposition is None or &quot;filename&quot; not in content_disposition.params:
+                        filename = &quot;&quot;
+                    else:
+                        filename = content_disposition.params[&quot;filename&quot;]
+                    self.writeStream.resetDetails(content_type, filename)
+                    yield self.writeStream.loseConnection()
+                    data = {
+                        &quot;result&quot;: &quot;ok&quot;,
+                        &quot;content-type&quot;: content_type,
+                        &quot;name&quot;: filename,
+                    }
+            elif response.code == responsecode.BAD_REQUEST:
</ins><span class="cx">                 data = (yield allDataFromStream(response.stream))
</span><span class="cx">                 data = json.loads(data)
</span><span class="cx">             else:
</span></span></pre></div>
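<p>The <code>writeStream</code> hook added to <code>ConduitRequest</code> above lets a 200 response body be streamed straight into a consumer instead of being parsed as JSON. A hedged sketch of the caller side follows; only the constructor argument and the response handling come from this diff, while the <code>doRequest</code> method name and the object supplying the write stream are assumptions for illustration.</p>
<pre># Sketch only: &quot;attachment&quot; stands in for a store-side object whose store()
# returns a transport with write()/resetDetails()/loseConnection(), which is
# the interface ConduitRequest now expects of writeStream.
writeStream = attachment.store(MimeType(&quot;application&quot;, &quot;octet-stream&quot;))
request = ConduitRequest(server, action_json, writeStream=writeStream)
response = yield request.doRequest(txn)  # method name assumed
# On a 200 the body has been written to writeStream, resetDetails() has
# recorded the content-type and filename taken from the response headers, and
# the returned JSON is {&quot;result&quot;: &quot;ok&quot;, &quot;content-type&quot;: ..., &quot;name&quot;: ...}.
</pre>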
<a id="CalendarServertrunktxdavcommondatastorepoddingresourcepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/resource.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/resource.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/resource.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -18,9 +18,11 @@
</span><span class="cx"> from txweb2.dav.noneprops import NonePropertyStore
</span><span class="cx"> from txweb2.dav.util import allDataFromStream
</span><span class="cx"> from txweb2.http import Response, HTTPError, StatusResponse, JSONResponse
</span><del>-from txweb2.http_headers import MimeType
</del><ins>+from txweb2.http_headers import MimeType, MimeDisposition
+from txweb2.stream import ProducerStream
</ins><span class="cx"> 
</span><span class="cx"> from twisted.internet.defer import succeed, returnValue, inlineCallbacks
</span><ins>+from twisted.internet.protocol import Protocol
</ins><span class="cx"> 
</span><span class="cx"> from twistedcaldav.extensions import DAVResource, \
</span><span class="cx">     DAVResourceWithoutChildrenMixin
</span><span class="lines">@@ -154,19 +156,54 @@
</span><span class="cx">             request.extendedLogItems = {}
</span><span class="cx">         request.extendedLogItems[&quot;xpod&quot;] = j[&quot;action&quot;] if &quot;action&quot; in j else &quot;unknown&quot;
</span><span class="cx"> 
</span><del>-        # Get the conduit to process the data
-        try:
-            result = yield self.store.conduit.processRequest(j)
-            code = responsecode.OK if result[&quot;result&quot;] == &quot;ok&quot; else responsecode.BAD_REQUEST
-        except Exception as e:
-            # Send the exception over to the other side
-            result = {
-                &quot;result&quot;: &quot;exception&quot;,
-                &quot;class&quot;: &quot;.&quot;.join((e.__class__.__module__, e.__class__.__name__,)),
-                &quot;request&quot;: str(e),
-            }
-            code = responsecode.BAD_REQUEST
</del><ins>+        # Look for a streaming action which needs special handling
+        if self.store.conduit.isStreamAction(j):
+            # Get the conduit to process the data stream
+            try:
</ins><span class="cx"> 
</span><ins>+                stream = ProducerStream()
+                class StreamProtocol(Protocol):
+                    def connectionMade(self):
+                        stream.registerProducer(self.transport, False)
+                    def dataReceived(self, data):
+                        stream.write(data)
+                    def connectionLost(self, reason):
+                        stream.finish()
+
+                result = yield self.store.conduit.processRequestStream(j, StreamProtocol())
+
+                try:
+                    ct, name = result
+                except ValueError:
+                    code = responsecode.BAD_REQUEST
+                else:
+                    headers = {&quot;content-type&quot;: MimeType.fromString(ct)}
+                    headers[&quot;content-disposition&quot;] = MimeDisposition(&quot;attachment&quot;, params={&quot;filename&quot;: name})
+                    returnValue(Response(responsecode.OK, headers, stream))
+
+            except Exception as e:
+                # Send the exception over to the other side
+                result = {
+                    &quot;result&quot;: &quot;exception&quot;,
+                    &quot;class&quot;: &quot;.&quot;.join((e.__class__.__module__, e.__class__.__name__,)),
+                    &quot;details&quot;: str(e),
+                }
+                code = responsecode.BAD_REQUEST
+
+        else:
+            # Get the conduit to process the data
+            try:
+                result = yield self.store.conduit.processRequest(j)
+                code = responsecode.OK if result[&quot;result&quot;] == &quot;ok&quot; else responsecode.BAD_REQUEST
+            except Exception as e:
+                # Send the exception over to the other side
+                result = {
+                    &quot;result&quot;: &quot;exception&quot;,
+                    &quot;class&quot;: &quot;.&quot;.join((e.__class__.__module__, e.__class__.__name__,)),
+                    &quot;details&quot;: str(e),
+                }
+                code = responsecode.BAD_REQUEST
+
</ins><span class="cx">         response = JSONResponse(code, result)
</span><span class="cx">         returnValue(response)
</span><span class="cx"> 
</span></span></pre></div>
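<p>The streaming branch above bridges a Twisted <code>Protocol</code> onto a <code>ProducerStream</code> that becomes the HTTP response body. A hedged sketch of a conduit-side stream action under that contract; the method and lookup names are assumptions, while <code>isStreamAction</code>, <code>processRequestStream</code>, and the <code>(content-type, filename)</code> return pair come from the diff.</p>
<pre>@inlineCallbacks
def recv_example_stream(self, txn, request, protocol):
    # Hypothetical lookup of the resource named in the JSON request.
    attachment = yield self._lookupAttachment(txn, request)  # name assumed
    # Drive the supplied protocol with the resource's bytes; resource.py has
    # already wired dataReceived()/connectionLost() into a ProducerStream.
    yield attachment.retrieve(protocol)
    # Returning (content-type, filename) makes resource.py answer 200 with
    # matching content-type and content-disposition headers.
    returnValue((&quot;application/octet-stream&quot;, &quot;test.txt&quot;))
</pre>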
<a id="CalendarServertrunktxdavcommondatastorepoddingsharing_invitespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/sharing_invites.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/sharing_invites.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/sharing_invites.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -27,9 +27,8 @@
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def send_shareinvite(
</span><del>-        self, txn, homeType, ownerUID, ownerID, ownerName,
-        shareeUID, shareUID, bindMode,
-        summary, copy_properties, supported_components
</del><ins>+        self, txn, homeType, ownerUID, ownerName, shareeUID, shareUID,
+        bindMode, bindUID, summary, copy_properties, supported_components
</ins><span class="cx">     ):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Send a sharing invite cross-pod message.
</span><span class="lines">@@ -40,9 +39,6 @@
</span><span class="cx">         @param ownerUID: UID of the sharer.
</span><span class="cx">         @type ownerUID: C{str}
</span><span class="cx"> 
</span><del>-        @param ownerID: resource ID of the sharer calendar
-        @type ownerID: C{int}
-
</del><span class="cx">         @param ownerName: owner's name of the sharer calendar
</span><span class="cx">         @type ownerName: C{str}
</span><span class="cx"> 
</span><span class="lines">@@ -54,7 +50,8 @@
</span><span class="cx"> 
</span><span class="cx">         @param bindMode: bind mode for the share
</span><span class="cx">         @type bindMode: C{str}
</span><del>-
</del><ins>+        @param bindUID: bind UID of the sharer calendar
+        @type bindUID: C{str}
</ins><span class="cx">         @param summary: sharing message
</span><span class="cx">         @type summary: C{str}
</span><span class="cx"> 
</span><span class="lines">@@ -73,11 +70,11 @@
</span><span class="cx">             &quot;action&quot;: &quot;shareinvite&quot;,
</span><span class="cx">             &quot;type&quot;: homeType,
</span><span class="cx">             &quot;owner&quot;: ownerUID,
</span><del>-            &quot;owner_id&quot;: ownerID,
</del><span class="cx">             &quot;owner_name&quot;: ownerName,
</span><span class="cx">             &quot;sharee&quot;: shareeUID,
</span><span class="cx">             &quot;share_id&quot;: shareUID,
</span><span class="cx">             &quot;mode&quot;: bindMode,
</span><ins>+            &quot;bind_uid&quot;: bindUID,
</ins><span class="cx">             &quot;summary&quot;: summary,
</span><span class="cx">             &quot;properties&quot;: copy_properties,
</span><span class="cx">         }
</span><span class="lines">@@ -107,10 +104,10 @@
</span><span class="cx">         # Create a share
</span><span class="cx">         yield shareeHome.processExternalInvite(
</span><span class="cx">             request[&quot;owner&quot;],
</span><del>-            request[&quot;owner_id&quot;],
</del><span class="cx">             request[&quot;owner_name&quot;],
</span><span class="cx">             request[&quot;share_id&quot;],
</span><span class="cx">             request[&quot;mode&quot;],
</span><ins>+            request[&quot;bind_uid&quot;],
</ins><span class="cx">             request[&quot;summary&quot;],
</span><span class="cx">             request[&quot;properties&quot;],
</span><span class="cx">             supported_components=request.get(&quot;supported-components&quot;)
</span><span class="lines">@@ -119,7 +116,8 @@
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def send_shareuninvite(
</span><del>-        self, txn, homeType, ownerUID, ownerID, shareeUID, shareUID
</del><ins>+        self, txn, homeType, ownerUID,
+        bindUID, shareeUID, shareUID
</ins><span class="cx">     ):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Send a sharing uninvite cross-pod message.
</span><span class="lines">@@ -129,10 +127,9 @@
</span><span class="cx"> 
</span><span class="cx">         @param ownerUID: UID of the sharer.
</span><span class="cx">         @type ownerUID: C{str}
</span><ins>+        @param bindUID: bind UID of the sharer calendar
+        @type bindUID: C{str}
</ins><span class="cx"> 
</span><del>-        @param ownerID: resource ID of the sharer calendar
-        @type ownerID: C{int}
-
</del><span class="cx">         @param shareeUID: UID of the sharee
</span><span class="cx">         @type shareeUID: C{str}
</span><span class="cx"> 
</span><span class="lines">@@ -148,7 +145,7 @@
</span><span class="cx">             &quot;action&quot;: &quot;shareuninvite&quot;,
</span><span class="cx">             &quot;type&quot;: homeType,
</span><span class="cx">             &quot;owner&quot;: ownerUID,
</span><del>-            &quot;owner_id&quot;: ownerID,
</del><ins>+            &quot;bind_uid&quot;: bindUID,
</ins><span class="cx">             &quot;sharee&quot;: shareeUID,
</span><span class="cx">             &quot;share_id&quot;: shareUID,
</span><span class="cx">         }
</span><span class="lines">@@ -174,7 +171,7 @@
</span><span class="cx">         # Remove a share
</span><span class="cx">         yield shareeHome.processExternalUninvite(
</span><span class="cx">             request[&quot;owner&quot;],
</span><del>-            request[&quot;owner_id&quot;],
</del><ins>+            request[&quot;bind_uid&quot;],
</ins><span class="cx">             request[&quot;share_id&quot;],
</span><span class="cx">         )
</span><span class="cx"> 
</span></span></pre></div>
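<p>The net effect in this file is that cross-pod sharing messages now identify the shared collection by a stable <code>bind_uid</code> instead of the pod-local <code>owner_id</code> row ID, presumably because integer resource IDs change when a home migrates between pods. The resulting <code>shareinvite</code> payload looks like this (values illustrative only):</p>
<pre>action = {
    &quot;action&quot;: &quot;shareinvite&quot;,
    &quot;type&quot;: homeType,           # calendar/addressbook home type
    &quot;owner&quot;: &quot;user01&quot;,
    &quot;owner_name&quot;: &quot;calendar&quot;,
    &quot;sharee&quot;: &quot;puser03&quot;,
    &quot;share_id&quot;: shareUID,       # the invite UID
    &quot;mode&quot;: bindMode,           # e.g. _BIND_MODE_READ
    &quot;bind_uid&quot;: bindUID,        # stable UID replacing the old owner_id
    &quot;summary&quot;: &quot;summary&quot;,
    &quot;properties&quot;: copy_properties,
}
</pre>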
<a id="CalendarServertrunktxdavcommondatastorepoddingstore_apipy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/store_api.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/store_api.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/store_api.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -16,8 +16,9 @@
</span><span class="cx"> 
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><span class="cx"> 
</span><del>-from txdav.common.datastore.podding.base import FailedCrossPodRequestError
</del><span class="cx"> from txdav.caldav.datastore.scheduling.freebusy import generateFreeBusyInfo
</span><ins>+from txdav.common.datastore.podding.util import UtilityConduitMixin
+from txdav.common.datastore.sql_tables import _HOME_STATUS_DISABLED
</ins><span class="cx"> 
</span><span class="cx"> from twistedcaldav.caldavxml import TimeRange
</span><span class="cx"> 
</span><span class="lines">@@ -27,145 +28,21 @@
</span><span class="cx">     Defines common cross-pod API for generic access to remote resources.
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-    #
-    # Utility methods to map from store objects to/from JSON
-    #
-
</del><span class="cx">     @inlineCallbacks
</span><del>-    def _getRequestForStoreObject(self, action, storeObject, classMethod):
</del><ins>+    def send_home_resource_id(self, txn, recipient, migrating=False):
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        Create the JSON data needed to identify the remote resource by type and
-        ids, along with any parent resources.
-
-        @param action: the conduit action name
-        @type action: L{str}
-
-        @param storeObject: the store object that is being operated on
-        @type storeObject: L{object}
-
-        @param classMethod: indicates whether the method being called is a
-            classmethod
-        @type classMethod: L{bool}
-
-        @return: the transaction in use, the JSON dict to send in the request,
-            the server where the request should be sent
-        @rtype: L{tuple} of (L{CommonStoreTransaction}, L{dict}, L{str})
-        &quot;&quot;&quot;
-
-        from txdav.common.datastore.sql import (
-            CommonObjectResource, CommonHomeChild, CommonHome
-        )
-
-        result = {
-            &quot;action&quot;: action,
-        }
-
-        # Extract the relevant store objects
-        txn = storeObject._txn
-        owner_home = None
-        viewer_home = None
-        home_child = None
-        object_resource = None
-
-        if isinstance(storeObject, CommonObjectResource):
-            owner_home = storeObject.ownerHome()
-            viewer_home = storeObject.viewerHome()
-            home_child = storeObject.parentCollection()
-            object_resource = storeObject
-        elif isinstance(storeObject, CommonHomeChild):
-            owner_home = storeObject.ownerHome()
-            viewer_home = storeObject.viewerHome()
-            home_child = storeObject
-            result[&quot;classMethod&quot;] = classMethod
-        elif isinstance(storeObject, CommonHome):
-            owner_home = storeObject
-            viewer_home = storeObject
-            txn = storeObject._txn
-            result[&quot;classMethod&quot;] = classMethod
-
-        # Add store object identities to JSON request
-        result[&quot;homeType&quot;] = viewer_home._homeType
-        result[&quot;homeUID&quot;] = viewer_home.uid()
-        if home_child:
-            if home_child.owned():
-                result[&quot;homeChildID&quot;] = home_child.id()
-            else:
-                result[&quot;homeChildSharedID&quot;] = home_child.name()
-        if object_resource:
-            result[&quot;objectResourceID&quot;] = object_resource.id()
-
-        # Note that the owner_home is always the ownerHome() because in the
-        # sharing case a viewer is accessing the owner's data on another pod.
-        directory = self.store.directoryService()
-        recipient = yield directory.recordWithUID(owner_home.uid())
-
-        returnValue((txn, result, recipient.server(),))
-
-
-    @inlineCallbacks
-    def _getStoreObjectForRequest(self, txn, request):
-        &quot;&quot;&quot;
-        Resolve the supplied JSON data to get a store object to operate on.
-        &quot;&quot;&quot;
-
-        returnObject = txn
-        classObject = None
-
-        if &quot;homeUID&quot; in request:
-            home = yield txn.homeWithUID(
-                request[&quot;homeType&quot;], request[&quot;homeUID&quot;]
-            )
-            if home is None:
-                raise FailedCrossPodRequestError(&quot;Invalid owner UID specified&quot;)
-            home._internalRequest = False
-            returnObject = home
-            if request.get(&quot;classMethod&quot;, False):
-                classObject = home._childClass
-
-        if &quot;homeChildID&quot; in request:
-            homeChild = yield home.childWithID(request[&quot;homeChildID&quot;])
-            if homeChild is None:
-                raise FailedCrossPodRequestError(
-                    &quot;Invalid home child specified&quot;
-                )
-            returnObject = homeChild
-            if request.get(&quot;classMethod&quot;, False):
-                classObject = homeChild._objectResourceClass
-        elif &quot;homeChildSharedID&quot; in request:
-            homeChild = yield home.childWithName(request[&quot;homeChildSharedID&quot;])
-            if homeChild is None:
-                raise FailedCrossPodRequestError(
-                    &quot;Invalid home child specified&quot;
-                )
-            returnObject = homeChild
-            if request.get(&quot;classMethod&quot;, False):
-                classObject = homeChild._objectResourceClass
-
-        if &quot;objectResourceID&quot; in request:
-            objectResource = yield homeChild.objectResourceWithID(
-                request[&quot;objectResourceID&quot;]
-            )
-            if objectResource is None:
-                raise FailedCrossPodRequestError(
-                    &quot;Invalid object resource specified&quot;
-                )
-            returnObject = objectResource
-
-        returnValue((returnObject, classObject,))
-
-
-    @inlineCallbacks
-    def send_home_resource_id(self, txn, recipient):
-        &quot;&quot;&quot;
</del><span class="cx">         Lookup the remote resourceID matching the specified directory uid.
</span><span class="cx"> 
</span><span class="cx">         @param ownerUID: directory record for user whose home is needed
</span><span class="cx">         @type ownerUID: L{DirectroryRecord}
</span><ins>+        @param migrating: if L{True} then also return a disabled home
+        @type migrating: L{bool}
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">         request = {
</span><span class="cx">             &quot;action&quot;: &quot;home-resource_id&quot;,
</span><span class="cx">             &quot;ownerUID&quot;: recipient.uid,
</span><ins>+            &quot;migrating&quot;: migrating,
</ins><span class="cx">         }
</span><span class="cx"> 
</span><span class="cx">         response = yield self.sendRequest(txn, recipient, request)
</span><span class="lines">@@ -175,14 +52,15 @@
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def recv_home_resource_id(self, txn, request):
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        Process an addAttachment cross-pod request.
-        Request arguments as per L{send_add_attachment}.
</del><ins>+        Process a home-resource_id cross-pod request. Request arguments as per L{send_home_resource_id}.
</ins><span class="cx"> 
</span><span class="cx">         @param request: request arguments
</span><span class="cx">         @type request: C{dict}
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">         home = yield txn.calendarHomeWithUID(request[&quot;ownerUID&quot;])
</span><ins>+        if home is None and request[&quot;migrating&quot;]:
+            home = yield txn.calendarHomeWithUID(request[&quot;ownerUID&quot;], status=_HOME_STATUS_DISABLED)
</ins><span class="cx">         returnValue(home.id() if home is not None else None)
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -200,18 +78,12 @@
</span><span class="cx">         event_details,
</span><span class="cx">     ):
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        Request free busy information for a shared calendar collection hosted
-        on a different pod.
-        See L{txdav.caldav.datastore.scheduling.freebusy} for the base free
-        busy lookup behavior.
</del><ins>+        Request free busy information for a shared calendar collection hosted on a different pod. See
+        L{txdav.caldav.datastore.scheduling.freebusy} for the base free busy lookup behavior.
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        txn, request, server = yield self._getRequestForStoreObject(
-            &quot;freebusy&quot;, calresource, False
-        )
</del><ins>+        txn, request, server = yield self._getRequestForStoreObject(&quot;freebusy&quot;, calresource, False)
</ins><span class="cx"> 
</span><del>-        request[&quot;timerange&quot;] = [
-            timerange.start.getText(), timerange.end.getText()
-        ]
</del><ins>+        request[&quot;timerange&quot;] = [timerange.start.getText(), timerange.end.getText()]
</ins><span class="cx">         request[&quot;matchtotal&quot;] = matchtotal
</span><span class="cx">         request[&quot;excludeuid&quot;] = excludeuid
</span><span class="cx">         request[&quot;organizer&quot;] = organizer
</span><span class="lines">@@ -227,26 +99,20 @@
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def recv_freebusy(self, txn, request):
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        Process a freebusy cross-pod request.
-        Message arguments as per L{send_freebusy}.
</del><ins>+        Process a freebusy cross-pod request. Message arguments as per L{send_freebusy}.
</ins><span class="cx"> 
</span><span class="cx">         @param request: request arguments
</span><span class="cx">         @type request: C{dict}
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">         # Operate on the L{CommonHomeChild}
</span><del>-        calresource, _ignore = yield self._getStoreObjectForRequest(
-            txn, request
-        )
</del><ins>+        calresource, _ignore = yield self._getStoreObjectForRequest(txn, request)
</ins><span class="cx"> 
</span><span class="cx">         fbinfo = [[], [], []]
</span><span class="cx">         matchtotal = yield generateFreeBusyInfo(
</span><span class="cx">             calresource,
</span><span class="cx">             fbinfo,
</span><del>-            TimeRange(
-                start=request[&quot;timerange&quot;][0],
-                end=request[&quot;timerange&quot;][1],
-            ),
</del><ins>+            TimeRange(start=request[&quot;timerange&quot;][0], end=request[&quot;timerange&quot;][1]),
</ins><span class="cx">             request[&quot;matchtotal&quot;],
</span><span class="cx">             request[&quot;excludeuid&quot;],
</span><span class="cx">             request[&quot;organizer&quot;],
</span><span class="lines">@@ -268,232 +134,63 @@
</span><span class="cx">         })
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    #
-    # We can simplify code generation for simple calls by dynamically
-    # generating the appropriate class methods.
-    #
-
-    @inlineCallbacks
-    def _simple_object_send(
-        self, actionName, storeObject,
-        classMethod=False, transform=None, args=None, kwargs=None
-    ):
</del><ins>+    @staticmethod
+    def _to_serialize_pair_list(value):
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        A simple send operation that returns a value.
-
-        @param actionName: name of the action.
-        @type actionName: C{str}
-
-        @param shareeView: sharee resource being operated on.
-        @type shareeView: L{CommonHomeChildExternal}
-
-        @param objectResource: the resource being operated on, or C{None} for
-            classmethod.
-        @type objectResource: L{CommonObjectResourceExternal}
-
-        @param transform: a function used to convert the JSON response into
-            return values.
-        @type transform: C{callable}
-
-        @param args: list of optional arguments.
-        @type args: C{list}
-
-        @param kwargs: optional keyword arguments.
-        @type kwargs: C{dict}
</del><ins>+        Convert the value to the external (JSON-based) representation.
</ins><span class="cx">         &quot;&quot;&quot;
</span><ins>+        return [[a.serialize(), b.serialize(), ] for a, b in value]
</ins><span class="cx"> 
</span><del>-        txn, request, server = yield self._getRequestForStoreObject(
-            actionName, storeObject, classMethod
-        )
-        if args is not None:
-            request[&quot;arguments&quot;] = args
-        if kwargs is not None:
-            request[&quot;keywords&quot;] = kwargs
-        response = yield self.sendRequestToServer(txn, server, request)
-        returnValue(transform(response) if transform is not None else response)
</del><span class="cx"> 
</span><del>-
-    @inlineCallbacks
-    def _simple_object_recv(
-        self, txn, actionName, request, method, transform=None
-    ):
-        &quot;&quot;&quot;
-        A simple recv operation that returns a value.
-        We also look for an optional set of arguments/keywords and include
-        those only if present.
-
-        @param actionName: name of the action.
-        @type actionName: C{str}
-
-        @param request: request arguments
-        @type request: C{dict}
-
-        @param method: name of the method to execute on the shared resource to
-            get the result.
-        @type method: C{str}
-
-        @param transform: method to call on returned JSON value to convert it
-            to something useful.
-        @type transform: C{callable}
-        &quot;&quot;&quot;
-
-        storeObject, classObject = yield self._getStoreObjectForRequest(
-            txn, request
-        )
-
-        if classObject is not None:
-            m = getattr(classObject, method)
-            value = yield m(
-                storeObject,
-                *request.get(&quot;arguments&quot;, ()),
-                **request.get(&quot;keywords&quot;, {})
-            )
-        else:
-            m = getattr(storeObject, method)
-            value = yield m(
-                *request.get(&quot;arguments&quot;, ()),
-                **request.get(&quot;keywords&quot;, {})
-            )
-
-        returnValue(transform(value) if transform is not None else value)
-
-
-    #
-    # Factory methods for binding actions to the conduit class
-    #
-    @classmethod
-    def _make_simple_action(
-        cls, action, method, classMethod=False,
-        transform_recv_result=None, transform_send_result=None
-    ):
-        setattr(
-            cls,
-            &quot;send_{}&quot;.format(action),
-            lambda self, storeObject, *args, **kwargs:
-                self._simple_object_send(
-                    action, storeObject, classMethod=classMethod,
-                    transform=transform_send_result, args=args, kwargs=kwargs
-            )
-        )
-        setattr(
-            cls,
-            &quot;recv_{}&quot;.format(action),
-            lambda self, txn, message:
-                self._simple_object_recv(
-                    txn, action, message, method,
-                    transform=transform_recv_result
-                )
-        )
-
-
-    #
-    # Transforms for returned data
-    #
</del><span class="cx">     @staticmethod
</span><del>-    def _to_externalize(value):
</del><ins>+    def _to_serialize_dict_value(value):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Convert the value to the external (JSON-based) representation.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        return value.externalize() if value is not None else None
</del><ins>+        return dict([(k, v.serialize(),) for k, v in value.items()])
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @staticmethod
</span><del>-    def _to_externalize_list(value):
</del><ins>+    def _to_serialize_dict_list_serialized_value(value):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Convert the value to the external (JSON-based) representation.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        return [v.externalize() for v in value]
</del><ins>+        return dict([(k, UtilityConduitMixin._to_serialize_list(v),) for k, v in value.items()])
</ins><span class="cx"> 
</span><del>-
-    @staticmethod
-    def _to_string(value):
-        return str(value)
-
-
-    @staticmethod
-    def _to_tuple(value):
-        return tuple(value)
-
</del><span class="cx"> # These are the actions on store objects we need to expose via the conduit api
</span><span class="cx"> 
</span><span class="cx"> # Calls on L{CommonHome} objects
</span><ins>+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;home_metadata&quot;, &quot;serialize&quot;)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;home_set_status&quot;, &quot;setStatus&quot;)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;home_get_all_group_attendees&quot;, &quot;getAllGroupAttendees&quot;, transform_recv_result=StoreAPIConduitMixin._to_serialize_pair_list)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;home_shared_to_records&quot;, &quot;sharedToBindRecords&quot;, transform_recv_result=StoreAPIConduitMixin._to_serialize_dict_list_serialized_value)
</ins><span class="cx"> 
</span><span class="cx"> # Calls on L{CommonHomeChild} objects
</span><del>-StoreAPIConduitMixin._make_simple_action(
-    &quot;homechild_listobjects&quot;, &quot;listObjects&quot;,
-    classMethod=True
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;homechild_loadallobjects&quot;, &quot;loadAllObjects&quot;,
-    classMethod=True,
-    transform_recv_result=StoreAPIConduitMixin._to_externalize_list
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;homechild_objectwith&quot;, &quot;objectWith&quot;,
-    classMethod=True,
-    transform_recv_result=StoreAPIConduitMixin._to_externalize
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;homechild_movehere&quot;, &quot;moveObjectResourceHere&quot;
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;homechild_moveaway&quot;, &quot;moveObjectResourceAway&quot;
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;homechild_synctoken&quot;, &quot;syncToken&quot;
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;homechild_resourcenamessincerevision&quot;, &quot;resourceNamesSinceRevision&quot;,
-    transform_send_result=StoreAPIConduitMixin._to_tuple
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;homechild_search&quot;, &quot;search&quot;
-)
</del><ins>+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_listobjects&quot;, &quot;listObjects&quot;, classMethod=True)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_loadallobjects&quot;, &quot;loadAllObjects&quot;, classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize_list)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_objectwith&quot;, &quot;objectWith&quot;, classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_movehere&quot;, &quot;moveObjectResourceHere&quot;)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_moveaway&quot;, &quot;moveObjectResourceAway&quot;)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_synctokenrevision&quot;, &quot;syncTokenRevision&quot;)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_resourcenamessincerevision&quot;, &quot;resourceNamesSinceRevision&quot;, transform_send_result=UtilityConduitMixin._to_tuple)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_search&quot;, &quot;search&quot;)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_sharing_records&quot;, &quot;sharingBindRecords&quot;, transform_recv_result=StoreAPIConduitMixin._to_serialize_dict_value)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_migrate_sharing_records&quot;, &quot;migrateBindRecords&quot;)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;homechild_group_sharees&quot;, &quot;groupSharees&quot;, transform_recv_result=StoreAPIConduitMixin._to_serialize_dict_list_serialized_value)
</ins><span class="cx"> 
</span><span class="cx"> # Calls on L{CommonObjectResource} objects
</span><del>-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_loadallobjects&quot;, &quot;loadAllObjects&quot;,
-    classMethod=True,
-    transform_recv_result=StoreAPIConduitMixin._to_externalize_list
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_loadallobjectswithnames&quot;, &quot;loadAllObjectsWithNames&quot;,
-    classMethod=True,
-    transform_recv_result=StoreAPIConduitMixin._to_externalize_list
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_listobjects&quot;, &quot;listObjects&quot;,
-    classMethod=True
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_countobjects&quot;, &quot;countObjects&quot;,
-    classMethod=True
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_objectwith&quot;, &quot;objectWith&quot;,
-    classMethod=True,
-    transform_recv_result=StoreAPIConduitMixin._to_externalize
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_resourcenameforuid&quot;, &quot;resourceNameForUID&quot;,
-    classMethod=True
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_resourceuidforname&quot;, &quot;resourceUIDForName&quot;,
-    classMethod=True
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_create&quot;, &quot;create&quot;,
-    classMethod=True,
-    transform_recv_result=StoreAPIConduitMixin._to_externalize
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_setcomponent&quot;, &quot;setComponent&quot;
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_component&quot;, &quot;component&quot;,
-    transform_recv_result=StoreAPIConduitMixin._to_string
-)
-StoreAPIConduitMixin._make_simple_action(
-    &quot;objectresource_remove&quot;, &quot;remove&quot;
-)
</del><ins>+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_loadallobjects&quot;, &quot;loadAllObjects&quot;, classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize_list)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_loadallobjectswithnames&quot;, &quot;loadAllObjectsWithNames&quot;, classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize_list)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_listobjects&quot;, &quot;listObjects&quot;, classMethod=True)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_countobjects&quot;, &quot;countObjects&quot;, classMethod=True)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_objectwith&quot;, &quot;objectWith&quot;, classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_resourcenameforuid&quot;, &quot;resourceNameForUID&quot;, classMethod=True)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_resourceuidforname&quot;, &quot;resourceUIDForName&quot;, classMethod=True)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_create&quot;, &quot;create&quot;, classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_setcomponent&quot;, &quot;setComponent&quot;)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_component&quot;, &quot;component&quot;, transform_recv_result=UtilityConduitMixin._to_string)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;objectresource_remove&quot;, &quot;remove&quot;)
+
+# Calls on L{NotificationCollection} objects
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;notification_set_status&quot;, &quot;setStatus&quot;)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, &quot;notification_all_records&quot;, &quot;notificationObjectRecords&quot;, transform_recv_result=UtilityConduitMixin._to_serialize_list)
</ins></span></pre></div>
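<p>As a reading aid, a minimal sketch of how the two serialization transforms added above behave. The <code>Record</code> class is hypothetical; the only contract assumed is a <code>serialize()</code> method returning a JSON-compatible value, which is all the transforms rely on:</p>
<pre>
# Hypothetical stand-in for a store record; only serialize() matters here.
class Record(object):
    def __init__(self, name):
        self.name = name

    def serialize(self):
        return {&quot;name&quot;: self.name}

# _to_serialize_pair_list: [(a, b), ...] -&gt; [[a.serialize(), b.serialize()], ...]
pairs = [(Record(&quot;x&quot;), Record(&quot;y&quot;))]
assert StoreAPIConduitMixin._to_serialize_pair_list(pairs) == [[{&quot;name&quot;: &quot;x&quot;}, {&quot;name&quot;: &quot;y&quot;}]]

# _to_serialize_dict_value: serialize each dictionary value, keep the keys.
mapping = {&quot;calendar&quot;: Record(&quot;cal&quot;)}
assert StoreAPIConduitMixin._to_serialize_dict_value(mapping) == {&quot;calendar&quot;: {&quot;name&quot;: &quot;cal&quot;}}
</pre>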
<a id="CalendarServertrunktxdavcommondatastorepoddingtesttest_conduitpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/test/test_conduit.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/test/test_conduit.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/test/test_conduit.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -32,7 +32,7 @@
</span><span class="cx"> from txdav.caldav.datastore.query.filter import Filter
</span><span class="cx"> from txdav.caldav.datastore.scheduling.freebusy import generateFreeBusyInfo
</span><span class="cx"> from txdav.caldav.datastore.scheduling.ischedule.localservers import ServersDB, Server
</span><del>-from txdav.caldav.datastore.sql import ManagedAttachment
</del><ins>+from txdav.caldav.datastore.sql import ManagedAttachment, AttachmentLink
</ins><span class="cx"> from txdav.caldav.datastore.test.common import CaptureProtocol
</span><span class="cx"> from txdav.common.datastore.podding.conduit import PoddingConduit, \
</span><span class="cx">     FailedCrossPodRequestError
</span><span class="lines">@@ -362,11 +362,11 @@
</span><span class="cx">         yield self.createShare(&quot;user01&quot;, &quot;puser01&quot;)
</span><span class="cx"> 
</span><span class="cx">         calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, name=&quot;calendar&quot;)
</span><del>-        token1_1 = yield calendar1.syncToken()
</del><ins>+        token1_1 = yield calendar1.syncTokenRevision()
</ins><span class="cx">         yield self.commitTransaction(0)
</span><span class="cx"> 
</span><span class="cx">         shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;puser01&quot;, name=&quot;shared-calendar&quot;)
</span><del>-        token2_1 = yield shared.syncToken()
</del><ins>+        token2_1 = yield shared.syncTokenRevision()
</ins><span class="cx">         yield self.commitTransaction(1)
</span><span class="cx"> 
</span><span class="cx">         self.assertEqual(token1_1, token2_1)
</span><span class="lines">@@ -376,11 +376,11 @@
</span><span class="cx">         yield self.commitTransaction(0)
</span><span class="cx"> 
</span><span class="cx">         calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, name=&quot;calendar&quot;)
</span><del>-        token1_2 = yield calendar1.syncToken()
</del><ins>+        token1_2 = yield calendar1.syncTokenRevision()
</ins><span class="cx">         yield self.commitTransaction(0)
</span><span class="cx"> 
</span><span class="cx">         shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;puser01&quot;, name=&quot;shared-calendar&quot;)
</span><del>-        token2_2 = yield shared.syncToken()
</del><ins>+        token2_2 = yield shared.syncTokenRevision()
</ins><span class="cx">         yield self.commitTransaction(1)
</span><span class="cx"> 
</span><span class="cx">         self.assertNotEqual(token1_1, token1_2)
</span><span class="lines">@@ -394,11 +394,11 @@
</span><span class="cx">         yield self.commitTransaction(0)
</span><span class="cx"> 
</span><span class="cx">         calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, name=&quot;calendar&quot;)
</span><del>-        token1_3 = yield calendar1.syncToken()
</del><ins>+        token1_3 = yield calendar1.syncTokenRevision()
</ins><span class="cx">         yield self.commitTransaction(0)
</span><span class="cx"> 
</span><span class="cx">         shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;puser01&quot;, name=&quot;shared-calendar&quot;)
</span><del>-        token2_3 = yield shared.syncToken()
</del><ins>+        token2_3 = yield shared.syncTokenRevision()
</ins><span class="cx">         yield self.commitTransaction(1)
</span><span class="cx"> 
</span><span class="cx">         self.assertNotEqual(token1_1, token1_3)
</span><span class="lines">@@ -1056,3 +1056,83 @@
</span><span class="cx">         attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
</span><span class="cx">         self.assertTrue(attachment is None)
</span><span class="cx">         yield self.commitTransaction(0)
</span><ins>+
+
+    @inlineCallbacks
+    def test_get_all_attachments(self):
+        &quot;&quot;&quot;
+        Test that action=get-all-attachments works.
+        &quot;&quot;&quot;
+
+        yield self.createShare(&quot;user01&quot;, &quot;puser01&quot;)
+
+        calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, name=&quot;calendar&quot;)
+        yield calendar1.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.caldata1))
+        yield self.commitTransaction(0)
+
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
+        yield object1.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text.&quot;))
+        yield self.commitTransaction(0)
+
+        shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;puser01&quot;, calendar_name=&quot;shared-calendar&quot;, name=&quot;1.ics&quot;)
+        attachments = yield shared_object.ownerHome().getAllAttachments()
+        self.assertEqual(len(attachments), 1)
+        self.assertTrue(isinstance(attachments[0], ManagedAttachment))
+        self.assertEqual(attachments[0].contentType(), MimeType.fromString(&quot;text/plain&quot;))
+        self.assertEqual(attachments[0].name(), &quot;test.txt&quot;)
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_get_attachment_data(self):
+        &quot;&quot;&quot;
+        Test that action=get-attachment-data works.
+        &quot;&quot;&quot;
+
+        yield self.createShare(&quot;user01&quot;, &quot;puser01&quot;)
+
+        calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, name=&quot;calendar&quot;)
+        yield calendar1.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.caldata1))
+        yield self.commitTransaction(0)
+
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
+        attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text.&quot;))
+        remote_id = attachment.id()
+        yield self.commitTransaction(0)
+
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=&quot;puser01&quot;)
+        shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;puser01&quot;, calendar_name=&quot;shared-calendar&quot;, name=&quot;1.ics&quot;)
+        attachment = yield ManagedAttachment._create(self.theTransactionUnderTest(1), None, home1.id())
+        attachment._contentType = MimeType.fromString(&quot;text/plain&quot;)
+        attachment._name = &quot;test.txt&quot;
+        yield shared_object.ownerHome().readAttachmentData(remote_id, attachment)
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
+    def test_get_attachment_links(self):
+        &quot;&quot;&quot;
+        Test that action=get-attachment-links works.
+        &quot;&quot;&quot;
+
+        yield self.createShare(&quot;user01&quot;, &quot;puser01&quot;)
+
+        calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, name=&quot;calendar&quot;)
+        cobj1 = yield calendar1.createCalendarObjectWithName(&quot;1.ics&quot;, Component.fromString(self.caldata1))
+        calobjID = cobj1.id()
+        yield self.commitTransaction(0)
+
+        object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home=&quot;user01&quot;, calendar_name=&quot;calendar&quot;, name=&quot;1.ics&quot;)
+        attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString(&quot;text/plain&quot;), &quot;test.txt&quot;, MemoryStream(&quot;Here is some text.&quot;))
+        attID = attachment.id()
+        managedID = attachment.managedID()
+        yield self.commitTransaction(0)
+
+        shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home=&quot;puser01&quot;, calendar_name=&quot;shared-calendar&quot;, name=&quot;1.ics&quot;)
+        links = yield shared_object.ownerHome().getAttachmentLinks()
+        self.assertEqual(len(links), 1)
+        self.assertTrue(isinstance(links[0], AttachmentLink))
+        self.assertEqual(links[0]._attachmentID, attID)
+        self.assertEqual(links[0]._managedID, managedID)
+        self.assertEqual(links[0]._calendarObjectID, calobjID)
+        yield self.commitTransaction(1)
</ins></span></pre></div>
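<p>The test_get_attachment_data pattern above is the interesting one for migration: the receiving pod first creates a local placeholder attachment record, then streams the remote bytes into it. Condensed from the test, with txn standing for the sharee-side transaction:</p>
<pre>
# Create an empty local attachment record to receive the data...
attachment = yield ManagedAttachment._create(txn, None, home1.id())
attachment._contentType = MimeType.fromString(&quot;text/plain&quot;)
attachment._name = &quot;test.txt&quot;

# ...then pull the remote attachment's content across the conduit into it.
yield shared_object.ownerHome().readAttachmentData(remote_id, attachment)
</pre>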
<a id="CalendarServertrunktxdavcommondatastorepoddingtesttest_store_apipy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/test/test_store_api.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/test/test_store_api.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/test/test_store_api.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -104,8 +104,8 @@
</span><span class="cx"> 
</span><span class="cx">         from txdav.caldav.datastore.sql_external import CalendarHomeExternal
</span><span class="cx">         recipient = yield txn.store().directoryService().recordWithUID(uid)
</span><del>-        resourceID = yield txn.store().conduit.send_home_resource_id(self, recipient)
-        home = CalendarHomeExternal(txn, recipient.uid, resourceID) if resourceID is not None else None
</del><ins>+        resourceID = yield txn.store().conduit.send_home_resource_id(txn, recipient)
+        home = CalendarHomeExternal.makeSyntheticExternalHome(txn, recipient.uid, resourceID) if resourceID is not None else None
</ins><span class="cx">         if home:
</span><span class="cx">             home._childClass = home._childClass._externalClass
</span><span class="cx">         returnValue(home)
</span></span></pre></div>
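<p>Putting the pieces together, a condensed sketch of the home-resource_id round trip this test now exercises (names taken from the hunks above; the disabled-home fallback only applies when the migrating flag is set):</p>
<pre>
# Sending pod: ask the remote pod for the resource id of a user's home.
request = {
    &quot;action&quot;: &quot;home-resource_id&quot;,
    &quot;ownerUID&quot;: recipient.uid,
    &quot;migrating&quot;: migrating,
}
response = yield self.sendRequest(txn, recipient, request)

# Receiving pod: resolve the home, falling back to a disabled home
# during migration, and return its integer id (or None).
home = yield txn.calendarHomeWithUID(request[&quot;ownerUID&quot;])
if home is None and request[&quot;migrating&quot;]:
    home = yield txn.calendarHomeWithUID(request[&quot;ownerUID&quot;], status=_HOME_STATUS_DISABLED)
returnValue(home.id() if home is not None else None)
</pre>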
<a id="CalendarServertrunktxdavcommondatastorepoddingtestutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/test/util.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/test/util.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/test/util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -14,25 +14,33 @@
</span><span class="cx"> # limitations under the License.
</span><span class="cx"> ##
</span><span class="cx"> 
</span><ins>+from twisted.internet import reactor
</ins><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><ins>+from twisted.internet.protocol import Protocol
</ins><span class="cx"> 
</span><span class="cx"> from txdav.caldav.datastore.scheduling.ischedule.localservers import (
</span><span class="cx">     Server, ServersDB
</span><span class="cx"> )
</span><span class="cx"> from txdav.common.datastore.podding.conduit import PoddingConduit
</span><ins>+from txdav.common.datastore.podding.request import ConduitRequest
</ins><span class="cx"> from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE
</span><span class="cx"> from txdav.common.datastore.test.util import (
</span><span class="cx">     CommonCommonTests, SQLStoreBuilder, buildTestDirectory
</span><span class="cx"> )
</span><span class="cx"> 
</span><span class="cx"> import txweb2.dav.test.util
</span><ins>+from txweb2 import responsecode
+from txweb2.http import Response, JSONResponse
+from txweb2.http_headers import MimeDisposition, MimeType
+from txweb2.stream import ProducerStream
</ins><span class="cx"> 
</span><span class="cx"> from twext.enterprise.ienterprise import AlreadyFinishedError
</span><ins>+from twext.enterprise.jobqueue import JobItem
</ins><span class="cx"> 
</span><span class="cx"> import json
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-class FakeConduitRequest(object):
</del><ins>+class FakeConduitRequest(ConduitRequest):
</ins><span class="cx">     &quot;&quot;&quot;
</span><span class="cx">     A conduit request that sends messages internally rather than using HTTP
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="lines">@@ -54,28 +62,16 @@
</span><span class="cx">         cls.storeMap[server.details()] = store
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def __init__(self, server, data, stream=None, stream_type=None):
</del><ins>+    def __init__(self, server, data, stream=None, stream_type=None, writeStream=None):
</ins><span class="cx"> 
</span><span class="cx">         self.server = server
</span><span class="cx">         self.data = json.dumps(data)
</span><span class="cx">         self.stream = stream
</span><span class="cx">         self.streamType = stream_type
</span><ins>+        self.writeStream = writeStream
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><del>-    def doRequest(self, txn):
-
-        # Generate an HTTP client request
-        try:
-            response = (yield self._processRequest())
-            response = json.loads(response)
-        except Exception as e:
-            raise ValueError(&quot;Failed cross-pod request: {}&quot;.format(e))
-
-        returnValue(response)
-
-
-    @inlineCallbacks
</del><span class="cx">     def _processRequest(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Process the request by sending it to the relevant server.
</span><span class="lines">@@ -90,19 +86,43 @@
</span><span class="cx">             j[&quot;stream&quot;] = self.stream
</span><span class="cx">             j[&quot;streamType&quot;] = self.streamType
</span><span class="cx">         try:
</span><del>-            result = yield store.conduit.processRequest(j)
</del><ins>+            if store.conduit.isStreamAction(j):
+                stream = ProducerStream()
+                class StreamProtocol(Protocol):
+                    def connectionMade(self):
+                        stream.registerProducer(self.transport, False)
+                    def dataReceived(self, data):
+                        stream.write(data)
+                    def connectionLost(self, reason):
+                        stream.finish()
+
+                result = yield store.conduit.processRequestStream(j, StreamProtocol())
+
+                try:
+                    ct, name = result
+                except ValueError:
+                    code = responsecode.BAD_REQUEST
+                else:
+                    headers = {&quot;content-type&quot;: MimeType.fromString(ct)}
+                    headers[&quot;content-disposition&quot;] = MimeDisposition(&quot;attachment&quot;, params={&quot;filename&quot;: name})
+                    returnValue(Response(responsecode.OK, headers, stream))
+            else:
+                result = yield store.conduit.processRequest(j)
+                code = responsecode.OK
</ins><span class="cx">         except Exception as e:
</span><span class="cx">             # Send the exception over to the other side
</span><span class="cx">             result = {
</span><span class="cx">                 &quot;result&quot;: &quot;exception&quot;,
</span><span class="cx">                 &quot;class&quot;: &quot;.&quot;.join((e.__class__.__module__, e.__class__.__name__,)),
</span><del>-                &quot;request&quot;: str(e),
</del><ins>+                &quot;details&quot;: str(e),
</ins><span class="cx">             }
</span><del>-        result = json.dumps(result)
-        returnValue(result)
</del><ins>+            code = responsecode.BAD_REQUEST
</ins><span class="cx"> 
</span><ins>+        response = JSONResponse(code, result)
+        returnValue(response)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><ins>+
</ins><span class="cx"> class MultiStoreConduitTest(CommonCommonTests, txweb2.dav.test.util.TestCase):
</span><span class="cx"> 
</span><span class="cx">     numberOfStores = 2
</span><span class="lines">@@ -110,6 +130,8 @@
</span><span class="cx">     theStoreBuilders = []
</span><span class="cx">     theStores = []
</span><span class="cx">     activeTransactions = []
</span><ins>+    accounts = None
+    augments = None
</ins><span class="cx"> 
</span><span class="cx">     def __init__(self, methodName='runTest'):
</span><span class="cx">         txweb2.dav.test.util.TestCase.__init__(self, methodName)
</span><span class="lines">@@ -135,13 +157,19 @@
</span><span class="cx">             if i == 0:
</span><span class="cx">                 yield self.buildStoreAndDirectory(
</span><span class="cx">                     serversDB=serversDB,
</span><del>-                    storeBuilder=self.theStoreBuilders[i]
</del><ins>+                    storeBuilder=self.theStoreBuilders[i],
+                    accounts=self.accounts,
+                    augments=self.augments,
</ins><span class="cx">                 )
</span><span class="cx">                 self.theStores[i] = self.store
</span><span class="cx">             else:
</span><span class="cx">                 self.theStores[i] = yield self.buildStore(self.theStoreBuilders[i])
</span><span class="cx">                 directory = buildTestDirectory(
</span><del>-                    self.theStores[i], self.mktemp(), serversDB=serversDB
</del><ins>+                    self.theStores[i],
+                    self.mktemp(),
+                    serversDB=serversDB,
+                    accounts=self.accounts,
+                    augments=self.augments,
</ins><span class="cx">                 )
</span><span class="cx">                 self.theStores[i].setDirectoryService(directory)
</span><span class="cx"> 
</span><span class="lines">@@ -199,6 +227,12 @@
</span><span class="cx">         self.activeTransactions[count] = None
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    @inlineCallbacks
+    def waitAllEmpty(self):
+        for i in range(self.numberOfStores):
+            yield JobItem.waitEmpty(self.theStoreUnderTest(i).newTransaction, reactor, 60.0)
+
+
</ins><span class="cx">     def makeConduit(self, store):
</span><span class="cx">         conduit = PoddingConduit(store)
</span><span class="cx">         conduit.conduitRequestClass = FakeConduitRequest
</span></span></pre></div>
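<p>One detail worth noting in the fake request rework: the exception envelope's payload key changes from <code>request</code> to <code>details</code>. A remote failure now crosses the pod boundary as JSON of roughly this shape (the specific error class and message are illustrative):</p>
<pre>
{
    &quot;result&quot;: &quot;exception&quot;,
    &quot;class&quot;: &quot;txdav.common.datastore.podding.base.FailedCrossPodRequestError&quot;,
    &quot;details&quot;: &quot;Invalid home child specified&quot;,
}
</pre>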
<a id="CalendarServertrunktxdavcommondatastorepoddingutilpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingutilpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/util.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/util.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/util.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,265 @@
</span><ins>+##
+# Copyright (c) 2013-2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+from txdav.common.datastore.podding.base import FailedCrossPodRequestError
+from txdav.common.datastore.sql_notification import NotificationCollection, \
+    NotificationObject
+
+
+class UtilityConduitMixin(object):
+    &quot;&quot;&quot;
+    Defines utility methods for cross-pod API and mix-ins.
+    &quot;&quot;&quot;
+
+    #
+    # Utility methods to map from store objects to/from JSON
+    #
+
+    @inlineCallbacks
+    def _getRequestForStoreObject(self, action, storeObject, classMethod):
+        &quot;&quot;&quot;
+        Create the JSON data needed to identify the remote resource by type and ids, along with any parent resources.
+
+        @param action: the conduit action name
+        @type action: L{str}
+        @param storeObject: the store object that is being operated on
+        @type storeObject: L{object}
+        @param classMethod: indicates whether the method being called is a classmethod
+        @type classMethod: L{bool}
+
+        @return: the transaction in use, the JSON dict to send in the request,
+            the server where the request should be sent
+        @rtype: L{tuple} of (L{CommonStoreTransaction}, L{dict}, L{Server})
+        &quot;&quot;&quot;
+
+        from txdav.common.datastore.sql import CommonObjectResource, CommonHomeChild, CommonHome
+        result = {
+            &quot;action&quot;: action,
+        }
+
+        # Extract the relevant store objects
+        txn = storeObject._txn
+        owner_home = None
+        viewer_home = None
+        home_child = None
+        object_resource = None
+        notification = None
+        if isinstance(storeObject, CommonObjectResource):
+            owner_home = storeObject.ownerHome()
+            viewer_home = storeObject.viewerHome()
+            home_child = storeObject.parentCollection()
+            object_resource = storeObject
+        elif isinstance(storeObject, CommonHomeChild):
+            owner_home = storeObject.ownerHome()
+            viewer_home = storeObject.viewerHome()
+            home_child = storeObject
+            result[&quot;classMethod&quot;] = classMethod
+        elif isinstance(storeObject, CommonHome):
+            owner_home = storeObject
+            viewer_home = storeObject
+            txn = storeObject._txn
+            result[&quot;classMethod&quot;] = classMethod
+        elif isinstance(storeObject, NotificationCollection):
+            notification = storeObject
+            txn = storeObject._txn
+            result[&quot;classMethod&quot;] = classMethod
+
+        # Add store object identities to JSON request
+        if viewer_home:
+            result[&quot;homeType&quot;] = viewer_home._homeType
+            result[&quot;homeUID&quot;] = viewer_home.uid()
+            if getattr(viewer_home, &quot;_migratingHome&quot;, False):
+                result[&quot;allowDisabledHome&quot;] = True
+            if home_child:
+                if home_child.owned():
+                    result[&quot;homeChildID&quot;] = home_child.id()
+                else:
+                    result[&quot;homeChildSharedID&quot;] = home_child.name()
+            if object_resource:
+                result[&quot;objectResourceID&quot;] = object_resource.id()
+
+            # Note that the owner_home is always the ownerHome() because in the sharing case
+            # a viewer is accessing the owner's data on another pod.
+            recipient = yield self.store.directoryService().recordWithUID(owner_home.uid())
+
+        elif notification:
+            result[&quot;notificationUID&quot;] = notification.uid()
+            if getattr(notification, &quot;_migratingHome&quot;, False):
+                result[&quot;allowDisabledHome&quot;] = True
+            recipient = yield self.store.directoryService().recordWithUID(notification.uid())
+
+        returnValue((txn, result, recipient.server(),))
+
+
+    @inlineCallbacks
+    def _getStoreObjectForRequest(self, txn, request):
+        &quot;&quot;&quot;
+        Resolve the supplied JSON data to get a store object to operate on.
+        &quot;&quot;&quot;
+
+        returnObject = txn
+        classObject = None
+
+        if &quot;allowDisabledHome&quot; in request:
+            txn._allowDisabled = True
+
+        if &quot;homeUID&quot; in request:
+            home = yield txn.homeWithUID(request[&quot;homeType&quot;], request[&quot;homeUID&quot;])
+            if home is None:
+                raise FailedCrossPodRequestError(&quot;Invalid owner UID specified&quot;)
+            home._internalRequest = False
+            returnObject = home
+            if request.get(&quot;classMethod&quot;, False):
+                classObject = home._childClass
+
+        if &quot;homeChildID&quot; in request:
+            homeChild = yield home.childWithID(request[&quot;homeChildID&quot;])
+            if homeChild is None:
+                raise FailedCrossPodRequestError(&quot;Invalid home child specified&quot;)
+            returnObject = homeChild
+            if request.get(&quot;classMethod&quot;, False):
+                classObject = homeChild._objectResourceClass
+        elif &quot;homeChildSharedID&quot; in request:
+            homeChild = yield home.childWithName(request[&quot;homeChildSharedID&quot;])
+            if homeChild is None:
+                raise FailedCrossPodRequestError(&quot;Invalid home child specified&quot;)
+            returnObject = homeChild
+            if request.get(&quot;classMethod&quot;, False):
+                classObject = homeChild._objectResourceClass
+
+        if &quot;objectResourceID&quot; in request:
+            objectResource = yield homeChild.objectResourceWithID(request[&quot;objectResourceID&quot;])
+            if objectResource is None:
+                raise FailedCrossPodRequestError(&quot;Invalid object resource specified&quot;)
+            returnObject = objectResource
+
+        if &quot;notificationUID&quot; in request:
+            notification = yield txn.notificationsWithUID(request[&quot;notificationUID&quot;])
+            if notification is None:
+                raise FailedCrossPodRequestError(&quot;Invalid notification UID specified&quot;)
+            notification._internalRequest = False
+            returnObject = notification
+            if request.get(&quot;classMethod&quot;, False):
+                classObject = NotificationObject
+
+        returnValue((returnObject, classObject,))
+
+
+    #
+    # We can simplify code generation for simple calls by dynamically generating the appropriate class methods.
+    #
+
+    @inlineCallbacks
+    def _simple_object_send(self, actionName, storeObject, classMethod=False, transform=None, args=None, kwargs=None):
+        &quot;&quot;&quot;
+        A simple send operation that returns a value.
+
+        @param actionName: name of the action.
+        @type actionName: C{str}
+        @param storeObject: the store object being operated on.
+        @type storeObject: L{object}
+        @param classMethod: whether the named action targets a classmethod on the remote side.
+        @type classMethod: L{bool}
+        @param transform: a function used to convert the JSON response into return values.
+        @type transform: C{callable}
+        @param args: list of optional arguments.
+        @type args: C{list}
+        @param kwargs: optional keyword arguments.
+        @type kwargs: C{dict}
+        &quot;&quot;&quot;
+
+        txn, request, server = yield self._getRequestForStoreObject(actionName, storeObject, classMethod)
+        if args is not None:
+            request[&quot;arguments&quot;] = args
+        if kwargs is not None:
+            request[&quot;keywords&quot;] = kwargs
+        response = yield self.sendRequestToServer(txn, server, request)
+        returnValue(transform(response) if transform is not None else response)
+
+
+    @inlineCallbacks
+    def _simple_object_recv(self, txn, actionName, request, method, transform=None):
+        &quot;&quot;&quot;
+        A simple recv operation that returns a value. We also look for an optional set of arguments/keywords
+        and include those only if present.
+
+        @param actionName: name of the action.
+        @type actionName: C{str}
+        @param request: request arguments
+        @type request: C{dict}
+        @param method: name of the method to execute on the shared resource to get the result.
+        @type method: C{str}
+        @param transform: method to call on returned JSON value to convert it to something useful.
+        @type transform: C{callable}
+        &quot;&quot;&quot;
+
+        storeObject, classObject = yield self._getStoreObjectForRequest(txn, request)
+        if classObject is not None:
+            value = yield getattr(classObject, method)(storeObject, *request.get(&quot;arguments&quot;, ()), **request.get(&quot;keywords&quot;, {}))
+        else:
+            value = yield getattr(storeObject, method)(*request.get(&quot;arguments&quot;, ()), **request.get(&quot;keywords&quot;, {}))
+
+        returnValue(transform(value) if transform is not None else value)
+
+
+    #
+    # Factory methods for binding actions to the conduit class
+    #
+    @staticmethod
+    def _make_simple_action(bindcls, action, method, classMethod=False, transform_recv_result=None, transform_send_result=None):
+        setattr(
+            bindcls,
+            &quot;send_{}&quot;.format(action),
+            lambda self, storeObject, *args, **kwargs:
+                self._simple_object_send(action, storeObject, classMethod=classMethod, transform=transform_send_result, args=args, kwargs=kwargs)
+        )
+        setattr(
+            bindcls,
+            &quot;recv_{}&quot;.format(action),
+            lambda self, txn, message:
+                self._simple_object_recv(txn, action, message, method, transform=transform_recv_result)
+        )
+
+
+    #
+    # Transforms for returned data
+    #
+    @staticmethod
+    def _to_serialize(value):
+        &quot;&quot;&quot;
+        Convert the value to the external (JSON-based) representation.
+        &quot;&quot;&quot;
+        return value.serialize() if value is not None else None
+
+
+    @staticmethod
+    def _to_serialize_list(value):
+        &quot;&quot;&quot;
+        Convert the value to the external (JSON-based) representation.
+        &quot;&quot;&quot;
+        return [v.serialize() for v in value]
+
+
+    @staticmethod
+    def _to_string(value):
+        return str(value)
+
+
+    @staticmethod
+    def _to_tuple(value):
+        return tuple(value)
</ins></span></pre></div>
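<p>To make the factory concrete, a small hedged sketch of binding one extra action. The <code>home_uid</code> action and its use of <code>uid()</code> are invented for illustration (homes do expose a <code>uid()</code> method); the call signature matches <code>_make_simple_action</code> as defined above, assuming the relevant classes are imported:</p>
<pre>
# Hypothetical example: expose CommonHome.uid() through the conduit.
# After this call, StoreAPIConduitMixin grows two generated methods:
#   send_home_uid(self, storeObject, *args, **kwargs)  # calling pod
#   recv_home_uid(self, txn, message)                  # remote pod
UtilityConduitMixin._make_simple_action(
    StoreAPIConduitMixin, &quot;home_uid&quot;, &quot;uid&quot;,
    transform_recv_result=UtilityConduitMixin._to_string,
)

# A caller then treats it like any other conduit action:
#     value = yield conduit.send_home_uid(home)
</pre>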
<a id="CalendarServertrunktxdavcommondatastoresqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -31,7 +31,7 @@
</span><span class="cx"> 
</span><span class="cx"> from twext.enterprise.dal.syntax import (
</span><span class="cx">     Delete, utcNowSQL, Union, Insert, Len, Max, Parameter, SavepointAction,
</span><del>-    Select, Update, ColumnSyntax, TableSyntax, Upper, Count, ALL_COLUMNS, Sum,
</del><ins>+    Select, Update, Count, ALL_COLUMNS, Sum,
</ins><span class="cx">     DatabaseLock, DatabaseUnlock)
</span><span class="cx"> from twext.enterprise.ienterprise import AlreadyFinishedError
</span><span class="cx"> from twext.enterprise.jobqueue import LocalQueuer
</span><span class="lines">@@ -39,12 +39,10 @@
</span><span class="cx"> from twext.internet.decorate import memoizedKey, Memoizable
</span><span class="cx"> from twext.python.clsprop import classproperty
</span><span class="cx"> from twext.python.log import Logger
</span><del>-from txweb2.http_headers import MimeType
</del><span class="cx"> 
</span><span class="cx"> from twisted.application.service import Service
</span><span class="cx"> from twisted.internet import reactor
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue, succeed
</span><del>-from twisted.python import hashlib
</del><span class="cx"> from twisted.python.failure import Failure
</span><span class="cx"> from twisted.python.modules import getModule
</span><span class="cx"> from twisted.python.util import FancyEqMixin
</span><span class="lines">@@ -53,45 +51,40 @@
</span><span class="cx"> from twistedcaldav.dateops import datetimeMktime, pyCalendarTodatetime
</span><span class="cx"> 
</span><span class="cx"> from txdav.base.datastore.util import QueryCacher
</span><del>-from txdav.base.datastore.util import normalizeUUIDOrNot
-from txdav.base.propertystore.base import PropertyName
</del><span class="cx"> from txdav.base.propertystore.none import PropertyStore as NonePropertyStore
</span><span class="cx"> from txdav.base.propertystore.sql import PropertyStore
</span><span class="cx"> from txdav.caldav.icalendarstore import ICalendarTransaction, ICalendarStore
</span><span class="cx"> from txdav.carddav.iaddressbookstore import IAddressBookTransaction
</span><span class="cx"> from txdav.common.datastore.common import HomeChildBase
</span><span class="cx"> from txdav.common.datastore.podding.conduit import PoddingConduit
</span><del>-from txdav.common.datastore.sql_tables import _BIND_MODE_DIRECT, \
-    _BIND_MODE_INDIRECT, _BIND_MODE_OWN, _BIND_STATUS_ACCEPTED, \
-    _BIND_STATUS_DECLINED, _BIND_STATUS_DELETED, _BIND_STATUS_INVALID, \
-    _BIND_STATUS_INVITED, _HOME_STATUS_EXTERNAL, _HOME_STATUS_NORMAL, \
-    _HOME_STATUS_PURGING, schema, splitSQLString
</del><ins>+from txdav.common.datastore.sql_apn import APNSubscriptionsMixin
+from txdav.common.datastore.sql_directory import DelegatesAPIMixin, \
+    GroupsAPIMixin, GroupCacherAPIMixin
+from txdav.common.datastore.sql_imip import imipAPIMixin
+from txdav.common.datastore.sql_notification import NotificationCollection
+from txdav.common.datastore.sql_tables import _BIND_MODE_OWN, _BIND_STATUS_ACCEPTED, \
+    _HOME_STATUS_EXTERNAL, _HOME_STATUS_NORMAL, \
+    _HOME_STATUS_PURGING, schema, splitSQLString, _HOME_STATUS_MIGRATING, \
+    _HOME_STATUS_DISABLED
+from txdav.common.datastore.sql_util import _SharedSyncLogic
+from txdav.common.datastore.sql_sharing import SharingHomeMixIn, SharingMixIn
</ins><span class="cx"> from txdav.common.icommondatastore import ConcurrentModification, \
</span><del>-    RecordNotAllowedError, ExternalShareFailed, ShareNotAllowed, \
-    IndexedSearchException, NotFoundError
</del><ins>+    RecordNotAllowedError, ShareNotAllowed, \
+    IndexedSearchException, EADDRESSBOOKTYPE, ECALENDARTYPE
</ins><span class="cx"> from txdav.common.icommondatastore import HomeChildNameNotAllowedError, \
</span><span class="cx">     HomeChildNameAlreadyExistsError, NoSuchHomeChildError, \
</span><span class="cx">     ObjectResourceNameNotAllowedError, ObjectResourceNameAlreadyExistsError, \
</span><del>-    NoSuchObjectResourceError, AllRetriesFailed, InvalidSubscriptionValues, \
-    InvalidIMIPTokenValues, TooManyObjectResourcesError, \
-    SyncTokenValidException
</del><ins>+    NoSuchObjectResourceError, AllRetriesFailed, \
+    TooManyObjectResourcesError, SyncTokenValidException
</ins><span class="cx"> from txdav.common.idirectoryservice import IStoreDirectoryService, \
</span><span class="cx">     DirectoryRecordNotFoundError
</span><del>-from txdav.common.inotifications import INotificationCollection, \
-    INotificationObject
</del><span class="cx"> from txdav.idav import ChangeCategory
</span><del>-from txdav.who.delegates import Delegates
-from txdav.xml import element
</del><span class="cx"> 
</span><del>-from uuid import uuid4, UUID
-
</del><span class="cx"> from zope.interface import implements, directlyProvides
</span><span class="cx"> 
</span><del>-from collections import namedtuple
-import datetime
</del><ins>+import collections
</ins><span class="cx"> import inspect
</span><span class="cx"> import itertools
</span><del>-import json
</del><span class="cx"> import sys
</span><span class="cx"> import time
</span><span class="cx"> 
</span><span class="lines">@@ -99,18 +92,6 @@
</span><span class="cx"> 
</span><span class="cx"> log = Logger()
</span><span class="cx"> 
</span><del>-ECALENDARTYPE = 0
-EADDRESSBOOKTYPE = 1
-ENOTIFICATIONTYPE = 2
-
-# Labels used to identify the class of resource being modified, so that
-# notification systems can target the correct application
-NotifierPrefixes = {
-    ECALENDARTYPE: &quot;CalDAV&quot;,
-    EADDRESSBOOKTYPE: &quot;CardDAV&quot;,
-}
-
-
</del><span class="cx"> class CommonDataStore(Service, object):
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx">     Shared logic for SQL-based data stores, between calendar and addressbook
</span><span class="lines">@@ -565,7 +546,10 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-class CommonStoreTransaction(object):
</del><ins>+class CommonStoreTransaction(
+    GroupsAPIMixin, GroupCacherAPIMixin, DelegatesAPIMixin,
+    imipAPIMixin, APNSubscriptionsMixin,
+):
</ins><span class="cx">     &quot;&quot;&quot;
</span><span class="cx">     Transaction implementation for SQL database.
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="lines">@@ -585,14 +569,26 @@
</span><span class="cx"> 
</span><span class="cx">         self._store = store
</span><span class="cx">         self._queuer = self._store.queuer
</span><del>-        self._calendarHomes = {}
-        self._addressbookHomes = {}
-        self._notificationHomes = {}
</del><ins>+        self._cachedHomes = {
+            ECALENDARTYPE: {
+                &quot;byUID&quot;: collections.defaultdict(dict),
+                &quot;byID&quot;: collections.defaultdict(dict),
+            },
+            EADDRESSBOOKTYPE: {
+                &quot;byUID&quot;: collections.defaultdict(dict),
+                &quot;byID&quot;: collections.defaultdict(dict),
+            },
+        }
+        self._notificationHomes = {
+            &quot;byUID&quot;: collections.defaultdict(dict),
+            &quot;byID&quot;: collections.defaultdict(dict),
+        }
</ins><span class="cx">         self._notifierFactories = notifierFactories
</span><span class="cx">         self._notifiedAlready = set()
</span><span class="cx">         self._bumpedRevisionAlready = set()
</span><span class="cx">         self._label = label
</span><span class="cx">         self._migrating = migrating
</span><ins>+        self._allowDisabled = False
</ins><span class="cx">         self._primaryHomeType = None
</span><span class="cx">         self._disableCache = disableCache or not store.queryCachingEnabled()
</span><span class="cx">         if disableCache:
</span><span class="lines">@@ -695,14 +691,11 @@
</span><span class="cx">         ).on(self)
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def _determineMemo(self, storeType, uid, create=False, authzUID=None):
</del><ins>+    def _determineMemo(self, storeType, lookupMode, status):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Determine the memo dictionary to use for homeWithUID.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        if storeType == ECALENDARTYPE:
-            return self._calendarHomes
-        else:
-            return self._addressbookHomes
</del><ins>+        return self._cachedHomes[storeType][lookupMode][status]
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="lines">@@ -717,11 +710,11 @@
</span><span class="cx">             yield self.homeWithUID(storeType, uid, create=False)
</span><span class="cx"> 
</span><span class="cx">         # Return the memoized list directly
</span><del>-        returnValue([kv[1] for kv in sorted(self._determineMemo(storeType, None).items(), key=lambda x: x[0])])
</del><ins>+        returnValue([kv[1] for kv in sorted(self._determineMemo(storeType, &quot;byUID&quot;, _HOME_STATUS_NORMAL).items(), key=lambda x: x[0])])
</ins><span class="cx"> 
</span><span class="cx"> 
</span><del>-    @memoizedKey(&quot;uid&quot;, _determineMemo)
-    def homeWithUID(self, storeType, uid, create=False, authzUID=None):
</del><ins>+    @inlineCallbacks
+    def homeWithUID(self, storeType, uid, status=None, create=False, authzUID=None):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         We need to distinguish between various different users &quot;looking&quot; at a home and its
</span><span class="cx">         child resources because we have per-user properties that depend on which user is &quot;looking&quot;.
</span><span class="lines">@@ -733,15 +726,21 @@
</span><span class="cx">         if storeType not in (ECALENDARTYPE, EADDRESSBOOKTYPE):
</span><span class="cx">             raise RuntimeError(&quot;Unknown home type.&quot;)
</span><span class="cx"> 
</span><del>-        return self._homeClass[storeType].homeWithUID(self, uid, create, authzUID)
</del><ins>+        result = self._determineMemo(storeType, &quot;byUID&quot;, status).get(uid)
+        if result is None:
+            result = yield self._homeClass[storeType].homeWithUID(self, uid, status, create, authzUID)
+            if result:
+                self._determineMemo(storeType, &quot;byUID&quot;, status)[uid] = result
+                self._determineMemo(storeType, &quot;byID&quot;, None)[result.id()] = result
+        returnValue(result)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def calendarHomeWithUID(self, uid, create=False, authzUID=None):
-        return self.homeWithUID(ECALENDARTYPE, uid, create=create, authzUID=authzUID)
</del><ins>+    def calendarHomeWithUID(self, uid, status=None, create=False, authzUID=None):
+        return self.homeWithUID(ECALENDARTYPE, uid, status=status, create=create, authzUID=authzUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def addressbookHomeWithUID(self, uid, create=False, authzUID=None):
-        return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create, authzUID=authzUID)
</del><ins>+    def addressbookHomeWithUID(self, uid, status=None, create=False, authzUID=None):
+        return self.homeWithUID(EADDRESSBOOKTYPE, uid, status=status, create=create, authzUID=authzUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="lines">@@ -749,12 +748,15 @@
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Load a calendar or addressbook home by its integer resource ID.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        uid = (yield self._homeClass[storeType].homeUIDWithResourceID(self, rid))
-        if uid:
-            # Always get the owner's view of the home, i.e., authzUID=uid

-            result = (yield self.homeWithUID(storeType, uid, authzUID=uid))
-        else:
-            result = None
</del><ins>+        if storeType not in (ECALENDARTYPE, EADDRESSBOOKTYPE):
+            raise RuntimeError(&quot;Unknown home type.&quot;)
+
+        result = self._determineMemo(storeType, &quot;byID&quot;, None).get(rid)
+        if result is None:
+            result = yield self._homeClass[storeType].homeWithResourceID(self, rid)
+            if result:
+                self._determineMemo(storeType, &quot;byID&quot;, None)[rid] = result
+                self._determineMemo(storeType, &quot;byUID&quot;, result.status())[result.uid()] = result
</ins><span class="cx">         returnValue(result)
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -766,1303 +768,36 @@
</span><span class="cx">         return self.homeWithResourceID(EADDRESSBOOKTYPE, rid)
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    @memoizedKey(&quot;uid&quot;, &quot;_notificationHomes&quot;)
-    def notificationsWithUID(self, uid, create=True):
</del><ins>+    @inlineCallbacks
+    def notificationsWithUID(self, uid, status=None, create=False):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Implement notificationsWithUID.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        return NotificationCollection.notificationsWithUID(self, uid, create)
</del><span class="cx"> 
</span><ins>+        result = self._notificationHomes[&quot;byUID&quot;][status].get(uid)
+        if result is None:
+            result = yield NotificationCollection.notificationsWithUID(self, uid, status=status, create=create)
+            if result:
+                self._notificationHomes[&quot;byUID&quot;][status][uid] = result
+                self._notificationHomes[&quot;byID&quot;][None][result.id()] = result
+        returnValue(result)
</ins><span class="cx"> 
</span><del>-    @memoizedKey(&quot;rid&quot;, &quot;_notificationHomes&quot;)
</del><ins>+
+    @inlineCallbacks
</ins><span class="cx">     def notificationsWithResourceID(self, rid):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Implement notificationsWithResourceID.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        return NotificationCollection.notificationsWithResourceID(self, rid)
</del><span class="cx"> 
</span><ins>+        result = self._notificationHomes[&quot;byID&quot;][None].get(rid)
+        if result is None:
+            result = yield NotificationCollection.notificationsWithResourceID(self, rid)
+            if result:
+                self._notificationHomes[&quot;byID&quot;][None][rid] = result
+                self._notificationHomes[&quot;byUID&quot;][result.status()][result.uid()] = result
+        returnValue(result)
</ins><span class="cx"> 
</span><del>-    @classproperty
-    def _insertAPNSubscriptionQuery(cls):
-        apn = schema.APN_SUBSCRIPTIONS
-        return Insert({
-            apn.TOKEN: Parameter(&quot;token&quot;),
-            apn.RESOURCE_KEY: Parameter(&quot;resourceKey&quot;),
-            apn.MODIFIED: Parameter(&quot;modified&quot;),
-            apn.SUBSCRIBER_GUID: Parameter(&quot;subscriber&quot;),
-            apn.USER_AGENT: Parameter(&quot;userAgent&quot;),
-            apn.IP_ADDR: Parameter(&quot;ipAddr&quot;)
-        })
</del><span class="cx"> 
</span><del>-
-    @classproperty
-    def _updateAPNSubscriptionQuery(cls):
-        apn = schema.APN_SUBSCRIPTIONS
-        return Update(
-            {
-                apn.MODIFIED: Parameter(&quot;modified&quot;),
-                apn.SUBSCRIBER_GUID: Parameter(&quot;subscriber&quot;),
-                apn.USER_AGENT: Parameter(&quot;userAgent&quot;),
-                apn.IP_ADDR: Parameter(&quot;ipAddr&quot;)
-            },
-            Where=(apn.TOKEN == Parameter(&quot;token&quot;)).And(
-                apn.RESOURCE_KEY == Parameter(&quot;resourceKey&quot;))
-        )
-
-
-    @classproperty
-    def _selectAPNSubscriptionQuery(cls):
-        apn = schema.APN_SUBSCRIPTIONS
-        return Select(
-            [apn.MODIFIED, apn.SUBSCRIBER_GUID],
-            From=apn,
-            Where=(apn.TOKEN == Parameter(&quot;token&quot;)).And(
-                apn.RESOURCE_KEY == Parameter(&quot;resourceKey&quot;)
-            )
-        )
-
-
-    @inlineCallbacks
-    def addAPNSubscription(
-        self, token, key, timestamp, subscriber,
-        userAgent, ipAddr
-    ):
-        if not (token and key and timestamp and subscriber):
-            raise InvalidSubscriptionValues()
-
-        # Cap these values at 255 characters
-        userAgent = userAgent[:255]
-        ipAddr = ipAddr[:255]
-
-        row = yield self._selectAPNSubscriptionQuery.on(
-            self,
-            token=token, resourceKey=key
-        )
-        if not row:  # Subscription does not yet exist
-            try:
-                yield self._insertAPNSubscriptionQuery.on(
-                    self,
-                    token=token, resourceKey=key, modified=timestamp,
-                    subscriber=subscriber, userAgent=userAgent,
-                    ipAddr=ipAddr)
-            except Exception:
-                # Subscription may have been added by someone else, which is fine
-                pass
-
-        else:  # Subscription exists, so update with new timestamp and subscriber
-            try:
-                yield self._updateAPNSubscriptionQuery.on(
-                    self,
-                    token=token, resourceKey=key, modified=timestamp,
-                    subscriber=subscriber, userAgent=userAgent,
-                    ipAddr=ipAddr)
-            except Exception:
-                # Subscription may have been added by someone else, which is fine
-                pass
-
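
A race-tolerant select-then-insert-or-update of the same shape as addAPNSubscription above, sketched with sqlite3; the table and columns are stand-ins for APN_SUBSCRIPTIONS:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE apn (token TEXT, resource_key TEXT, modified INTEGER, '
             'UNIQUE (token, resource_key))')

def addSubscription(token, key, timestamp):
    row = conn.execute('SELECT modified FROM apn WHERE token=? AND resource_key=?',
                       (token, key)).fetchone()
    try:
        if row is None:
            conn.execute('INSERT INTO apn VALUES (?, ?, ?)', (token, key, timestamp))
        else:
            conn.execute('UPDATE apn SET modified=? WHERE token=? AND resource_key=?',
                         (timestamp, token, key))
    except sqlite3.IntegrityError:
        pass  # a concurrent writer created the row first, which is fine

addSubscription('tok1', 'calendar/user01', 100)
addSubscription('tok1', 'calendar/user01', 200)  # takes the UPDATE path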
-
-    @classproperty
-    def _removeAPNSubscriptionQuery(cls):
-        apn = schema.APN_SUBSCRIPTIONS
-        return Delete(From=apn,
-                      Where=(apn.TOKEN == Parameter(&quot;token&quot;)).And(
-                          apn.RESOURCE_KEY == Parameter(&quot;resourceKey&quot;)))
-
-
-    def removeAPNSubscription(self, token, key):
-        return self._removeAPNSubscriptionQuery.on(
-            self,
-            token=token, resourceKey=key)
-
-
-    @classproperty
-    def _purgeOldAPNSubscriptionQuery(cls):
-        apn = schema.APN_SUBSCRIPTIONS
-        return Delete(From=apn,
-                      Where=(apn.MODIFIED &lt; Parameter(&quot;olderThan&quot;)))
-
-
-    def purgeOldAPNSubscriptions(self, olderThan):
-        return self._purgeOldAPNSubscriptionQuery.on(
-            self,
-            olderThan=olderThan)
-
-
-    @classproperty
-    def _apnSubscriptionsByTokenQuery(cls):
-        apn = schema.APN_SUBSCRIPTIONS
-        return Select([apn.RESOURCE_KEY, apn.MODIFIED, apn.SUBSCRIBER_GUID],
-                      From=apn, Where=apn.TOKEN == Parameter(&quot;token&quot;))
-
-
-    def apnSubscriptionsByToken(self, token):
-        return self._apnSubscriptionsByTokenQuery.on(self, token=token)
-
-
-    @classproperty
-    def _apnSubscriptionsByKeyQuery(cls):
-        apn = schema.APN_SUBSCRIPTIONS
-        return Select([apn.TOKEN, apn.SUBSCRIBER_GUID],
-                      From=apn, Where=apn.RESOURCE_KEY == Parameter(&quot;resourceKey&quot;))
-
-
-    def apnSubscriptionsByKey(self, key):
-        return self._apnSubscriptionsByKeyQuery.on(self, resourceKey=key)
-
-
-    @classproperty
-    def _apnSubscriptionsBySubscriberQuery(cls):
-        apn = schema.APN_SUBSCRIPTIONS
-        return Select([apn.TOKEN, apn.RESOURCE_KEY, apn.MODIFIED, apn.USER_AGENT, apn.IP_ADDR],
-                      From=apn, Where=apn.SUBSCRIBER_GUID == Parameter(&quot;subscriberGUID&quot;))
-
-
-    def apnSubscriptionsBySubscriber(self, guid):
-        return self._apnSubscriptionsBySubscriberQuery.on(self, subscriberGUID=guid)
-
-
-    # Create IMIP token
-
-    @classproperty
-    def _insertIMIPTokenQuery(cls):
-        imip = schema.IMIP_TOKENS
-        return Insert({
-            imip.TOKEN: Parameter(&quot;token&quot;),
-            imip.ORGANIZER: Parameter(&quot;organizer&quot;),
-            imip.ATTENDEE: Parameter(&quot;attendee&quot;),
-            imip.ICALUID: Parameter(&quot;icaluid&quot;),
-        })
-
-
-    @inlineCallbacks
-    def imipCreateToken(self, organizer, attendee, icaluid, token=None):
-        if not (organizer and attendee and icaluid):
-            raise InvalidIMIPTokenValues()
-
-        if token is None:
-            token = str(uuid4())
-
-        try:
-            yield self._insertIMIPTokenQuery.on(
-                self,
-                token=token, organizer=organizer, attendee=attendee,
-                icaluid=icaluid)
-        except Exception:
-            # TODO: is it okay if someone else created the same row just now?
-            pass
-        returnValue(token)
-
-    # Lookup IMIP organizer+attendee+icaluid for token
-
-
-    @classproperty
-    def _selectIMIPTokenByTokenQuery(cls):
-        imip = schema.IMIP_TOKENS
-        return Select([imip.ORGANIZER, imip.ATTENDEE, imip.ICALUID], From=imip,
-                      Where=(imip.TOKEN == Parameter(&quot;token&quot;)))
-
-
-    def imipLookupByToken(self, token):
-        return self._selectIMIPTokenByTokenQuery.on(self, token=token)
-
-    # Lookup IMIP token for organizer+attendee+icaluid
-
-
-    @classproperty
-    def _selectIMIPTokenQuery(cls):
-        imip = schema.IMIP_TOKENS
-        return Select(
-            [imip.TOKEN],
-            From=imip,
-            Where=(imip.ORGANIZER == Parameter(&quot;organizer&quot;)).And(
-                imip.ATTENDEE == Parameter(&quot;attendee&quot;)).And(
-                imip.ICALUID == Parameter(&quot;icaluid&quot;))
-        )
-
-
-    @classproperty
-    def _updateIMIPTokenQuery(cls):
-        imip = schema.IMIP_TOKENS
-        return Update(
-            {imip.ACCESSED: utcNowSQL, },
-            Where=(imip.ORGANIZER == Parameter(&quot;organizer&quot;)).And(
-                imip.ATTENDEE == Parameter(&quot;attendee&quot;)).And(
-                    imip.ICALUID == Parameter(&quot;icaluid&quot;))
-        )
-
-
-    @inlineCallbacks
-    def imipGetToken(self, organizer, attendee, icaluid):
-        row = (yield self._selectIMIPTokenQuery.on(
-            self, organizer=organizer,
-            attendee=attendee, icaluid=icaluid))
-        if row:
-            token = row[0][0]
-            # update the timestamp
-            yield self._updateIMIPTokenQuery.on(
-                self, organizer=organizer,
-                attendee=attendee, icaluid=icaluid)
-        else:
-            token = None
-        returnValue(token)
-
-
-    # Remove IMIP token
-    @classproperty
-    def _removeIMIPTokenQuery(cls):
-        imip = schema.IMIP_TOKENS
-        return Delete(From=imip,
-                      Where=(imip.TOKEN == Parameter(&quot;token&quot;)))
-
-
-    def imipRemoveToken(self, token):
-        return self._removeIMIPTokenQuery.on(self, token=token)
-
-
-    # Purge old IMIP tokens
-    @classproperty
-    def _purgeOldIMIPTokensQuery(cls):
-        imip = schema.IMIP_TOKENS
-        return Delete(From=imip,
-                      Where=(imip.ACCESSED &lt; Parameter(&quot;olderThan&quot;)))
-
-
-    def purgeOldIMIPTokens(self, olderThan):
-        &quot;&quot;&quot;
-        @type olderThan: datetime
-        &quot;&quot;&quot;
-        return self._purgeOldIMIPTokensQuery.on(self, olderThan=olderThan)
-
-    # End of IMIP
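
An in-memory sketch of the iMIP token mapping handled above: one opaque uuid4 token per (organizer, attendee, icaluid) triple, usable in both directions. The dict-based model is purely illustrative:

from uuid import uuid4

tokens = {}  # (organizer, attendee, icaluid) -> token

def imipCreateToken(organizer, attendee, icaluid):
    if not (organizer and attendee and icaluid):
        raise ValueError('missing iMIP token values')
    return tokens.setdefault((organizer, attendee, icaluid), str(uuid4()))

def imipLookupByToken(token):
    matches = [key for key, value in tokens.items() if value == token]
    return matches[0] if matches else None

token = imipCreateToken('mailto:org@example.com', 'mailto:att@example.com', 'uid-1')
assert imipLookupByToken(token)[2] == 'uid-1'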
-
-
-    # Groups
-
-    @classproperty
-    def _addGroupQuery(cls):
-        gr = schema.GROUPS
-        return Insert(
-            {
-                gr.NAME: Parameter(&quot;name&quot;),
-                gr.GROUP_UID: Parameter(&quot;groupUID&quot;),
-                gr.MEMBERSHIP_HASH: Parameter(&quot;membershipHash&quot;)
-            },
-            Return=gr.GROUP_ID
-        )
-
-
-    @classproperty
-    def _updateGroupQuery(cls):
-        gr = schema.GROUPS
-        return Update(
-            {
-                gr.MEMBERSHIP_HASH: Parameter(&quot;membershipHash&quot;),
-                gr.NAME: Parameter(&quot;name&quot;),
-                gr.MODIFIED: Parameter(&quot;timestamp&quot;),
-                gr.EXTANT: Parameter(&quot;extant&quot;),
-            },
-            Where=(gr.GROUP_UID == Parameter(&quot;groupUID&quot;))
-        )
-
-
-    @classproperty
-    def _groupByUID(cls):
-        gr = schema.GROUPS
-        return Select(
-            [gr.GROUP_ID, gr.NAME, gr.MEMBERSHIP_HASH, gr.MODIFIED, gr.EXTANT],
-            From=gr,
-            Where=(gr.GROUP_UID == Parameter(&quot;groupUID&quot;))
-        )
-
-
-    @classproperty
-    def _groupByID(cls):
-        gr = schema.GROUPS
-        return Select(
-            [gr.GROUP_UID, gr.NAME, gr.MEMBERSHIP_HASH, gr.EXTANT],
-            From=gr,
-            Where=(gr.GROUP_ID == Parameter(&quot;groupID&quot;))
-        )
-
-
-    @classproperty
-    def _deleteGroup(cls):
-        gr = schema.GROUPS
-        return Delete(
-            From=gr,
-            Where=(gr.GROUP_ID == Parameter(&quot;groupID&quot;))
-        )
-
-
-    @inlineCallbacks
-    def addGroup(self, groupUID, name, membershipHash):
-        &quot;&quot;&quot;
-        @type groupUID: C{unicode}
-        @type name: C{unicode}
-        @type membershipHash: C{str}
-        &quot;&quot;&quot;
-        record = yield self.directoryService().recordWithUID(groupUID)
-        if record is None:
-            returnValue(None)
-
-        groupID = (yield self._addGroupQuery.on(
-            self,
-            name=name.encode(&quot;utf-8&quot;),
-            groupUID=groupUID.encode(&quot;utf-8&quot;),
-            membershipHash=membershipHash
-        ))[0][0]
-
-        yield self.refreshGroup(
-            groupUID, record, groupID, name.encode(&quot;utf-8&quot;), membershipHash, True
-        )
-        returnValue(groupID)
-
-
-    def updateGroup(self, groupUID, name, membershipHash, extant=True):
-        &quot;&quot;&quot;
-        @type groupUID: C{unicode}
-        @type name: C{unicode}
-        @type membershipHash: C{str}
-        @type extant: C{boolean}
-        &quot;&quot;&quot;
-        timestamp = datetime.datetime.utcnow()
-        return self._updateGroupQuery.on(
-            self,
-            name=name.encode(&quot;utf-8&quot;),
-            groupUID=groupUID.encode(&quot;utf-8&quot;),
-            timestamp=timestamp,
-            membershipHash=membershipHash,
-            extant=(1 if extant else 0)
-        )
-
-
-    @inlineCallbacks
-    def groupByUID(self, groupUID, create=True):
-        &quot;&quot;&quot;
-        Return or create a record for the group UID.
-
-        @type groupUID: C{unicode}
-
-        @return: Deferred firing with tuple of group ID C{int}, group name
-            C{unicode}, membership hash C{str}, modified timestamp, and
-            extant C{boolean}
-        &quot;&quot;&quot;
-        results = (
-            yield self._groupByUID.on(
-                self, groupUID=groupUID.encode(&quot;utf-8&quot;)
-            )
-        )
-        if results:
-            returnValue((
-                results[0][0],  # group id
-                results[0][1].decode(&quot;utf-8&quot;),  # name
-                results[0][2],  # membership hash
-                results[0][3],  # modified timestamp
-                bool(results[0][4]),  # extant
-            ))
-        elif create:
-            savepoint = SavepointAction(&quot;groupByUID&quot;)
-            yield savepoint.acquire(self)
-            try:
-                groupID = yield self.addGroup(groupUID, u&quot;&quot;, &quot;&quot;)
-                if groupID is None:
-                    # The record does not actually exist within the directory
-                    yield savepoint.release(self)
-                    returnValue((None, None, None, None, None))
-
-            except Exception:
-                yield savepoint.rollback(self)
-                results = (
-                    yield self._groupByUID.on(
-                        self, groupUID=groupUID.encode(&quot;utf-8&quot;)
-                    )
-                )
-                if results:
-                    returnValue((
-                        results[0][0],  # group id
-                        results[0][1].decode(&quot;utf-8&quot;),  # name
-                        results[0][2],  # membership hash
-                        results[0][3],  # modified timestamp
-                        bool(results[0][4]),  # extant
-                    ))
-                else:
-                    returnValue((None, None, None, None, None))
-            else:
-                yield savepoint.release(self)
-                results = (
-                    yield self._groupByUID.on(
-                        self, groupUID=groupUID.encode(&quot;utf-8&quot;)
-                    )
-                )
-                if results:
-                    returnValue((
-                        results[0][0],  # group id
-                        results[0][1].decode(&quot;utf-8&quot;),  # name
-                        results[0][2],  # membership hash
-                        results[0][3],  # modified timestamp
-                        bool(results[0][4]),  # extant
-                    ))
-                else:
-                    returnValue((None, None, None, None, None))
-        else:
-            returnValue((None, None, None, None, None))
-
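
The savepoint dance in groupByUID above, shown with sqlite3 SAVEPOINTs: attempt the insert, roll back if a concurrent transaction won the race, then re-read whichever row exists. The schema is a stand-in:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE groups (group_uid TEXT PRIMARY KEY, name TEXT)')

def groupByUID(groupUID):
    row = conn.execute('SELECT group_uid, name FROM groups WHERE group_uid=?',
                       (groupUID,)).fetchone()
    if row is not None:
        return row
    conn.execute('SAVEPOINT groupByUID')
    try:
        conn.execute('INSERT INTO groups VALUES (?, ?)', (groupUID, ''))
    except sqlite3.IntegrityError:
        conn.execute('ROLLBACK TO SAVEPOINT groupByUID')  # another writer created it
    conn.execute('RELEASE SAVEPOINT groupByUID')
    # Re-read so both the winner and the loser of the race see the same row.
    return conn.execute('SELECT group_uid, name FROM groups WHERE group_uid=?',
                        (groupUID,)).fetchone()

assert groupByUID('group01') == ('group01', '')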
-
-    @inlineCallbacks
-    def groupByID(self, groupID):
-        &quot;&quot;&quot;
-        Given a group ID, return the group UID, or raise NotFoundError
-
-        @type groupID: C{int}
-        @return: Deferred firing with a tuple of group UID C{unicode},
-            group name C{unicode}, membership hash C{str}, and extant C{boolean}
-        &quot;&quot;&quot;
-        try:
-            results = (yield self._groupByID.on(self, groupID=groupID))[0]
-            if results:
-                results = (
-                    results[0].decode(&quot;utf-8&quot;),
-                    results[1].decode(&quot;utf-8&quot;),
-                    results[2],
-                    bool(results[3])
-                )
-            returnValue(results)
-        except IndexError:
-            raise NotFoundError
-
-
-    def deleteGroup(self, groupID):
-        return self._deleteGroup.on(self, groupID=groupID)
-
-    # End of Groups
-
-
-    # Group Members
-
-    @classproperty
-    def _addMemberToGroupQuery(cls):
-        gm = schema.GROUP_MEMBERSHIP
-        return Insert(
-            {
-                gm.GROUP_ID: Parameter(&quot;groupID&quot;),
-                gm.MEMBER_UID: Parameter(&quot;memberUID&quot;)
-            }
-        )
-
-
-    @classproperty
-    def _removeMemberFromGroupQuery(cls):
-        gm = schema.GROUP_MEMBERSHIP
-        return Delete(
-            From=gm,
-            Where=(
-                gm.GROUP_ID == Parameter(&quot;groupID&quot;)
-            ).And(
-                gm.MEMBER_UID == Parameter(&quot;memberUID&quot;)
-            )
-        )
-
-
-    @classproperty
-    def _selectGroupMembersQuery(cls):
-        gm = schema.GROUP_MEMBERSHIP
-        return Select(
-            [gm.MEMBER_UID],
-            From=gm,
-            Where=(
-                gm.GROUP_ID == Parameter(&quot;groupID&quot;)
-            )
-        )
-
-
-    @classproperty
-    def _selectGroupsForQuery(cls):
-        gr = schema.GROUPS
-        gm = schema.GROUP_MEMBERSHIP
-
-        return Select(
-            [gr.GROUP_UID],
-            From=gr,
-            Where=(
-                gr.GROUP_ID.In(
-                    Select(
-                        [gm.GROUP_ID],
-                        From=gm,
-                        Where=(
-                            gm.MEMBER_UID == Parameter(&quot;uid&quot;)
-                        )
-                    )
-                )
-            )
-        )
-
-
-    def addMemberToGroup(self, memberUID, groupID):
-        return self._addMemberToGroupQuery.on(
-            self, groupID=groupID, memberUID=memberUID.encode(&quot;utf-8&quot;)
-        )
-
-
-    def removeMemberFromGroup(self, memberUID, groupID):
-        return self._removeMemberFromGroupQuery.on(
-            self, groupID=groupID, memberUID=memberUID.encode(&quot;utf-8&quot;)
-        )
-
-
-    @inlineCallbacks
-    def groupMemberUIDs(self, groupID):
-        &quot;&quot;&quot;
-        Returns the cached set of UIDs for members of the given groupID.
-        Sub-groups are not returned in the results but their members are,
-        because the group membership has already been expanded/flattened
-        before storing in the db.
-
-        @param groupID: the group ID
-        @type groupID: C{int}
-        @return: the set of member UIDs
-        @rtype: a Deferred which fires with a set() of C{str} UIDs
-        &quot;&quot;&quot;
-        members = set()
-        results = (yield self._selectGroupMembersQuery.on(self, groupID=groupID))
-        for row in results:
-            members.add(row[0].decode(&quot;utf-8&quot;))
-        returnValue(members)
-
-
-    @inlineCallbacks
-    def refreshGroup(self, groupUID, record, groupID, cachedName, cachedMembershipHash, cachedExtant):
-        &quot;&quot;&quot;
-        @param groupUID: the UID of the group
-        @type groupUID: C{unicode}
-        @param record: the directory record
-        @type record: C{iDirectoryRecord}
-        @param groupID: group resource id
-        @type groupID: C{int}
-        @param cachedName: group name in the database
-        @type cachedName: C{unicode}
-        @param cachedMembershipHash: membership hash in the database
-        @type cachedMembershipHash: C{str}
-        @param cachedExtant: extant flag from the database
-        @type cachedExtant: C{bool}
-
-        @return: Deferred firing with a tuple of membershipChanged C{boolean},
-            added member UIDs, and removed member UIDs
-
-        &quot;&quot;&quot;
-        if record is not None:
-            memberUIDs = yield record.expandedMemberUIDs()
-            name = record.displayName
-            extant = True
-        else:
-            memberUIDs = frozenset()
-            name = cachedName
-            extant = False
-
-        membershipHashContent = hashlib.md5()
-        for memberUID in sorted(memberUIDs):
-            membershipHashContent.update(str(memberUID))
-        membershipHash = membershipHashContent.hexdigest()
-
-        if cachedMembershipHash != membershipHash:
-            membershipChanged = True
-            log.debug(
-                &quot;Group '{group}' changed&quot;, group=name
-            )
-        else:
-            membershipChanged = False
-
-        if membershipChanged or extant != cachedExtant:
-            # also updates group mod date
-            yield self.updateGroup(
-                groupUID, name, membershipHash, extant=extant
-            )
-
-        if membershipChanged:
-            addedUIDs, removedUIDs = yield self.synchronizeMembers(groupID, set(memberUIDs))
-        else:
-            addedUIDs = removedUIDs = None
-
-        returnValue((membershipChanged, addedUIDs, removedUIDs,))
-
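
A worked example of the membership hash computed in refreshGroup above: md5 over the sorted member UIDs, so the digest is order-independent but changes whenever the flattened membership changes:

import hashlib

def membershipHash(memberUIDs):
    content = hashlib.md5()
    for memberUID in sorted(memberUIDs):
        content.update(str(memberUID))
    return content.hexdigest()

assert membershipHash(['u2', 'u1']) == membershipHash(['u1', 'u2'])
assert membershipHash(['u1']) != membershipHash(['u1', 'u2'])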
-
-    @inlineCallbacks
-    def synchronizeMembers(self, groupID, newMemberUIDs):
-        &quot;&quot;&quot;
-        Update the group membership table in the database to match the new membership list. This
-        method will diff the existing set with the new set and apply the changes. It also calls out
-        to a groupChanged() method with the set of added and removed members so that other modules
-        that depend on groups can monitor the changes.
-
-        @param groupID: group id of group to update
-        @type groupID: L{int}
-        @param newMemberUIDs: set of new member UIDs in the group
-        @type newMemberUIDs: L{set} of L{str}
-        &quot;&quot;&quot;
-        cachedMemberUIDs = (yield self.groupMemberUIDs(groupID))
-
-        removed = cachedMemberUIDs - newMemberUIDs
-        for memberUID in removed:
-            yield self.removeMemberFromGroup(memberUID, groupID)
-
-        added = newMemberUIDs - cachedMemberUIDs
-        for memberUID in added:
-            yield self.addMemberToGroup(memberUID, groupID)
-
-        yield self.groupChanged(groupID, added, removed)
-
-        returnValue((added, removed,))
-
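
The diff-and-apply step in synchronizeMembers above, reduced to plain set arithmetic:

def membershipDiff(cachedMemberUIDs, newMemberUIDs):
    # Members to remove are in the cache but not the new set, and vice versa.
    removed = cachedMemberUIDs - newMemberUIDs
    added = newMemberUIDs - cachedMemberUIDs
    return added, removed

added, removed = membershipDiff(set(['u1', 'u2']), set(['u2', 'u3']))
assert added == set(['u3']) and removed == set(['u1'])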
-
-    @inlineCallbacks
-    def groupChanged(self, groupID, addedUIDs, removedUIDs):
-        &quot;&quot;&quot;
-        Called when membership of a group changes.
-
-        @param groupID: group id of group that changed
-        @type groupID: L{int}
-        @param addedUIDs: set of new member UIDs added to the group
-        @type addedUIDs: L{set} of L{str}
-        @param removedUIDs: set of old member UIDs removed from the group
-        @type removedUIDs: L{set} of L{str}
-        &quot;&quot;&quot;
-        yield Delegates.groupChanged(self, groupID, addedUIDs, removedUIDs)
-
-
-    @inlineCallbacks
-    def groupMembers(self, groupID):
-        &quot;&quot;&quot;
-        The members of the given group as recorded in the db
-        &quot;&quot;&quot;
-        members = set()
-        memberUIDs = (yield self.groupMemberUIDs(groupID))
-        for uid in memberUIDs:
-            record = (yield self.directoryService().recordWithUID(uid))
-            if record is not None:
-                members.add(record)
-        returnValue(members)
-
-
-    @inlineCallbacks
-    def groupUIDsFor(self, uid):
-        &quot;&quot;&quot;
-        Returns the cached set of UIDs for the groups this given uid is
-        a member of.
-
-        @param uid: the uid
-        @type uid: C{unicode}
-        @return: the set of group UIDs
-        @rtype: a Deferred which fires with a set() of C{unicode} group UIDs
-        &quot;&quot;&quot;
-        groups = set()
-        results = (
-            yield self._selectGroupsForQuery.on(
-                self, uid=uid.encode(&quot;utf-8&quot;)
-            )
-        )
-        for row in results:
-            groups.add(row[0].decode(&quot;utf-8&quot;))
-        returnValue(groups)
-
-    # End of Group Members
-
-    # Delegates
-
-
-    @classproperty
-    def _addDelegateQuery(cls):
-        de = schema.DELEGATES
-        return Insert({de.DELEGATOR: Parameter(&quot;delegator&quot;),
-                       de.DELEGATE: Parameter(&quot;delegate&quot;),
-                       de.READ_WRITE: Parameter(&quot;readWrite&quot;),
-                       })
-
-
-    @classproperty
-    def _addDelegateGroupQuery(cls):
-        ds = schema.DELEGATE_GROUPS
-        return Insert({ds.DELEGATOR: Parameter(&quot;delegator&quot;),
-                       ds.GROUP_ID: Parameter(&quot;groupID&quot;),
-                       ds.READ_WRITE: Parameter(&quot;readWrite&quot;),
-                       ds.IS_EXTERNAL: Parameter(&quot;isExternal&quot;),
-                       })
-
-
-    @classproperty
-    def _removeDelegateQuery(cls):
-        de = schema.DELEGATES
-        return Delete(
-            From=de,
-            Where=(
-                de.DELEGATOR == Parameter(&quot;delegator&quot;)
-            ).And(
-                de.DELEGATE == Parameter(&quot;delegate&quot;)
-            ).And(
-                de.READ_WRITE == Parameter(&quot;readWrite&quot;)
-            )
-        )
-
-
-    @classproperty
-    def _removeDelegatesQuery(cls):
-        de = schema.DELEGATES
-        return Delete(
-            From=de,
-            Where=(
-                de.DELEGATOR == Parameter(&quot;delegator&quot;)
-            ).And(
-                de.READ_WRITE == Parameter(&quot;readWrite&quot;)
-            )
-        )
-
-
-    @classproperty
-    def _removeDelegateGroupQuery(cls):
-        ds = schema.DELEGATE_GROUPS
-        return Delete(
-            From=ds,
-            Where=(
-                ds.DELEGATOR == Parameter(&quot;delegator&quot;)
-            ).And(
-                ds.GROUP_ID == Parameter(&quot;groupID&quot;)
-            ).And(
-                ds.READ_WRITE == Parameter(&quot;readWrite&quot;)
-            )
-        )
-
-
-    @classproperty
-    def _removeDelegateGroupsQuery(cls):
-        ds = schema.DELEGATE_GROUPS
-        return Delete(
-            From=ds,
-            Where=(
-                ds.DELEGATOR == Parameter(&quot;delegator&quot;)
-            ).And(
-                ds.READ_WRITE == Parameter(&quot;readWrite&quot;)
-            )
-        )
-
-
-    @classproperty
-    def _selectDelegatesQuery(cls):
-        de = schema.DELEGATES
-        return Select(
-            [de.DELEGATE],
-            From=de,
-            Where=(
-                de.DELEGATOR == Parameter(&quot;delegator&quot;)
-            ).And(
-                de.READ_WRITE == Parameter(&quot;readWrite&quot;)
-            )
-        )
-
-
-    @classproperty
-    def _selectDelegatorsToGroupQuery(cls):
-        dg = schema.DELEGATE_GROUPS
-        return Select(
-            [dg.DELEGATOR],
-            From=dg,
-            Where=(
-                dg.GROUP_ID == Parameter(&quot;delegateGroup&quot;)
-            ).And(
-                dg.READ_WRITE == Parameter(&quot;readWrite&quot;)
-            )
-        )
-
-
-    @classproperty
-    def _selectDelegateGroupsQuery(cls):
-        ds = schema.DELEGATE_GROUPS
-        gr = schema.GROUPS
-
-        return Select(
-            [gr.GROUP_UID],
-            From=gr,
-            Where=(
-                gr.GROUP_ID.In(
-                    Select(
-                        [ds.GROUP_ID],
-                        From=ds,
-                        Where=(
-                            ds.DELEGATOR == Parameter(&quot;delegator&quot;)
-                        ).And(
-                            ds.READ_WRITE == Parameter(&quot;readWrite&quot;)
-                        )
-                    )
-                )
-            )
-        )
-
-
-    @classproperty
-    def _selectDirectDelegatorsQuery(cls):
-        de = schema.DELEGATES
-        return Select(
-            [de.DELEGATOR],
-            From=de,
-            Where=(
-                de.DELEGATE == Parameter(&quot;delegate&quot;)
-            ).And(
-                de.READ_WRITE == Parameter(&quot;readWrite&quot;)
-            )
-        )
-
-
-    @classproperty
-    def _selectIndirectDelegatorsQuery(cls):
-        dg = schema.DELEGATE_GROUPS
-        gm = schema.GROUP_MEMBERSHIP
-
-        return Select(
-            [dg.DELEGATOR],
-            From=dg,
-            Where=(
-                dg.GROUP_ID.In(
-                    Select(
-                        [gm.GROUP_ID],
-                        From=gm,
-                        Where=(gm.MEMBER_UID == Parameter(&quot;delegate&quot;))
-                    )
-                ).And(
-                    dg.READ_WRITE == Parameter(&quot;readWrite&quot;)
-                )
-            )
-        )
-
-
-    @classproperty
-    def _selectIndirectDelegatesQuery(cls):
-        dg = schema.DELEGATE_GROUPS
-        gm = schema.GROUP_MEMBERSHIP
-
-        return Select(
-            [gm.MEMBER_UID],
-            From=gm,
-            Where=(
-                gm.GROUP_ID.In(
-                    Select(
-                        [dg.GROUP_ID],
-                        From=dg,
-                        Where=(dg.DELEGATOR == Parameter(&quot;delegator&quot;)).And(
-                            dg.READ_WRITE == Parameter(&quot;readWrite&quot;))
-                    )
-                )
-            )
-        )
-
-
-    @classproperty
-    def _selectExternalDelegateGroupsQuery(cls):
-        edg = schema.EXTERNAL_DELEGATE_GROUPS
-        return Select(
-            [edg.DELEGATOR, edg.GROUP_UID_READ, edg.GROUP_UID_WRITE],
-            From=edg
-        )
-
-
-    @classproperty
-    def _removeExternalDelegateGroupsPairQuery(cls):
-        edg = schema.EXTERNAL_DELEGATE_GROUPS
-        return Delete(
-            From=edg,
-            Where=(
-                edg.DELEGATOR == Parameter(&quot;delegator&quot;)
-            )
-        )
-
-
-    @classproperty
-    def _storeExternalDelegateGroupsPairQuery(cls):
-        edg = schema.EXTERNAL_DELEGATE_GROUPS
-        return Insert(
-            {
-                edg.DELEGATOR: Parameter(&quot;delegator&quot;),
-                edg.GROUP_UID_READ: Parameter(&quot;readDelegate&quot;),
-                edg.GROUP_UID_WRITE: Parameter(&quot;writeDelegate&quot;),
-            }
-        )
-
-
-    @classproperty
-    def _removeExternalDelegateGroupsQuery(cls):
-        ds = schema.DELEGATE_GROUPS
-        return Delete(
-            From=ds,
-            Where=(
-                ds.DELEGATOR == Parameter(&quot;delegator&quot;)
-            ).And(
-                ds.IS_EXTERNAL == 1
-            )
-        )
-
-
-    @inlineCallbacks
-    def addDelegate(self, delegator, delegate, readWrite):
-        &quot;&quot;&quot;
-        Adds a row to the DELEGATES table.  The delegate should not be a
-        group.  To delegate to a group, call addDelegateGroup() instead.
-
-        @param delegator: the UID of the delegator
-        @type delegator: C{unicode}
-        @param delegate: the UID of the delegate
-        @type delegate: C{unicode}
-        @param readWrite: grant read and write access if True, otherwise
-            read-only access
-        @type readWrite: C{boolean}
-        &quot;&quot;&quot;
-
-        def _addDelegate(subtxn):
-            return self._addDelegateQuery.on(
-                subtxn,
-                delegator=delegator.encode(&quot;utf-8&quot;),
-                delegate=delegate.encode(&quot;utf-8&quot;),
-                readWrite=1 if readWrite else 0
-            )
-
-        try:
-            yield self.subtransaction(_addDelegate, retries=0, failureOK=True)
-        except AllRetriesFailed:
-            pass
-
-
-    @inlineCallbacks
-    def addDelegateGroup(self, delegator, delegateGroupID, readWrite,
-                         isExternal=False):
-        &quot;&quot;&quot;
-        Adds a row to the DELEGATE_GROUPS table.  The delegate should be a
-        group.  To delegate to a person, call addDelegate() instead.
-
-        @param delegator: the UID of the delegator
-        @type delegator: C{unicode}
-        @param delegateGroupID: the GROUP_ID of the delegate group
-        @type delegateGroupID: C{int}
-        @param readWrite: grant read and write access if True, otherwise
-            read-only access
-        @type readWrite: C{boolean}
-        &quot;&quot;&quot;
-
-        def _addDelegateGroup(subtxn):
-            return self._addDelegateGroupQuery.on(
-                subtxn,
-                delegator=delegator.encode(&quot;utf-8&quot;),
-                groupID=delegateGroupID,
-                readWrite=1 if readWrite else 0,
-                isExternal=1 if isExternal else 0
-            )
-
-        try:
-            yield self.subtransaction(_addDelegateGroup, retries=0, failureOK=True)
-        except AllRetriesFailed:
-            pass
-
-
-    def removeDelegate(self, delegator, delegate, readWrite):
-        &quot;&quot;&quot;
-        Removes a row from the DELEGATES table.  The delegate should not be a
-        group.  To remove a delegate group, call removeDelegateGroup() instead.
-
-        @param delegator: the UID of the delegator
-        @type delegator: C{unicode}
-        @param delegate: the UID of the delegate
-        @type delegate: C{unicode}
-        @param readWrite: remove read and write access if True, otherwise
-            read-only access
-        @type readWrite: C{boolean}
-        &quot;&quot;&quot;
-        return self._removeDelegateQuery.on(
-            self,
-            delegator=delegator.encode(&quot;utf-8&quot;),
-            delegate=delegate.encode(&quot;utf-8&quot;),
-            readWrite=1 if readWrite else 0
-        )
-
-
-    def removeDelegates(self, delegator, readWrite):
-        &quot;&quot;&quot;
-        Removes all rows for this delegator/readWrite combination from the
-        DELEGATES table.
-
-        @param delegator: the UID of the delegator
-        @type delegator: C{unicode}
-        @param readWrite: remove read and write access if True, otherwise
-            read-only access
-        @type readWrite: C{boolean}
-        &quot;&quot;&quot;
-        return self._removeDelegatesQuery.on(
-            self,
-            delegator=delegator.encode(&quot;utf-8&quot;),
-            readWrite=1 if readWrite else 0
-        )
-
-
-    def removeDelegateGroup(self, delegator, delegateGroupID, readWrite):
-        &quot;&quot;&quot;
-        Removes a row from the DELEGATE_GROUPS table.  The delegate should be a
-        group.  To remove a delegate person, call removeDelegate() instead.
-
-        @param delegator: the UID of the delegator
-        @type delegator: C{unicode}
-        @param delegateGroupID: the GROUP_ID of the delegate group
-        @type delegateGroupID: C{int}
-        @param readWrite: remove read and write access if True, otherwise
-            read-only access
-        @type readWrite: C{boolean}
-        &quot;&quot;&quot;
-        return self._removeDelegateGroupQuery.on(
-            self,
-            delegator=delegator.encode(&quot;utf-8&quot;),
-            groupID=delegateGroupID,
-            readWrite=1 if readWrite else 0
-        )
-
-
-    def removeDelegateGroups(self, delegator, readWrite):
-        &quot;&quot;&quot;
-        Removes all rows for this delegator/readWrite combination from the
-        DELEGATE_GROUPS table.
-
-        @param delegator: the UID of the delegator
-        @type delegator: C{unicode}
-        @param readWrite: remove read and write access if True, otherwise
-            read-only access
-        @type readWrite: C{boolean}
-        &quot;&quot;&quot;
-        return self._removeDelegateGroupsQuery.on(
-            self,
-            delegator=delegator.encode(&quot;utf-8&quot;),
-            readWrite=1 if readWrite else 0
-        )
-
-
-    @inlineCallbacks
-    def delegates(self, delegator, readWrite, expanded=False):
-        &quot;&quot;&quot;
-        Returns the UIDs of all delegates for the given delegator.  If
-        expanded is False, only the direct delegates (users and groups)
-        are returned.  If expanded is True, the expanded membership is
-        returned, not including the groups themselves.
-
-        @param delegator: the UID of the delegator
-        @type delegator: C{unicode}
-        @param readWrite: the access-type to check for; read and write
-            access if True, otherwise read-only access
-        @type readWrite: C{boolean}
-        @returns: the UIDs of the delegates (for the specified access
-            type)
-        @rtype: a Deferred resulting in a set
-        &quot;&quot;&quot;
-        delegates = set()
-        delegatorU = delegator.encode(&quot;utf-8&quot;)
-
-        # First get the direct delegates
-        results = (
-            yield self._selectDelegatesQuery.on(
-                self,
-                delegator=delegatorU,
-                readWrite=1 if readWrite else 0
-            )
-        )
-        delegates.update([row[0].decode(&quot;utf-8&quot;) for row in results])
-
-        if expanded:
-            # Get those who are in groups which have been delegated to
-            results = (
-                yield self._selectIndirectDelegatesQuery.on(
-                    self,
-                    delegator=delegatorU,
-                    readWrite=1 if readWrite else 0
-                )
-            )
-            # Skip the delegator if they are in one of the groups
-            delegates.update([row[0].decode(&quot;utf-8&quot;) for row in results if row[0] != delegatorU])
-
-        else:
-            # Get the directly-delegated-to groups
-            results = (
-                yield self._selectDelegateGroupsQuery.on(
-                    self,
-                    delegator=delegatorU,
-                    readWrite=1 if readWrite else 0
-                )
-            )
-            delegates.update([row[0].decode(&quot;utf-8&quot;) for row in results])
-
-        returnValue(delegates)
-
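
A dictionary-backed model of the expanded/unexpanded distinction in delegates() above, with all names invented for illustration: expanded=False returns direct delegates plus the delegated-to group UIDs themselves; expanded=True flattens those groups and filters out the delegator:

def delegateUIDs(directDelegates, delegateGroupUIDs, groupMembers, delegator, expanded):
    result = set(directDelegates)
    if expanded:
        for groupUID in delegateGroupUIDs:
            result.update(uid for uid in groupMembers[groupUID] if uid != delegator)
    else:
        result.update(delegateGroupUIDs)
    return result

members = {'g1': set(['u2', 'owner'])}
assert delegateUIDs(['u1'], ['g1'], members, 'owner', False) == set(['u1', 'g1'])
assert delegateUIDs(['u1'], ['g1'], members, 'owner', True) == set(['u1', 'u2'])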
-
-    @inlineCallbacks
-    def delegators(self, delegate, readWrite):
-        &quot;&quot;&quot;
-        Returns the UIDs of all delegators which have granted access to
-        the given delegate, either directly or indirectly via groups.
-
-        @param delegate: the UID of the delegate
-        @type delegate: C{unicode}
-        @param readWrite: the access-type to check for; read and write
-            access if True, otherwise read-only access
-        @type readWrite: C{boolean}
-        @returns: the UIDs of the delegators (for the specified access
-            type)
-        @rtype: a Deferred resulting in a set
-        &quot;&quot;&quot;
-        delegators = set()
-        delegateU = delegate.encode(&quot;utf-8&quot;)
-
-        # First get the direct delegators
-        results = (
-            yield self._selectDirectDelegatorsQuery.on(
-                self,
-                delegate=delegateU,
-                readWrite=1 if readWrite else 0
-            )
-        )
-        delegators.update([row[0].decode(&quot;utf-8&quot;) for row in results])
-
-        # Finally get those who have delegated to groups the delegate
-        # is a member of
-        results = (
-            yield self._selectIndirectDelegatorsQuery.on(
-                self,
-                delegate=delegateU,
-                readWrite=1 if readWrite else 0
-            )
-        )
-        # Skip the delegate if they are in one of the groups
-        delegators.update([row[0].decode(&quot;utf-8&quot;) for row in results if row[0] != delegateU])
-
-        returnValue(delegators)
-
-
-    @inlineCallbacks
-    def delegatorsToGroup(self, delegateGroupID, readWrite):
-        &quot;&quot;&quot;
-        Return the UIDs of those who have delegated to the given group with the
-        given access level.
-
-        @param delegateGroupID: the group ID of the delegate group
-        @type delegateGroupID: C{int}
-        @param readWrite: the access-type to check for; read and write
-            access if True, otherwise read-only access
-        @type readWrite: C{boolean}
-        @returns: the UIDs of the delegators (for the specified access
-            type)
-        @rtype: a Deferred resulting in a set
-
-        &quot;&quot;&quot;
-        results = (
-            yield self._selectDelegatorsToGroupQuery.on(
-                self,
-                delegateGroup=delegateGroupID,
-                readWrite=1 if readWrite else 0
-            )
-        )
-        delegators = set([row[0].decode(&quot;utf-8&quot;) for row in results])
-        returnValue(delegators)
-
-
-    @inlineCallbacks
-    def allGroupDelegates(self):
-        &quot;&quot;&quot;
-        Return the UIDs of all groups which have been delegated to.  Useful
-        for obtaining the set of groups which need to be synchronized from
-        the directory.
-
-        @returns: the UIDs of all delegated-to groups
-        @rtype: a Deferred resulting in a set
-        &quot;&quot;&quot;
-        gr = schema.GROUPS
-        dg = schema.DELEGATE_GROUPS
-
-        results = (yield Select(
-            [gr.GROUP_UID],
-            From=gr,
-            Where=(gr.GROUP_ID.In(Select([dg.GROUP_ID], From=dg, Where=None)))
-        ).on(self))
-        delegates = set()
-        for row in results:
-            delegates.add(row[0].decode(&quot;utf-8&quot;))
-
-        returnValue(delegates)
-
-
-    @inlineCallbacks
-    def externalDelegates(self):
-        &quot;&quot;&quot;
-        Returns a dictionary mapping delegator UIDs to (read-group, write-group)
-        tuples, including only those assignments that originated from the
-        directory.
-
-        @returns: dictionary mapping delegator uid to (readDelegateUID,
-            writeDelegateUID) tuples
-        @rtype: a Deferred resulting in a dictionary
-        &quot;&quot;&quot;
-        delegates = {}
-
-        # Get the externally managed delegates (which are all groups)
-        results = (yield self._selectExternalDelegateGroupsQuery.on(self))
-        for delegator, readDelegateUID, writeDelegateUID in results:
-            delegates[delegator.encode(&quot;utf-8&quot;)] = (
-                readDelegateUID.encode(&quot;utf-8&quot;) if readDelegateUID else None,
-                writeDelegateUID.encode(&quot;utf-8&quot;) if writeDelegateUID else None
-            )
-
-        returnValue(delegates)
-
-
-    @inlineCallbacks
-    def assignExternalDelegates(
-        self, delegator, readDelegateGroupID, writeDelegateGroupID,
-        readDelegateUID, writeDelegateUID
-    ):
-        &quot;&quot;&quot;
-        Update the external delegate group table so we can quickly identify
-        diffs next time, and update the delegate group table itself
-
-        @param delegator: the UID of the delegator
-        @type delegator: C{UUID}
-        &quot;&quot;&quot;
-
-        # Delete existing external assignments for the delegator
-        yield self._removeExternalDelegateGroupsQuery.on(
-            self,
-            delegator=str(delegator)
-        )
-
-        # Remove from the external comparison table
-        yield self._removeExternalDelegateGroupsPairQuery.on(
-            self,
-            delegator=str(delegator)
-        )
-
-        # Store new assignments in the external comparison table
-        if readDelegateUID or writeDelegateUID:
-            readDelegateForDB = (
-                readDelegateUID.encode(&quot;utf-8&quot;) if readDelegateUID else &quot;&quot;
-            )
-            writeDelegateForDB = (
-                writeDelegateUID.encode(&quot;utf-8&quot;) if writeDelegateUID else &quot;&quot;
-            )
-            yield self._storeExternalDelegateGroupsPairQuery.on(
-                self,
-                delegator=str(delegator),
-                readDelegate=readDelegateForDB,
-                writeDelegate=writeDelegateForDB
-            )
-
-        # Apply new assignments
-        if readDelegateGroupID is not None:
-            yield self.addDelegateGroup(
-                delegator, readDelegateGroupID, False, isExternal=True
-            )
-        if writeDelegateGroupID is not None:
-            yield self.addDelegateGroup(
-                delegator, writeDelegateGroupID, True, isExternal=True
-            )
-
-
-    # End of Delegates
-
-
</del><span class="cx">     def preCommit(self, operation):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Run things before C{commit}.  (Note: only provided by SQL
</span><span class="lines">@@ -2809,227 +1544,58 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-class _EmptyCacher(object):
-
-    def set(self, key, value):
-        return succeed(True)
-
-
-    def get(self, key, withIdentifier=False):
-        return succeed(None)
-
-
-    def delete(self, key):
-        return succeed(True)
-
-
-
-class SharingHomeMixIn(object):
-    &quot;&quot;&quot;
-    Common class for CommonHome to implement sharing operations
-    &quot;&quot;&quot;
-
-    @inlineCallbacks
-    def acceptShare(self, shareUID, summary=None):
-        &quot;&quot;&quot;
-        This share is being accepted.
-        &quot;&quot;&quot;
-
-        shareeView = yield self.anyObjectWithShareUID(shareUID)
-        if shareeView is not None:
-            yield shareeView.acceptShare(summary)
-
-        returnValue(shareeView)
-
-
-    @inlineCallbacks
-    def declineShare(self, shareUID):
-        &quot;&quot;&quot;
-        This share is being declined.
-        &quot;&quot;&quot;
-
-        shareeView = yield self.anyObjectWithShareUID(shareUID)
-        if shareeView is not None:
-            yield shareeView.declineShare()
-
-        returnValue(shareeView is not None)
-
-
-    #
-    # External (cross-pod) sharing - entry point is the sharee's home collection.
-    #
-    @inlineCallbacks
-    def processExternalInvite(
-        self, ownerUID, ownerRID, ownerName, shareUID, bindMode, summary,
-        copy_invite_properties, supported_components=None
-    ):
-        &quot;&quot;&quot;
-        External invite received.
-        &quot;&quot;&quot;
-
-        # Get the owner home - create external one if not present
-        ownerHome = yield self._txn.homeWithUID(
-            self._homeType, ownerUID, create=True
-        )
-        if ownerHome is None or not ownerHome.external():
-            raise ExternalShareFailed(&quot;Invalid owner UID: {}&quot;.format(ownerUID))
-
-        # Try to find owner calendar via its external id
-        ownerView = yield ownerHome.childWithExternalID(ownerRID)
-        if ownerView is None:
-            try:
-                ownerView = yield ownerHome.createChildWithName(
-                    ownerName, externalID=ownerRID
-                )
-            except HomeChildNameAlreadyExistsError:
-                # This is odd - it means we possibly have a left over sharer
-                # collection which the sharer likely removed and re-created
-                # with the same name but now it has a different externalID and
-                # is not found by the initial query. What we do is check to see
-                # whether any shares still reference the old ID - if they do we
-                # are hosed. If not, we can remove the old item and create a new one.
-                oldOwnerView = yield ownerHome.childWithName(ownerName)
-                invites = yield oldOwnerView.sharingInvites()
-                if len(invites) != 0:
-                    log.error(
-                        &quot;External invite collection name is present with a &quot;
-                        &quot;different externalID and still has shares&quot;
-                    )
-                    raise
-                log.error(
-                    &quot;External invite collection name is present with a &quot;
-                    &quot;different externalID - trying to fix&quot;
-                )
-                yield ownerHome.removeExternalChild(oldOwnerView)
-                ownerView = yield ownerHome.createChildWithName(
-                    ownerName, externalID=ownerRID
-                )
-
-            if (
-                supported_components is not None and
-                hasattr(ownerView, &quot;setSupportedComponents&quot;)
-            ):
-                yield ownerView.setSupportedComponents(supported_components)
-
-        # Now carry out the share operation
-        if bindMode == _BIND_MODE_DIRECT:
-            shareeView = yield ownerView.directShareWithUser(
-                self.uid(), shareName=shareUID
-            )
-        else:
-            shareeView = yield ownerView.inviteUIDToShare(
-                self.uid(), bindMode, summary, shareName=shareUID
-            )
-
-        shareeView.setInviteCopyProperties(copy_invite_properties)
-
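
The recovery path described in the comment inside processExternalInvite above, reduced to its shape: a same-named collection carrying a stale external ID may be replaced only when no shares still reference it. The dict-based model is purely illustrative:

def resolveOwnerView(childExternalIDs, inviteCounts, name, externalID):
    current = childExternalIDs.get(name)
    if current == externalID:
        return current
    if current is not None:
        if inviteCounts.get(name, 0) != 0:
            raise RuntimeError('stale collection still has shares')
        del childExternalIDs[name]  # safe to discard the leftover collection
    childExternalIDs[name] = externalID
    return externalID

children = {'calendar': 'old-id'}
assert resolveOwnerView(children, {'calendar': 0}, 'calendar', 'new-id') == 'new-id'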
-
-    @inlineCallbacks
-    def processExternalUninvite(self, ownerUID, ownerRID, shareUID):
-        &quot;&quot;&quot;
-        External uninvite received.
-        &quot;&quot;&quot;
-
-        # Get the owner home
-        ownerHome = yield self._txn.homeWithUID(self._homeType, ownerUID)
-        if ownerHome is None or not ownerHome.external():
-            raise ExternalShareFailed(&quot;Invalid owner UID: {}&quot;.format(ownerUID))
-
-        # Try to find owner calendar via its external id
-        ownerView = yield ownerHome.childWithExternalID(ownerRID)
-        if ownerView is None:
-            raise ExternalShareFailed(&quot;Invalid share ID: {}&quot;.format(shareUID))
-
-        # Now carry out the share operation
-        yield ownerView.uninviteUIDFromShare(self.uid())
-
-        # See if there are any references to the external share. If not,
-        # remove it
-        invites = yield ownerView.sharingInvites()
-        if len(invites) == 0:
-            yield ownerHome.removeExternalChild(ownerView)
-
-
-    @inlineCallbacks
-    def processExternalReply(
-        self, ownerUID, shareeUID, shareUID, bindStatus, summary=None
-    ):
-        &quot;&quot;&quot;
-        External share reply received.
-        &quot;&quot;&quot;
-
-        # Make sure the shareeUID and shareUID match
-
-        # Get the owner home - create external one if not present
-        shareeHome = yield self._txn.homeWithUID(self._homeType, shareeUID)
-        if shareeHome is None or not shareeHome.external():
-            raise ExternalShareFailed(
-                &quot;Invalid sharee UID: {}&quot;.format(shareeUID)
-            )
-
-        # Try to find owner calendar via its external id
-        shareeView = yield shareeHome.anyObjectWithShareUID(shareUID)
-        if shareeView is None:
-            raise ExternalShareFailed(&quot;Invalid share UID: {}&quot;.format(shareUID))
-
-        # Now carry out the share operation
-        if bindStatus == _BIND_STATUS_ACCEPTED:
-            yield shareeHome.acceptShare(shareUID, summary)
-        elif bindStatus == _BIND_STATUS_DECLINED:
-            if shareeView.direct():
-                yield shareeView.deleteShare()
-            else:
-                yield shareeHome.declineShare(shareUID)
-
-
-
</del><span class="cx"> class CommonHome(SharingHomeMixIn):
</span><span class="cx">     log = Logger()
</span><span class="cx"> 
</span><span class="cx">     # All these need to be initialized by derived classes for each store type
</span><span class="cx">     _homeType = None
</span><del>-    _homeTable = None
-    _homeMetaDataTable = None
</del><ins>+    _homeSchema = None
+    _homeMetaDataSchema = None
+
</ins><span class="cx">     _externalClass = None
</span><span class="cx">     _childClass = None
</span><del>-    _childTable = None
</del><ins>+
+    _bindSchema = None
+    _revisionsSchema = None
+    _objectSchema = None
+
</ins><span class="cx">     _notifierPrefix = None
</span><span class="cx"> 
</span><span class="cx">     _dataVersionKey = None
</span><span class="cx">     _dataVersionValue = None
</span><span class="cx"> 
</span><del>-    _cacher = None  # Initialize in derived classes
-
</del><span class="cx">     @classmethod
</span><del>-    @inlineCallbacks
-    def makeClass(cls, transaction, ownerUID, no_cache=False, authzUID=None):
</del><ins>+    def makeClass(cls, transaction, homeData, authzUID=None):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Build the actual home class taking into account the possibility that we might need to
</span><span class="cx">         switch in the external version of the class.
</span><span class="cx"> 
</span><span class="cx">         @param transaction: transaction
</span><span class="cx">         @type transaction: L{CommonStoreTransaction}
</span><del>-        @param ownerUID: owner UID of home to load
-        @type ownerUID: C{str}
-        @param no_cache: should cached query be used
-        @type no_cache: C{bool}
</del><ins>+        @param homeData: home table column data
+        @type homeData: C{list}
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        home = cls(transaction, ownerUID, authzUID=authzUID)
-        actualHome = yield home.initFromStore(no_cache)
-        returnValue(actualHome)
</del><span class="cx"> 
</span><ins>+        status = homeData[cls.homeColumns().index(cls._homeSchema.STATUS)]
+        if status == _HOME_STATUS_EXTERNAL:
+            home = cls._externalClass(transaction, homeData)
+        else:
+            home = cls(transaction, homeData, authzUID=authzUID)
+        return home.initFromStore()
</ins><span class="cx"> 
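
For illustration: a minimal, self-contained sketch of the status dispatch that makeClass() performs. The class names and the two-column row are placeholders, not the store's real schema:

    _HOME_STATUS_EXTERNAL = 1

    class ExternalHome(object):
        def __init__(self, txn, homeData):
            self._txn, self._data = txn, homeData

    class Home(object):
        _externalClass = ExternalHome

        def __init__(self, txn, homeData, authzUID=None):
            self._txn, self._data, self._authzUID = txn, homeData, authzUID

        @classmethod
        def makeClass(cls, txn, homeData):
            # column 1 stands in for the STATUS column of the real home row
            if homeData[1] == _HOME_STATUS_EXTERNAL:
                return cls._externalClass(txn, homeData)
            return cls(txn, homeData)

    print(type(Home.makeClass(None, ('user01', 1))).__name__)  # ExternalHome
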
</span><del>-    def __init__(self, transaction, ownerUID, authzUID=None):
</del><ins>+
+    def __init__(self, transaction, homeData, authzUID=None):
</ins><span class="cx">         self._txn = transaction
</span><del>-        self._ownerUID = ownerUID
</del><ins>+
+        for attr, value in zip(self.homeAttributes(), homeData):
+            setattr(self, attr, value)
+
</ins><span class="cx">         self._authzUID = authzUID
</span><span class="cx">         if self._authzUID is None:
</span><span class="cx">             if self._txn._authz_uid is not None:
</span><span class="cx">                 self._authzUID = self._txn._authz_uid
</span><span class="cx">             else:
</span><span class="cx">                 self._authzUID = self._ownerUID
</span><del>-        self._resourceID = None
-        self._status = _HOME_STATUS_NORMAL
</del><span class="cx">         self._dataVersion = None
</span><span class="cx">         self._childrenLoaded = False
</span><span class="cx">         self._children = {}
</span><span class="lines">@@ -3038,15 +1604,13 @@
</span><span class="cx">         self._created = None
</span><span class="cx">         self._modified = None
</span><span class="cx">         self._syncTokenRevision = None
</span><del>-        if transaction._disableCache:
-            self._cacher = _EmptyCacher()
</del><span class="cx"> 
</span><span class="cx">         # This is used to track whether the originating request is from the store associated
</span><span class="cx">         # by the transaction, or from a remote store. We need to be able to distinguish store
</span><span class="cx">         # objects that are locally hosted (_HOME_STATUS_NORMAL) or remotely hosted
</span><span class="cx">         # (_HOME_STATUS_EXTERNAL). For the later we need to know whether the object is being
</span><span class="cx">         # accessed from the local store (in which case requests for child objects etc will be
</span><del>-        # directed at a remote store) or whether it is being accessed as the tresult of a remote
</del><ins>+        # directed at a remote store) or whether it is being accessed as the result of a remote
</ins><span class="cx">         # request (in which case requests for child objects etc will be directed at the local store).
</span><span class="cx">         self._internalRequest = True
</span><span class="cx"> 
</span><span class="lines">@@ -3071,14 +1635,16 @@
</span><span class="cx">         return Select(
</span><span class="cx">             cls.homeColumns(),
</span><span class="cx">             From=home,
</span><del>-            Where=home.OWNER_UID == Parameter(&quot;ownerUID&quot;)
</del><ins>+            Where=(home.OWNER_UID == Parameter(&quot;ownerUID&quot;)).And(
+                home.STATUS == Parameter(&quot;status&quot;)
+            )
</ins><span class="cx">         )
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classproperty
</span><span class="cx">     def _ownerFromResourceID(cls):
</span><span class="cx">         home = cls._homeSchema
</span><del>-        return Select([home.OWNER_UID],
</del><ins>+        return Select([home.OWNER_UID, home.STATUS],
</ins><span class="cx">                       From=home,
</span><span class="cx">                       Where=home.RESOURCE_ID == Parameter(&quot;resourceID&quot;))
</span><span class="cx"> 
</span><span class="lines">@@ -3154,41 +1720,22 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><del>-    def initFromStore(self, no_cache=False):
</del><ins>+    def initFromStore(self):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Initialize this object from the store. We read in and cache all the
</span><span class="cx">         extra meta-data from the DB to avoid having to do DB queries for those
</span><span class="cx">         individually later.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        result = yield self._cacher.get(self._ownerUID)
-        if result is None:
-            result = yield self._homeColumnsFromOwnerQuery.on(self._txn, ownerUID=self._ownerUID)
-            if result:
-                result = result[0]
-                if not no_cache:
-                    yield self._cacher.set(self._ownerUID, result)
</del><span class="cx"> 
</span><del>-        if result:
-            for attr, value in zip(self.homeAttributes(), result):
-                setattr(self, attr, value)
</del><ins>+        yield self.initMetaDataFromStore()
+        yield self._loadPropertyStore()
</ins><span class="cx"> 
</span><del>-            # STOP! If the status is external we need to convert this object to a CommonHomeExternal class which will
-            # have the right behavior for non-hosted external users.
-            if self._status == _HOME_STATUS_EXTERNAL:
-                actualHome = self._externalClass(self._txn, self._ownerUID, self._resourceID)
-            else:
-                actualHome = self
-            yield actualHome.initMetaDataFromStore()
-            yield actualHome._loadPropertyStore()
</del><ins>+        for factory_type, factory in self._txn._notifierFactories.items():
+            self.addNotifier(factory_type, factory.newNotifier(self))
</ins><span class="cx"> 
</span><del>-            for factory_type, factory in self._txn._notifierFactories.items():
-                actualHome.addNotifier(factory_type, factory.newNotifier(actualHome))
</del><ins>+        returnValue(self)
</ins><span class="cx"> 
</span><del>-            returnValue(actualHome)
-        else:
-            returnValue(None)
</del><span class="cx"> 
</span><del>-
</del><span class="cx">     @inlineCallbacks
</span><span class="cx">     def initMetaDataFromStore(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="lines">@@ -3213,6 +1760,24 @@
</span><span class="cx">             setattr(self, attr, value)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def serialize(self):
+        &quot;&quot;&quot;
+        Create a dictionary mapping metadata attributes so this object can be sent over a cross-pod call
+        and reconstituted at the other end. Note that the other end may have a different schema so
+        the attributes may not match exactly and will need to be processed accordingly.
+        &quot;&quot;&quot;
+        return dict([(attr[1:], getattr(self, attr, None)) for attr in self.metadataAttributes()])
+
+
+    def deserialize(self, mapping):
+        &quot;&quot;&quot;
+        Given a mapping generated by L{serialize}, convert the values to attributes on this object.
+        &quot;&quot;&quot;
+
+        for attr in self.metadataAttributes():
+            setattr(self, attr, mapping.get(attr[1:]))
+
+
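
A self-contained round trip of the serialize()/deserialize() pattern above; note the attr[1:] slice strips the leading underscore for the wire format (the attribute names here are placeholders):

    class Meta(object):
        def metadataAttributes(self):
            return ['_created', '_modified']

        def serialize(self):
            # '_created' travels as 'created'
            return dict((attr[1:], getattr(self, attr, None)) for attr in self.metadataAttributes())

        def deserialize(self, mapping):
            for attr in self.metadataAttributes():
                setattr(self, attr, mapping.get(attr[1:]))

    local, remote = Meta(), Meta()
    local._created, local._modified = 100, 200
    remote.deserialize(local.serialize())
    assert remote._created == 100 and remote._modified == 200
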
</ins><span class="cx">     @classmethod
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def listHomes(cls, txn):
</span><span class="lines">@@ -3230,16 +1795,93 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><ins>+    def homeWithUID(cls, txn, uid, status=None, create=False, authzUID=None):
+        return cls.homeWith(txn, None, uid, status, create=create, authzUID=authzUID)
+
+
+    @classmethod
+    def homeWithResourceID(cls, txn, rid):
+        return cls.homeWith(txn, rid, None)
+
+
+    @classmethod
</ins><span class="cx">     @inlineCallbacks
</span><del>-    def homeWithUID(cls, txn, uid, create=False, authzUID=None):
</del><ins>+    def homeWith(cls, txn, rid, uid, status=None, create=False, authzUID=None):
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        @param uid: I'm going to assume uid is utf-8 encoded bytes
</del><ins>+        Look up or create a home based on either its resource id or its uid. If a status is given,
+        return only the home matching that status. If status is L{None}, look up any regular
+        status type (normal, external or purging). When creating with status L{None}, create
+        one with a status matching the directory record's thisServer() value. The only
+        other status that can be directly created is migrating.
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        homeObject = yield cls.makeClass(txn, uid, authzUID=authzUID)
-        if homeObject is not None:
</del><ins>+
+        # Setup the SQL query and query cacher keys
+        queryCacher = txn._queryCacher
+        cacheKeys = []
+        if rid is not None:
+            query = cls._homeSchema.RESOURCE_ID == rid
+            if queryCacher:
+                cacheKeys.append(queryCacher.keyForHomeWithID(cls._homeType, rid, status))
+        elif uid is not None:
+            query = cls._homeSchema.OWNER_UID == uid
+            if status is not None:
+                query = query.And(cls._homeSchema.STATUS == status)
+                if queryCacher:
+                    cacheKeys.append(queryCacher.keyForHomeWithUID(cls._homeType, uid, status))
+            else:
+                statusSet = (_HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL, _HOME_STATUS_PURGING)
+                if txn._allowDisabled:
+                    statusSet += (_HOME_STATUS_DISABLED,)
+                query = query.And(cls._homeSchema.STATUS.In(statusSet))
+                if queryCacher:
+                    for item in statusSet:
+                        cacheKeys.append(queryCacher.keyForHomeWithUID(cls._homeType, uid, item))
+        else:
+            raise AssertionError(&quot;One of rid or uid must be set&quot;)
+
+        # Try to fetch a result from the query cache first
+        for cacheKey in cacheKeys:
+            result = (yield queryCacher.get(cacheKey))
+            if result is not None:
+                break
+        else:
+            result = None
+
+        # If nothing in the cache, do the SQL query and cache the result
+        if result is None:
+            results = yield Select(
+                cls.homeColumns(),
+                From=cls._homeSchema,
+                Where=query,
+            ).on(txn)
+
+            if len(results) &gt; 1:
+                # Pick the best one in order: normal, disabled and external
+                byStatus = dict([(result[cls.homeColumns().index(cls._homeSchema.STATUS)], result) for result in results])
+                result = byStatus.get(_HOME_STATUS_NORMAL)
+                if result is None:
+                    result = byStatus.get(_HOME_STATUS_DISABLED)
+                if result is None:
+                    result = byStatus.get(_HOME_STATUS_EXTERNAL)
+            elif results:
+                result = results[0]
+            else:
+                result = None
+
+            if result and queryCacher:
+                if rid is not None:
+                    cacheKey = cacheKeys[0]
+                elif uid is not None:
+                    cacheKey = queryCacher.keyForHomeWithUID(cls._homeType, uid, result[cls.homeColumns().index(cls._homeSchema.STATUS)])
+                yield queryCacher.set(cacheKey, result)
+
+        if result:
+            # Return object that already exists in the store
+            homeObject = yield cls.makeClass(txn, result, authzUID=authzUID)
</ins><span class="cx">             returnValue(homeObject)
</span><span class="cx">         else:
</span><del>-            if not create:
</del><ins>+            # Can only create when uid is specified
+            if not create or uid is None:
</ins><span class="cx">                 returnValue(None)
</span><span class="cx"> 
</span><span class="cx">             # Determine if the user is local or external
</span><span class="lines">@@ -3247,8 +1889,18 @@
</span><span class="cx">             if record is None:
</span><span class="cx">                 raise DirectoryRecordNotFoundError(&quot;Cannot create home for UID since no directory record exists: {}&quot;.format(uid))
</span><span class="cx"> 
</span><del>-            state = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
</del><ins>+            if status is None:
+                createStatus = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
+            elif status == _HOME_STATUS_MIGRATING:
+                if record.thisServer():
+                    raise RecordNotAllowedError(&quot;Cannot migrate data for a user already hosted on this server&quot;)
+                createStatus = status
+            elif status in (_HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL,):
+                createStatus = status
+            else:
+                raise RecordNotAllowedError(&quot;Cannot create home with status {}: {}&quot;.format(status, uid))
</ins><span class="cx"> 
</span><ins>+
</ins><span class="cx">             # Use savepoint so we can do a partial rollback if there is a race condition
</span><span class="cx">             # where this row has already been inserted
</span><span class="cx">             savepoint = SavepointAction(&quot;homeWithUID&quot;)
</span><span class="lines">@@ -3260,7 +1912,7 @@
</span><span class="cx">                 resourceid = (yield Insert(
</span><span class="cx">                     {
</span><span class="cx">                         cls._homeSchema.OWNER_UID: uid,
</span><del>-                        cls._homeSchema.STATUS: state,
</del><ins>+                        cls._homeSchema.STATUS: createStatus,
</ins><span class="cx">                         cls._homeSchema.DATAVERSION: cls._dataVersionValue,
</span><span class="cx">                     },
</span><span class="cx">                     Return=cls._homeSchema.RESOURCE_ID
</span><span class="lines">@@ -3270,8 +1922,13 @@
</span><span class="cx">                 yield savepoint.rollback(txn)
</span><span class="cx"> 
</span><span class="cx">                 # Retry the query - row may exist now, if not re-raise
</span><del>-                homeObject = yield cls.makeClass(txn, uid, authzUID=authzUID)
-                if homeObject:
</del><ins>+                results = yield Select(
+                    cls.homeColumns(),
+                    From=cls._homeSchema,
+                    Where=query,
+                ).on(txn)
+                if results:
+                    homeObject = yield cls.makeClass(txn, results[0], authzUID=authzUID)
</ins><span class="cx">                     returnValue(homeObject)
</span><span class="cx">                 else:
</span><span class="cx">                     raise
</span><span class="lines">@@ -3279,27 +1936,27 @@
</span><span class="cx">                 yield savepoint.release(txn)
</span><span class="cx"> 
</span><span class="cx">                 # Note that we must not cache the owner_uid-&gt;resource_id
</span><del>-                # mapping in _cacher when creating as we don't want that to appear
</del><ins>+                # mapping in the query cacher when creating as we don't want that to appear
</ins><span class="cx">                 # until AFTER the commit
</span><del>-                home = yield cls.makeClass(txn, uid, no_cache=True, authzUID=authzUID)
-                yield home.createdHome()
-                returnValue(home)
</del><ins>+                results = yield Select(
+                    cls.homeColumns(),
+                    From=cls._homeSchema,
+                    Where=cls._homeSchema.RESOURCE_ID == resourceid,
+                ).on(txn)
+                homeObject = yield cls.makeClass(txn, results[0], authzUID=authzUID)
+                if homeObject.normal():
+                    yield homeObject.createdHome()
+                returnValue(homeObject)
</ins><span class="cx"> 
</span><span class="cx"> 
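
The cache probe at the top of homeWith() leans on Python's for/else: the else clause runs only when no break fired, i.e. when every candidate key missed. A self-contained sketch of that idiom, with a plain dict standing in for the query cacher:

    def probe(cache, keys):
        for key in keys:
            result = cache.get(key)
            if result is not None:
                break
        else:
            result = None   # every status-specific key missed (or keys was empty)
        return result

    cache = {'calendar:user01:normal': ('user01', 0)}
    print(probe(cache, ['calendar:user01:external', 'calendar:user01:normal']))
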
</span><del>-    @classmethod
-    @inlineCallbacks
-    def homeUIDWithResourceID(cls, txn, rid):
-        rows = (yield cls._ownerFromResourceID.on(txn, resourceID=rid))
-        if rows:
-            returnValue(rows[0][0])
-        else:
-            returnValue(None)
-
-
</del><span class="cx">     def __repr__(self):
</span><span class="cx">         return &quot;&lt;%s: %s, %s&gt;&quot; % (self.__class__.__name__, self._resourceID, self._ownerUID)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def cacheKey(self):
+        return &quot;{}-{}&quot;.format(self._status, self._ownerUID)
+
+
</ins><span class="cx">     def id(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Retrieve the store identifier for this home.
</span><span class="lines">@@ -3328,6 +1985,19 @@
</span><span class="cx">         return self._authzUID
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def status(self):
+        return self._status
+
+
+    def normal(self):
+        &quot;&quot;&quot;
+        Is this a normal (internal) home.
+
+        @return: a L{bool}.
+        &quot;&quot;&quot;
+        return self._status == _HOME_STATUS_NORMAL
+
+
</ins><span class="cx">     def external(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Is this an external home.
</span><span class="lines">@@ -3357,6 +2027,15 @@
</span><span class="cx">         return self._status == _HOME_STATUS_PURGING
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def migrating(self):
+        &quot;&quot;&quot;
+        Is this a migrating home.
+
+        @return: a L{bool}.
+        &quot;&quot;&quot;
+        return self._status == _HOME_STATUS_MIGRATING
+
+
</ins><span class="cx">     def purge(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Mark this home as being purged.
</span><span class="lines">@@ -3364,6 +2043,13 @@
</span><span class="cx">         return self.setStatus(_HOME_STATUS_PURGING)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def migrate(self):
+        &quot;&quot;&quot;
+        Mark this home as being migrated.
+        &quot;&quot;&quot;
+        return self.setStatus(_HOME_STATUS_MIGRATING)
+
+
</ins><span class="cx">     @inlineCallbacks
</span><span class="cx">     def setStatus(self, newStatus):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="lines">@@ -3375,10 +2061,67 @@
</span><span class="cx">                 {self._homeSchema.STATUS: newStatus},
</span><span class="cx">                 Where=(self._homeSchema.RESOURCE_ID == self._resourceID),
</span><span class="cx">             ).on(self._txn)
</span><ins>+            if self._txn._queryCacher:
+                yield self._txn._queryCacher.delete(self._txn._queryCacher.keyForHomeWithUID(
+                    self._homeType,
+                    self.uid(),
+                    self._status,
+                ))
+                yield self._txn._queryCacher.delete(self._txn._queryCacher.keyForHomeWithID(
+                    self._homeType,
+                    self.id(),
+                    self._status,
+                ))
</ins><span class="cx">             self._status = newStatus
</span><del>-            yield self._cacher.delete(self._ownerUID)
</del><span class="cx"> 
</span><span class="cx"> 
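
Both cache entries for a home are scoped by status, so setStatus() must evict the uid-keyed and id-keyed rows under the old status before mutating self._status. A minimal in-memory analogue (the key format is illustrative, not the real cacher's):

    def setStatus(cache, home, newStatus):
        # evict under the OLD status first; the keys embed the status value
        cache.pop('{}:uid:{}:{}'.format(home['type'], home['uid'], home['status']), None)
        cache.pop('{}:id:{}:{}'.format(home['type'], home['rid'], home['status']), None)
        home['status'] = newStatus

    home = {'type': 'calendar', 'uid': 'user01', 'rid': 42, 'status': 'normal'}
    cache = {'calendar:uid:user01:normal': 'row'}
    setStatus(cache, home, 'purging')
    assert not cache
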
</span><ins>+    @inlineCallbacks
+    def remove(self):
+
+        # Removing the home table entry does NOT remove the child class entry - it does remove
+        # the associated bind entry. So manually remove each child.
+        yield self.removeAllChildren()
+
+        r = self._childClass._revisionsSchema
+        yield Delete(
+            From=r,
+            Where=r.HOME_RESOURCE_ID == self._resourceID,
+        ).on(self._txn)
+
+        h = self._homeSchema
+        yield Delete(
+            From=h,
+            Where=h.RESOURCE_ID == self._resourceID,
+        ).on(self._txn)
+
+        yield self.properties()._removeResource()
+
+        if self._txn._queryCacher:
+            yield self._txn._queryCacher.delete(self._txn._queryCacher.keyForHomeWithUID(
+                self._homeType,
+                self.uid(),
+                self._status,
+            ))
+            yield self._txn._queryCacher.delete(self._txn._queryCacher.keyForHomeWithID(
+                self._homeType,
+                self.id(),
+                self._status,
+            ))
+
+
+    @inlineCallbacks
+    def removeAllChildren(self):
+        &quot;&quot;&quot;
+        Remove each child.
+        &quot;&quot;&quot;
+
+        children = yield self.loadChildren()
+        for child in children:
+            yield child.remove()
+            self._children.pop(child.name(), None)
+            self._children.pop(child.id(), None)
+
+
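
removeAllChildren() pops each child twice because the in-memory _children map indexes every child under both its name and its resource id. A tiny sketch of that dual-keyed map:

    children = {}

    def index(child):
        children[child['name']] = child   # keyed by name...
        children[child['id']] = child     # ...and again by resource id

    def forget(child):
        children.pop(child['name'], None)
        children.pop(child['id'], None)

    cal = {'name': 'calendar', 'id': 7}
    index(cal)
    forget(cal)
    assert not children
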
</ins><span class="cx">     def transaction(self):
</span><span class="cx">         return self._txn
</span><span class="cx"> 
</span><span class="lines">@@ -3496,15 +2239,15 @@
</span><span class="cx">         return self._childClass.objectWithID(self, resourceID)
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def childWithExternalID(self, externalID):
</del><ins>+    def childWithBindUID(self, bindUID):
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        Retrieve the child with the given C{externalID} contained in this
</del><ins>+        Retrieve the child with the given C{bindUID} contained in this
</ins><span class="cx">         home.
</span><span class="cx"> 
</span><span class="cx">         @param name: a string.
</span><span class="cx">         @return: an L{ICalendar} or C{None} if no such child exists.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        return self._childClass.objectWithExternalID(self, externalID)
</del><ins>+        return self._childClass.objectWithBindUID(self, bindUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     def allChildWithID(self, resourceID):
</span><span class="lines">@@ -3519,11 +2262,11 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><del>-    def createChildWithName(self, name, externalID=None):
</del><ins>+    def createChildWithName(self, name, bindUID=None):
</ins><span class="cx">         if name.startswith(&quot;.&quot;):
</span><span class="cx">             raise HomeChildNameNotAllowedError(name)
</span><span class="cx"> 
</span><del>-        child = yield self._childClass.create(self, name, externalID=externalID)
</del><ins>+        child = yield self._childClass.create(self, name, bindUID=bindUID)
</ins><span class="cx">         returnValue(child)
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -3598,13 +2341,18 @@
</span><span class="cx">         taken to invalid the cached value properly.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         if self._syncTokenRevision is None:
</span><del>-            self._syncTokenRevision = (yield self._syncTokenQuery.on(
-                self._txn, resourceID=self._resourceID))[0][0]
-            if self._syncTokenRevision is None:
-                self._syncTokenRevision = int((yield self._txn.calendarserverValue(&quot;MIN-VALID-REVISION&quot;)))
</del><ins>+            self._syncTokenRevision = yield self.syncTokenRevision()
</ins><span class="cx">         returnValue(&quot;%s_%s&quot; % (self._resourceID, self._syncTokenRevision))
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    @inlineCallbacks
+    def syncTokenRevision(self):
+        revision = (yield self._syncTokenQuery.on(self._txn, resourceID=self._resourceID))[0][0]
+        if revision is None:
+            revision = int((yield self._txn.calendarserverValue(&quot;MIN-VALID-REVISION&quot;)))
+        returnValue(revision)
+
+
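
A sync token is just the resource id and a revision number joined by an underscore. A self-contained sketch of building and parsing one (the split assumes, as the store does, that the id itself contains no underscore):

    def makeSyncToken(resourceID, revision):
        return '%s_%s' % (resourceID, revision)

    def revisionFromToken(token):
        _ignore_id, revision = token.split('_', 1)
        return int(revision)

    assert revisionFromToken(makeSyncToken(42, 101)) == 101
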
</ins><span class="cx">     @classproperty
</span><span class="cx">     def _changesQuery(cls):
</span><span class="cx">         bind = cls._bindSchema
</span><span class="lines">@@ -4091,1492 +2839,16 @@
</span><span class="cx">         Get the owner home for a shared child ID and the owner's name for that bound child.
</span><span class="cx">         Subclasses may override.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        ownerHomeID, ownerName = (yield self._childClass._ownerHomeWithResourceID.on(self._txn, resourceID=resourceID))[0]
-        ownerHome = yield self._txn.homeWithResourceID(self._homeType, ownerHomeID)
-        returnValue((ownerHome, ownerName))
-
-
-
-class _SharedSyncLogic(object):
-    &quot;&quot;&quot;
-    Logic for maintaining sync-token shared between notification collections and
-    shared collections.
-    &quot;&quot;&quot;
-
-    @classproperty
-    def _childSyncTokenQuery(cls):
-        &quot;&quot;&quot;
-        DAL query for retrieving the sync token of a L{CommonHomeChild} based on
-        its resource ID.
-        &quot;&quot;&quot;
-        rev = cls._revisionsSchema
-        return Select([Max(rev.REVISION)], From=rev,
-                      Where=rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;))
-
-
-    def revisionFromToken(self, token):
-        if token is None:
-            return 0
-        elif isinstance(token, str) or isinstance(token, unicode):
-            _ignore_uuid, revision = token.split(&quot;_&quot;, 1)
-            return int(revision)
-        else:
-            return token
-
-
-    @inlineCallbacks
-    def syncToken(self):
-        if self._syncTokenRevision is None:
-            self._syncTokenRevision = (yield self._childSyncTokenQuery.on(
-                self._txn, resourceID=self._resourceID))[0][0]
-            if self._syncTokenRevision is None:
-                self._syncTokenRevision = int((yield self._txn.calendarserverValue(&quot;MIN-VALID-REVISION&quot;)))
-        returnValue((&quot;%s_%s&quot; % (self._resourceID, self._syncTokenRevision,)))
-
-
-    def objectResourcesSinceToken(self, token):
-        raise NotImplementedError()
-
-
-    @classmethod
-    def _objectNamesSinceRevisionQuery(cls, deleted=True):
-        &quot;&quot;&quot;
-        DAL query for (resource, deleted-flag)
-        &quot;&quot;&quot;
-        rev = cls._revisionsSchema
-        where = (rev.REVISION &gt; Parameter(&quot;revision&quot;)).And(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;))
-        if not deleted:
-            where = where.And(rev.DELETED == False)
-        return Select(
-            [rev.RESOURCE_NAME, rev.DELETED],
-            From=rev,
-            Where=where,
-        )
-
-
-    def resourceNamesSinceToken(self, token):
-        &quot;&quot;&quot;
-        Return the changed and deleted resources since a particular sync-token. This simply extracts
-        the revision from the token and then calls L{resourceNamesSinceRevision}.
-
-        @param token: the sync-token to determine changes since
-        @type token: C{str}
-        &quot;&quot;&quot;
-
-        return self.resourceNamesSinceRevision(self.revisionFromToken(token))
-
-
-    @inlineCallbacks
-    def resourceNamesSinceRevision(self, revision):
-        &quot;&quot;&quot;
-        Return the changed and deleted resources since a particular revision.
-
-        @param revision: the revision to determine changes since
-        @type revision: C{int}
-        &quot;&quot;&quot;
-        changed = []
-        deleted = []
-        invalid = []
-        if revision:
-            minValidRevision = yield self._txn.calendarserverValue(&quot;MIN-VALID-REVISION&quot;)
-            if revision &lt; int(minValidRevision):
-                raise SyncTokenValidException
-
-            results = [
-                (name if name else &quot;&quot;, removed) for name, removed in (
-                    yield self._objectNamesSinceRevisionQuery().on(
-                        self._txn, revision=revision, resourceID=self._resourceID)
-                )
-            ]
-            results.sort(key=lambda x: x[1])
-
-            for name, wasdeleted in results:
-                if name:
-                    if wasdeleted:
-                        deleted.append(name)
-                    else:
-                        changed.append(name)
-        else:
-            changed = yield self.listObjectResources()
-
-        returnValue((changed, deleted, invalid))
-
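
The changed/deleted/invalid triple can be exercised without the database; a hedged in-memory analogue of the since-revision branch (MIN_VALID_REVISION stands in for the stored calendarserver value):

    MIN_VALID_REVISION = 10

    def namesSince(rows, revision):
        # rows: (name, deleted-flag) pairs with a revision newer than `revision`
        if revision and revision &lt; MIN_VALID_REVISION:
            raise ValueError('sync token too old - full resync required')
        changed, deleted = [], []
        for name, wasDeleted in sorted(rows, key=lambda row: row[1]):
            if name:
                (deleted if wasDeleted else changed).append(name)
        return changed, deleted, []

    print(namesSince([('a.ics', False), ('b.ics', True)], 12))  # (['a.ics'], ['b.ics'], [])
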
-
-    @classproperty
-    def _removeDeletedRevision(cls):
-        rev = cls._revisionsSchema
-        return Delete(From=rev,
-                      Where=(rev.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)).And(
-                          rev.COLLECTION_NAME == Parameter(&quot;collectionName&quot;)))
-
-
-    @classproperty
-    def _addNewRevision(cls):
-        rev = cls._revisionsSchema
-        return Insert(
-            {
-                rev.HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
-                rev.RESOURCE_ID: Parameter(&quot;resourceID&quot;),
-                rev.COLLECTION_NAME: Parameter(&quot;collectionName&quot;),
-                rev.RESOURCE_NAME: None,
-                # Always starts false; may be updated to be a tombstone
-                # later.
-                rev.DELETED: False
-            },
-            Return=[rev.REVISION]
-        )
-
-
-    @inlineCallbacks
-    def _initSyncToken(self):
-        yield self._removeDeletedRevision.on(
-            self._txn, homeID=self._home._resourceID, collectionName=self._name
-        )
-        self._syncTokenRevision = (yield (
-            self._addNewRevision.on(self._txn, homeID=self._home._resourceID,
-                                    resourceID=self._resourceID,
-                                    collectionName=self._name)))[0][0]
-        self._txn.bumpRevisionForObject(self)
-
-
-    @classproperty
-    def _renameSyncTokenQuery(cls):
-        &quot;&quot;&quot;
-        DAL query to change sync token for a rename (increment and adjust
-        resource name).
-        &quot;&quot;&quot;
-        rev = cls._revisionsSchema
-        return Update(
-            {
-                rev.REVISION: schema.REVISION_SEQ,
-                rev.COLLECTION_NAME: Parameter(&quot;name&quot;)
-            },
-            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And
-                  (rev.RESOURCE_NAME == None),
-            Return=rev.REVISION
-        )
-
-
-    @inlineCallbacks
-    def _renameSyncToken(self):
-        rows = yield self._renameSyncTokenQuery.on(
-            self._txn, name=self._name, resourceID=self._resourceID)
</del><ins>+        rows = yield self._childClass._ownerHomeWithResourceID.on(self._txn, resourceID=resourceID)
</ins><span class="cx">         if rows:
</span><del>-            self._syncTokenRevision = rows[0][0]
-            self._txn.bumpRevisionForObject(self)
</del><ins>+            ownerHomeID, ownerName = rows[0]
+            ownerHome = yield self._txn.homeWithResourceID(self._homeType, ownerHomeID)
+            returnValue((ownerHome, ownerName))
</ins><span class="cx">         else:
</span><del>-            yield self._initSyncToken()
</del><ins>+            returnValue((None, None))
</ins><span class="cx"> 
</span><span class="cx"> 
</span><del>-    @classproperty
-    def _bumpSyncTokenQuery(cls):
-        &quot;&quot;&quot;
-        DAL query to change collection sync token. Note this can impact multiple rows if the
-        collection is shared.
-        &quot;&quot;&quot;
-        rev = cls._revisionsSchema
-        return Update(
-            {rev.REVISION: schema.REVISION_SEQ, },
-            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And
-                  (rev.RESOURCE_NAME == None)
-        )
</del><span class="cx"> 
</span><del>-
-    @inlineCallbacks
-    def _bumpSyncToken(self):
-
-        if not self._txn.isRevisionBumpedAlready(self):
-            self._txn.bumpRevisionForObject(self)
-            yield self._bumpSyncTokenQuery.on(
-                self._txn,
-                resourceID=self._resourceID,
-            )
-            self._syncTokenRevision = None
-
-
-    @classproperty
-    def _deleteSyncTokenQuery(cls):
-        &quot;&quot;&quot;
-        DAL query to remove all child revision information. The revision for the collection
-        itself is not touched.
-        &quot;&quot;&quot;
-        rev = cls._revisionsSchema
-        return Delete(
-            From=rev,
-            Where=(rev.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)).And
-                  (rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And
-                  (rev.COLLECTION_NAME == None)
-        )
-
-
-    @classproperty
-    def _sharedRemovalQuery(cls):
-        &quot;&quot;&quot;
-        DAL query to indicate a shared collection has been deleted.
-        &quot;&quot;&quot;
-        rev = cls._revisionsSchema
-        return Update(
-            {
-                rev.RESOURCE_ID: None,
-                rev.REVISION: schema.REVISION_SEQ,
-                rev.DELETED: True
-            },
-            Where=(rev.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)).And(
-                rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
-                rev.RESOURCE_NAME == None)
-        )
-
-
-    @classproperty
-    def _unsharedRemovalQuery(cls):
-        &quot;&quot;&quot;
-        DAL query to indicate an owned collection has been deleted.
-        &quot;&quot;&quot;
-        rev = cls._revisionsSchema
-        return Update(
-            {
-                rev.RESOURCE_ID: None,
-                rev.REVISION: schema.REVISION_SEQ,
-                rev.DELETED: True
-            },
-            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
-                rev.RESOURCE_NAME == None),
-        )
-
-
-    @inlineCallbacks
-    def _deletedSyncToken(self, sharedRemoval=False):
-        &quot;&quot;&quot;
-        When a collection is deleted we remove all the revision information for its child resources.
-        We update the collection's sync token to indicate it has been deleted - that way a sync on
-        the home collection can report the deletion of the collection.
-
-        @param sharedRemoval: indicates whether the collection being removed is shared
-        @type sharedRemoval: L{bool}
-        &quot;&quot;&quot;
-        # Remove all child entries
-        yield self._deleteSyncTokenQuery.on(self._txn,
-                                            homeID=self._home._resourceID,
-                                            resourceID=self._resourceID)
-
-        # If this is a share being removed then we only mark this one specific
-        # home/resource-id as being deleted.  On the other hand, if it is a
-        # non-shared collection, then we need to mark all collections
-        # with the resource-id as being deleted to account for direct shares.
-        if sharedRemoval:
-            yield self._sharedRemovalQuery.on(self._txn,
-                                              homeID=self._home._resourceID,
-                                              resourceID=self._resourceID)
-        else:
-            yield self._unsharedRemovalQuery.on(self._txn,
-                                                resourceID=self._resourceID)
-        self._syncTokenRevision = None
-
-
-    def _insertRevision(self, name):
-        return self._changeRevision(&quot;insert&quot;, name)
-
-
-    def _updateRevision(self, name):
-        return self._changeRevision(&quot;update&quot;, name)
-
-
-    def _deleteRevision(self, name):
-        return self._changeRevision(&quot;delete&quot;, name)
-
-
-    @classproperty
-    def _deleteBumpTokenQuery(cls):
-        rev = cls._revisionsSchema
-        return Update(
-            {rev.REVISION: schema.REVISION_SEQ, rev.DELETED: True},
-            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
-                rev.RESOURCE_NAME == Parameter(&quot;name&quot;)),
-            Return=rev.REVISION
-        )
-
-
-    @classproperty
-    def _updateBumpTokenQuery(cls):
-        rev = cls._revisionsSchema
-        return Update(
-            {rev.REVISION: schema.REVISION_SEQ},
-            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
-                rev.RESOURCE_NAME == Parameter(&quot;name&quot;)),
-            Return=rev.REVISION
-        )
-
-
-    @classproperty
-    def _insertFindPreviouslyNamedQuery(cls):
-        rev = cls._revisionsSchema
-        return Select(
-            [rev.RESOURCE_ID],
-            From=rev,
-            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
-                rev.RESOURCE_NAME == Parameter(&quot;name&quot;))
-        )
-
-
-    @classproperty
-    def _updatePreviouslyNamedQuery(cls):
-        rev = cls._revisionsSchema
-        return Update(
-            {rev.REVISION: schema.REVISION_SEQ, rev.DELETED: False},
-            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
-                rev.RESOURCE_NAME == Parameter(&quot;name&quot;)),
-            Return=rev.REVISION
-        )
-
-
-    @classproperty
-    def _completelyNewRevisionQuery(cls):
-        rev = cls._revisionsSchema
-        return Insert(
-            {
-                rev.HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
-                rev.RESOURCE_ID: Parameter(&quot;resourceID&quot;),
-                rev.RESOURCE_NAME: Parameter(&quot;name&quot;),
-                rev.REVISION: schema.REVISION_SEQ,
-                rev.DELETED: False
-            },
-            Return=rev.REVISION
-        )
-
-
-    @classproperty
-    def _completelyNewDeletedRevisionQuery(cls):
-        rev = cls._revisionsSchema
-        return Insert(
-            {
-                rev.HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
-                rev.RESOURCE_ID: Parameter(&quot;resourceID&quot;),
-                rev.RESOURCE_NAME: Parameter(&quot;name&quot;),
-                rev.REVISION: schema.REVISION_SEQ,
-                rev.DELETED: True
-            },
-            Return=rev.REVISION
-        )
-
-
-    @inlineCallbacks
-    def _changeRevision(self, action, name):
-
-        # Need to handle the case where for some reason the revision entry is
-        # actually missing. For a &quot;delete&quot; we don't care, for an &quot;update&quot; we
-        # will turn it into an &quot;insert&quot;.
-        if action == &quot;delete&quot;:
-            rows = (
-                yield self._deleteBumpTokenQuery.on(
-                    self._txn, resourceID=self._resourceID, name=name))
-            if rows:
-                self._syncTokenRevision = rows[0][0]
-            else:
-                self._syncTokenRevision = (
-                    yield self._completelyNewDeletedRevisionQuery.on(
-                        self._txn, homeID=self.ownerHome()._resourceID,
-                        resourceID=self._resourceID, name=name)
-                )[0][0]
-
-        elif action == &quot;update&quot;:
-            rows = (
-                yield self._updateBumpTokenQuery.on(
-                    self._txn, resourceID=self._resourceID, name=name))
-            if rows:
-                self._syncTokenRevision = rows[0][0]
-            else:
-                self._syncTokenRevision = (
-                    yield self._completelyNewRevisionQuery.on(
-                        self._txn, homeID=self.ownerHome()._resourceID,
-                        resourceID=self._resourceID, name=name)
-                )[0][0]
-
-        elif action == &quot;insert&quot;:
-            # Note that an &quot;insert&quot; may happen for a resource that previously
-            # existed and then was deleted. In that case an entry in the
-            # REVISIONS table still exists so we have to detect that and do db
-            # INSERT or UPDATE as appropriate
-
-            found = bool((
-                yield self._insertFindPreviouslyNamedQuery.on(
-                    self._txn, resourceID=self._resourceID, name=name)))
-            if found:
-                self._syncTokenRevision = (
-                    yield self._updatePreviouslyNamedQuery.on(
-                        self._txn, resourceID=self._resourceID, name=name)
-                )[0][0]
-            else:
-                self._syncTokenRevision = (
-                    yield self._completelyNewRevisionQuery.on(
-                        self._txn, homeID=self.ownerHome()._resourceID,
-                        resourceID=self._resourceID, name=name)
-                )[0][0]
-        yield self._maybeNotify()
-        returnValue(self._syncTokenRevision)
-
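
_changeRevision() degrades every action to an upsert because the revision row may be missing, or may be a tombstone left by an earlier delete. A minimal dict-backed analogue of those branches, mapping name to (revision, deleted):

    def changeRevision(table, action, name, newRev):
        exists = name in table
        if action == 'delete':
            table[name] = (newRev, True)        # tombstone, inserted if missing
        elif action == 'update':
            table[name] = (newRev, table[name][1] if exists else False)
        elif action == 'insert':
            table[name] = (newRev, False)       # also clears an old tombstone
        return newRev

    table = {}
    changeRevision(table, 'insert', 'a.ics', 1)
    changeRevision(table, 'delete', 'a.ics', 2)
    changeRevision(table, 'insert', 'a.ics', 3)  # re-created after delete
    assert table['a.ics'] == (3, False)
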
-
-    def _maybeNotify(self):
-        &quot;&quot;&quot;
-        Maybe notify changed.  (Overridden in NotificationCollection.)
-        &quot;&quot;&quot;
-        return succeed(None)
-
-
-
-SharingInvitation = namedtuple(
-    &quot;SharingInvitation&quot;,
-    [&quot;uid&quot;, &quot;ownerUID&quot;, &quot;ownerHomeID&quot;, &quot;shareeUID&quot;, &quot;shareeHomeID&quot;, &quot;mode&quot;, &quot;status&quot;, &quot;summary&quot;]
-)
-
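
SharingInvitation is a plain value object; constructing one looks like this (the field values are made up for illustration):

    from collections import namedtuple

    SharingInvitation = namedtuple(
        'SharingInvitation',
        ['uid', 'ownerUID', 'ownerHomeID', 'shareeUID', 'shareeHomeID', 'mode', 'status', 'summary']
    )

    invitation = SharingInvitation('s1', 'user01', 1, 'user02', 2, 'read', 'invited', 'team calendar')
    print(invitation.shareeUID, invitation.status)  # user02 invited
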
-
-
-class SharingMixIn(object):
-    &quot;&quot;&quot;
-    Common class for CommonHomeChild and AddressBookObject
-    &quot;&quot;&quot;
-
-    @classproperty
-    def _bindInsertQuery(cls, **kw):
-        &quot;&quot;&quot;
-        DAL statement to create a bind entry that connects a collection to its
-        home.
-        &quot;&quot;&quot;
-        bind = cls._bindSchema
-        return Insert({
-            bind.HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
-            bind.RESOURCE_ID: Parameter(&quot;resourceID&quot;),
-            bind.EXTERNAL_ID: Parameter(&quot;externalID&quot;),
-            bind.RESOURCE_NAME: Parameter(&quot;name&quot;),
-            bind.BIND_MODE: Parameter(&quot;mode&quot;),
-            bind.BIND_STATUS: Parameter(&quot;bindStatus&quot;),
-            bind.MESSAGE: Parameter(&quot;message&quot;),
-        })
-
-
-    @classmethod
-    def _updateBindColumnsQuery(cls, columnMap):
-        bind = cls._bindSchema
-        return Update(
-            columnMap,
-            Where=(bind.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
-                bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)),
-        )
-
-
-    @classproperty
-    def _deleteBindForResourceIDAndHomeID(cls):
-        bind = cls._bindSchema
-        return Delete(
-            From=bind,
-            Where=(bind.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
-                bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)),
-        )
-
-
-    @classmethod
-    def _bindFor(cls, condition):
-        bind = cls._bindSchema
-        columns = cls.bindColumns() + cls.additionalBindColumns()
-        return Select(
-            columns,
-            From=bind,
-            Where=condition
-        )
-
-
-    @classmethod
-    def _bindInviteFor(cls, condition):
-        home = cls._homeSchema
-        bind = cls._bindSchema
-        return Select(
-            [
-                home.OWNER_UID,
-                bind.HOME_RESOURCE_ID,
-                bind.RESOURCE_ID,
-                bind.RESOURCE_NAME,
-                bind.BIND_MODE,
-                bind.BIND_STATUS,
-                bind.MESSAGE,
-            ],
-            From=bind.join(home, on=(bind.HOME_RESOURCE_ID == home.RESOURCE_ID)),
-            Where=condition
-        )
-
-
-    @classproperty
-    def _sharedInvitationBindForResourceID(cls):
-        bind = cls._bindSchema
-        return cls._bindInviteFor(
-            (bind.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And
-            (bind.BIND_MODE != _BIND_MODE_OWN)
-        )
-
-
-    @classproperty
-    def _acceptedBindForHomeID(cls):
-        bind = cls._bindSchema
-        return cls._bindFor((bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;))
-                            .And(bind.BIND_STATUS == _BIND_STATUS_ACCEPTED))
-
-
-    @classproperty
-    def _bindForResourceIDAndHomeID(cls):
-        &quot;&quot;&quot;
-        DAL query that looks up home bind rows by home child
-        resource ID and home resource ID.
-        &quot;&quot;&quot;
-        bind = cls._bindSchema
-        return cls._bindFor((bind.RESOURCE_ID == Parameter(&quot;resourceID&quot;))
-                            .And(bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)))
-
-
-    @classproperty
-    def _bindForExternalIDAndHomeID(cls):
-        &quot;&quot;&quot;
-        DAL query that looks up home bind rows by home child
-        resource ID and home resource ID.
-        &quot;&quot;&quot;
-        bind = cls._bindSchema
-        return cls._bindFor((bind.EXTERNAL_ID == Parameter(&quot;externalID&quot;))
-                            .And(bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)))
-
-
-    @classproperty
-    def _bindForNameAndHomeID(cls):
-        &quot;&quot;&quot;
-        DAL query that looks up any bind rows by home child
-        resource ID and home resource ID.
-        &quot;&quot;&quot;
-        bind = cls._bindSchema
-        return cls._bindFor((bind.RESOURCE_NAME == Parameter(&quot;name&quot;))
-                            .And(bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)))
-
-
-    #
-    # Higher level API
-    #
-    @inlineCallbacks
-    def inviteUIDToShare(self, shareeUID, mode, summary=None, shareName=None):
-        &quot;&quot;&quot;
-        Invite a user to share this collection - either create the share if it does not exist, or
-        update the existing share with new values. Make sure a notification is sent as well.
-
-        @param shareeUID: UID of the sharee
-        @type shareeUID: C{str}
-        @param mode: access mode
-        @type mode: C{int}
-        @param summary: share message
-        @type summary: C{str}
-        &quot;&quot;&quot;
-
-        # Look for existing invite and update its fields or create new one
-        shareeView = yield self.shareeView(shareeUID)
-        if shareeView is not None:
-            status = _BIND_STATUS_INVITED if shareeView.shareStatus() in (_BIND_STATUS_DECLINED, _BIND_STATUS_INVALID) else None
-            yield self.updateShare(shareeView, mode=mode, status=status, summary=summary)
-        else:
-            shareeView = yield self.createShare(shareeUID=shareeUID, mode=mode, summary=summary, shareName=shareName)
-
-        # Check for external
-        if shareeView.viewerHome().external():
-            yield self._sendExternalInvite(shareeView)
-        else:
-            # Send invite notification
-            yield self._sendInviteNotification(shareeView)
-        returnValue(shareeView)
-
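
inviteUIDToShare() is update-or-create: an existing invite has its fields refreshed, and one that was declined or invalidated flips back to invited. An in-memory sketch of that state handling (the string constants are placeholders for the _BIND_STATUS_* values):

    INVITED, ACCEPTED, DECLINED, INVALID = 'invited', 'accepted', 'declined', 'invalid'

    def invite(shares, shareeUID, mode, summary):
        share = shares.get(shareeUID)
        if share is not None:
            if share['status'] in (DECLINED, INVALID):
                share['status'] = INVITED      # re-invite a declined sharee
            share['mode'], share['summary'] = mode, summary
        else:
            share = shares[shareeUID] = {'mode': mode, 'summary': summary, 'status': INVITED}
        return share

    shares = {'user02': {'mode': 'read', 'summary': 'old', 'status': DECLINED}}
    print(invite(shares, 'user02', 'write', 'try again')['status'])  # invited
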
-
-    @inlineCallbacks
-    def directShareWithUser(self, shareeUID, shareName=None):
-        &quot;&quot;&quot;
-        Create a direct share with the specified user. Note it is currently up to the app layer
-        to enforce access control - this is not ideal as we really should have control of that in
-        the store. Once we do, this api will need to verify that access is allowed for a direct share.
-
-        NB no invitations are used with direct sharing.
-
-        @param shareeUID: UID of the sharee
-        @type shareeUID: C{str}
-        &quot;&quot;&quot;
-
-        # Ignore if it already exists
-        shareeView = yield self.shareeView(shareeUID)
-        if shareeView is None:
-            shareeView = yield self.createShare(shareeUID=shareeUID, mode=_BIND_MODE_DIRECT, shareName=shareName)
-            yield shareeView.newShare()
-
-            # Check for external
-            if shareeView.viewerHome().external():
-                yield self._sendExternalInvite(shareeView)
-
-        returnValue(shareeView)
-
-
-    @inlineCallbacks
-    def uninviteUIDFromShare(self, shareeUID):
-        &quot;&quot;&quot;
-        Remove a user from a share. Make sure a notification is sent as well.
-
-        @param shareeUID: UID of the sharee
-        @type shareeUID: C{str}
-        &quot;&quot;&quot;
-        # Cancel invites - we'll just use whatever userid we are given
-
-        shareeView = yield self.shareeView(shareeUID)
-        if shareeView is not None:
-            if shareeView.viewerHome().external():
-                yield self._sendExternalUninvite(shareeView)
-            else:
-                # If current user state is accepted then we send an invite with the new state, otherwise
-                # we cancel any existing invites for the user
-                if not shareeView.direct():
-                    if shareeView.shareStatus() != _BIND_STATUS_ACCEPTED:
-                        yield self._removeInviteNotification(shareeView)
-                    else:
-                        yield self._sendInviteNotification(shareeView, notificationState=_BIND_STATUS_DELETED)
-
-            # Remove the bind
-            yield self.removeShare(shareeView)
-
-
-    @inlineCallbacks
-    def acceptShare(self, summary=None):
-        &quot;&quot;&quot;
-        This share is being accepted.
-        &quot;&quot;&quot;
-
-        if not self.direct() and self.shareStatus() != _BIND_STATUS_ACCEPTED:
-            if self.external():
-                yield self._replyExternalInvite(_BIND_STATUS_ACCEPTED, summary)
-            ownerView = yield self.ownerView()
-            yield ownerView.updateShare(self, status=_BIND_STATUS_ACCEPTED)
-            yield self.newShare(displayname=summary)
-            if not ownerView.external():
-                yield self._sendReplyNotification(ownerView, summary)
-
-
-    @inlineCallbacks
-    def declineShare(self):
-        &quot;&quot;&quot;
-        This share is being declined.
-        &quot;&quot;&quot;
-
-        if not self.direct() and self.shareStatus() != _BIND_STATUS_DECLINED:
-            if self.external():
-                yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
-            ownerView = yield self.ownerView()
-            yield ownerView.updateShare(self, status=_BIND_STATUS_DECLINED)
-            if not ownerView.external():
-                yield self._sendReplyNotification(ownerView)
-
-
-    @inlineCallbacks
-    def deleteShare(self):
-        &quot;&quot;&quot;
-        This share is being deleted (by the sharee) - either decline or remove (for direct shares).
-        &quot;&quot;&quot;
-
-        ownerView = yield self.ownerView()
-        if self.direct():
-            yield ownerView.removeShare(self)
-            if ownerView.external():
-                yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
-        else:
-            yield self.declineShare()
-
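
deleteShare() branches on the share kind: a direct share is torn down outright via the owner view, while an invited share is simply declined. A compact sketch of that decision (the dict fields are placeholders):

    def deleteShare(share):
        if share['direct']:
            share['removed'] = True            # owner view removes the bind
        elif share['status'] != 'declined':
            share['status'] = 'declined'
        return share

    print(deleteShare({'direct': False, 'status': 'accepted'}))
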
-
-    @inlineCallbacks
-    def ownerDeleteShare(self):
-        &quot;&quot;&quot;
-        This share is being deleted (by the owner) - either decline or remove (for direct shares).
-        &quot;&quot;&quot;
-
-        # Change status on store object
-        yield self.setShared(False)
-
-        # Remove all sharees (direct and invited)
-        for invitation in (yield self.sharingInvites()):
-            yield self.uninviteUIDFromShare(invitation.shareeUID)
-
-
-    def newShare(self, displayname=None):
-        &quot;&quot;&quot;
-        Override in derived classes to do any specific operations needed when a share
-        is first accepted.
-        &quot;&quot;&quot;
-        return succeed(None)
-
-
-    @inlineCallbacks
-    def allInvitations(self):
-        &quot;&quot;&quot;
-        Get list of all invitations (non-direct) to this object.
-        &quot;&quot;&quot;
-        invitations = yield self.sharingInvites()
-
-        # remove direct shares as those are not &quot;real&quot; invitations
-        invitations = filter(lambda x: x.mode != _BIND_MODE_DIRECT, invitations)
-        invitations.sort(key=lambda invitation: invitation.shareeUID)
-        returnValue(invitations)
-
-
-    @inlineCallbacks
-    def _sendInviteNotification(self, shareeView, notificationState=None):
-        &quot;&quot;&quot;
-        Called on the owner's resource.
-        &quot;&quot;&quot;
-        # When deleting the message is the sharee's display name
-        displayname = shareeView.shareMessage()
-        if notificationState == _BIND_STATUS_DELETED:
-            displayname = str(shareeView.properties().get(PropertyName.fromElement(element.DisplayName), displayname))
-
-        notificationtype = {
-            &quot;notification-type&quot;: &quot;invite-notification&quot;,
-            &quot;shared-type&quot;: shareeView.sharedResourceType(),
-        }
-        notificationdata = {
-            &quot;notification-type&quot;: &quot;invite-notification&quot;,
-            &quot;shared-type&quot;: shareeView.sharedResourceType(),
-            &quot;dtstamp&quot;: DateTime.getNowUTC().getText(),
-            &quot;owner&quot;: shareeView.ownerHome().uid(),
-            &quot;sharee&quot;: shareeView.viewerHome().uid(),
-            &quot;uid&quot;: shareeView.shareUID(),
-            &quot;status&quot;: shareeView.shareStatus() if notificationState is None else notificationState,
-            &quot;access&quot;: (yield shareeView.effectiveShareMode()),
-            &quot;ownerName&quot;: self.shareName(),
-            &quot;summary&quot;: displayname,
-        }
-        if hasattr(self, &quot;getSupportedComponents&quot;):
-            notificationdata[&quot;supported-components&quot;] = self.getSupportedComponents()
-
-        # Add to sharee's collection
-        notifications = yield self._txn.notificationsWithUID(shareeView.viewerHome().uid())
-        yield notifications.writeNotificationObject(shareeView.shareUID(), notificationtype, notificationdata)
-
-
-    @inlineCallbacks
-    def _sendReplyNotification(self, ownerView, summary=None):
-        &quot;&quot;&quot;
-        Create a reply notification based on the current state of this shared resource.
-        &quot;&quot;&quot;
-
-        # Generate invite XML
-        notificationUID = &quot;%s-reply&quot; % (self.shareUID(),)
-
-        notificationtype = {
-            &quot;notification-type&quot;: &quot;invite-reply&quot;,
-            &quot;shared-type&quot;: self.sharedResourceType(),
-        }
-
-        notificationdata = {
-            &quot;notification-type&quot;: &quot;invite-reply&quot;,
-            &quot;shared-type&quot;: self.sharedResourceType(),
-            &quot;dtstamp&quot;: DateTime.getNowUTC().getText(),
-            &quot;owner&quot;: self.ownerHome().uid(),
-            &quot;sharee&quot;: self.viewerHome().uid(),
-            &quot;status&quot;: self.shareStatus(),
-            &quot;ownerName&quot;: ownerView.shareName(),
-            &quot;in-reply-to&quot;: self.shareUID(),
-            &quot;summary&quot;: summary,
-        }
-
-        # Add to owner notification collection
-        notifications = yield self._txn.notificationsWithUID(self.ownerHome().uid())
-        yield notifications.writeNotificationObject(notificationUID, notificationtype, notificationdata)
-
-
-    @inlineCallbacks
-    def _removeInviteNotification(self, shareeView):
-        &quot;&quot;&quot;
-        Called on the owner's resource.
-        &quot;&quot;&quot;
-
-        # Remove from sharee's collection
-        notifications = yield self._txn.notificationsWithUID(shareeView.viewerHome().uid())
-        yield notifications.removeNotificationObjectWithUID(shareeView.shareUID())
-
-
-    #
-    # External/cross-pod API
-    #
-    @inlineCallbacks
-    def _sendExternalInvite(self, shareeView):
-
-        yield self._txn.store().conduit.send_shareinvite(
-            self._txn,
-            shareeView.ownerHome()._homeType,
-            shareeView.ownerHome().uid(),
-            self.id(),
-            self.shareName(),
-            shareeView.viewerHome().uid(),
-            shareeView.shareUID(),
-            shareeView.shareMode(),
-            shareeView.shareMessage(),
-            self.getInviteCopyProperties(),
-            supported_components=self.getSupportedComponents() if hasattr(self, &quot;getSupportedComponents&quot;) else None,
-        )
-
-
-    @inlineCallbacks
-    def _sendExternalUninvite(self, shareeView):
-
-        yield self._txn.store().conduit.send_shareuninvite(
-            self._txn,
-            shareeView.ownerHome()._homeType,
-            shareeView.ownerHome().uid(),
-            self.id(),
-            shareeView.viewerHome().uid(),
-            shareeView.shareUID(),
-        )
-
-
-    @inlineCallbacks
-    def _replyExternalInvite(self, status, summary=None):
-
-        yield self._txn.store().conduit.send_sharereply(
-            self._txn,
-            self.viewerHome()._homeType,
-            self.ownerHome().uid(),
-            self.viewerHome().uid(),
-            self.shareUID(),
-            status,
-            summary,
-        )
-
-
-    #
-    # Lower level API
-    #
-    @inlineCallbacks
-    def ownerView(self):
-        &quot;&quot;&quot;
-        Return the owner resource counterpart of this shared resource.
-
-        Note we have to play a trick with the property store to coerce it to match
-        the per-user properties for the owner.
-        &quot;&quot;&quot;
-        # Get the child of the owner home that has the same resource id as the owned one
-        ownerView = yield self.ownerHome().childWithID(self.id())
-        returnValue(ownerView)
-
-
-    @inlineCallbacks
-    def shareeView(self, shareeUID):
-        &quot;&quot;&quot;
-        Return the shared resource counterpart of this owned resource for the specified sharee.
-
-        Note we have to play a trick with the property store to coerce it to match
-        the per-user properties for the sharee.
-        &quot;&quot;&quot;
-
-        # Never return the owner's own resource
-        if self._home.uid() == shareeUID:
-            returnValue(None)
-
-        # Get the child of the sharee home that has the same resource id as the owned one
-        shareeHome = yield self._txn.homeWithUID(self._home._homeType, shareeUID, authzUID=shareeUID)
-        shareeView = (yield shareeHome.allChildWithID(self.id())) if shareeHome is not None else None
-        returnValue(shareeView)
-
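-    # A minimal sketch of how the two views pair up (names here are
-    # illustrative): an accepted share exposes the same underlying resource
-    # id from both the owner's and the sharee's side.
-    #
-    #   ownerView = yield shareeView.ownerView()
-    #   assert ownerView.id() == shareeView.id()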
-
-    @inlineCallbacks
-    def shareWithUID(self, shareeUID, mode, status=None, summary=None, shareName=None):
-        &quot;&quot;&quot;
-        Share this (owned) L{CommonHomeChild} with another principal.
-
-        @param shareeUID: The UID of the sharee.
-        @type shareeUID: L{str}
-
-        @param mode: The sharing mode; L{_BIND_MODE_READ} or
-            L{_BIND_MODE_WRITE} or L{_BIND_MODE_DIRECT}
-        @type mode: L{str}
-
-        @param status: The sharing status; L{_BIND_STATUS_INVITED} or
-            L{_BIND_STATUS_ACCEPTED}
-        @type status: L{str}
-
-        @param summary: The proposed message to go along with the share, which
-            will be used as the default display name.
-        @type summary: L{str}
-
-        @param shareName: The proposed name of the new share.
-        @type shareName: L{str}
-
-        @return: the name of the shared calendar in the new calendar home.
-        @rtype: L{str}
-        &quot;&quot;&quot;
-        shareeHome = yield self._txn.calendarHomeWithUID(shareeUID, create=True)
-        returnValue(
-            (yield self.shareWith(shareeHome, mode, status, summary, shareName))
-        )
-
-
-    @inlineCallbacks
-    def shareWith(self, shareeHome, mode, status=None, summary=None, shareName=None):
-        &quot;&quot;&quot;
-        Share this (owned) L{CommonHomeChild} with another home.
-
-        @param shareeHome: The home of the sharee.
-        @type shareeHome: L{CommonHome}
-
-        @param mode: The sharing mode; L{_BIND_MODE_READ} or
-            L{_BIND_MODE_WRITE} or L{_BIND_MODE_DIRECT}
-        @type mode: L{str}
-
-        @param status: The sharing status; L{_BIND_STATUS_INVITED} or
-            L{_BIND_STATUS_ACCEPTED}
-        @type status: L{str}
-
-        @param summary: The proposed message to go along with the share, which
-            will be used as the default display name.
-        @type summary: L{str}
-
-        @param shareName: The proposed name of the new share.
-        @type shareName: L{str}
-
-        @return: the name of the shared calendar in the new calendar home.
-        @rtype: L{str}
-        &quot;&quot;&quot;
-
-        if status is None:
-            status = _BIND_STATUS_ACCEPTED
-
-        @inlineCallbacks
-        def doInsert(subt):
-            newName = shareName if shareName is not None else self.newShareName()
-            yield self._bindInsertQuery.on(
-                subt,
-                homeID=shareeHome._resourceID,
-                resourceID=self._resourceID,
-                externalID=self._externalID,
-                name=newName,
-                mode=mode,
-                bindStatus=status,
-                message=summary
-            )
-            returnValue(newName)
-        try:
-            bindName = yield self._txn.subtransaction(doInsert)
-        except AllRetriesFailed:
-            # FIXME: catch more specific exception
-            child = yield shareeHome.allChildWithID(self._resourceID)
-            yield self.updateShare(
-                child, mode=mode, status=status,
-                summary=summary
-            )
-            bindName = child._name
-        else:
-            if status == _BIND_STATUS_ACCEPTED:
-                shareeView = yield shareeHome.anyObjectWithShareUID(bindName)
-                yield shareeView._initSyncToken()
-                yield shareeView._initBindRevision()
-
-        # Mark this as shared
-        yield self.setShared(True)
-
-        # Must send notification to ensure cache invalidation occurs
-        yield self.notifyPropertyChanged()
-        yield shareeHome.notifyChanged()
-
-        returnValue(bindName)
-
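-    # Sketch of a typical invite flow built on shareWith() (all names are
-    # illustrative; &quot;calendar&quot; is an owned L{CommonHomeChild}):
-    #
-    #   shareeHome = yield calendar._txn.calendarHomeWithUID(&quot;user02&quot;, create=True)
-    #   bindName = yield calendar.shareWith(
-    #       shareeHome, _BIND_MODE_READ,
-    #       status=_BIND_STATUS_INVITED, summary=&quot;Team calendar&quot;,
-    #   )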
-
-    @inlineCallbacks
-    def createShare(self, shareeUID, mode, summary=None, shareName=None):
-        &quot;&quot;&quot;
-        Create a new shared resource. If the mode is direct, the share is created in the accepted
-        state; otherwise it is created in the invited state.
-        &quot;&quot;&quot;
-        shareeHome = yield self._txn.homeWithUID(self.ownerHome()._homeType, shareeUID, create=True)
-
-        yield self.shareWith(
-            shareeHome,
-            mode=mode,
-            status=_BIND_STATUS_INVITED if mode != _BIND_MODE_DIRECT else _BIND_STATUS_ACCEPTED,
-            summary=summary,
-            shareName=shareName,
-        )
-        shareeView = yield self.shareeView(shareeUID)
-        returnValue(shareeView)
-
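-    # Sketch: a direct share is usable immediately, whereas a normal invite
-    # stays pending until the sharee replies (illustrative names only):
-    #
-    #   shareeView = yield calendar.createShare(&quot;user02&quot;, _BIND_MODE_DIRECT)
-    #   assert shareeView.shareStatus() == _BIND_STATUS_ACCEPTED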
-
-    @inlineCallbacks
-    def updateShare(self, shareeView, mode=None, status=None, summary=None):
-        &quot;&quot;&quot;
-        Update share mode, status, and message for a home child shared with
-        this (owned) L{CommonHomeChild}.
-
-        @param shareeView: The sharee home child that shares this.
-        @type shareeView: L{CommonHomeChild}
-
-        @param mode: The sharing mode; L{_BIND_MODE_READ} or
-            L{_BIND_MODE_WRITE} or None to not update
-        @type mode: L{str}
-
-        @param status: The sharing status; L{_BIND_STATUS_INVITED} or
-            L{_BIND_STATUS_ACCEPTED} or L{_BIND_STATUS_DECLINED} or
-            L{_BIND_STATUS_INVALID}  or None to not update
-        @type status: L{str}
-
-        @param summary: The proposed message to go along with the share, which
-            will be used as the default display name, or None to not update
-        @type summary: L{str}
-        &quot;&quot;&quot;
-        # TODO: raise a nice exception if shareeView is not, in fact, a shared
-        # version of this same L{CommonHomeChild}
-
-        # Only apply parameters that were supplied and whose values are actually changing
-        bind = self._bindSchema
-        columnMap = {}
-        if mode is not None and mode != shareeView._bindMode:
-            columnMap[bind.BIND_MODE] = mode
-        if status is not None and status != shareeView._bindStatus:
-            columnMap[bind.BIND_STATUS] = status
-        if summary is not None and summary != shareeView._bindMessage:
-            columnMap[bind.MESSAGE] = summary
-
-        if columnMap:
-
-            # Count accepted
-            if bind.BIND_STATUS in columnMap:
-                previouslyAcceptedCount = yield shareeView._previousAcceptCount()
-
-            yield self._updateBindColumnsQuery(columnMap).on(
-                self._txn,
-                resourceID=self._resourceID, homeID=shareeView._home._resourceID
-            )
-
-            # Update affected attributes
-            if bind.BIND_MODE in columnMap:
-                shareeView._bindMode = columnMap[bind.BIND_MODE]
-
-            if bind.BIND_STATUS in columnMap:
-                shareeView._bindStatus = columnMap[bind.BIND_STATUS]
-                yield shareeView._changedStatus(previouslyAcceptedCount)
-
-            if bind.MESSAGE in columnMap:
-                shareeView._bindMessage = columnMap[bind.MESSAGE]
-
-            yield shareeView.invalidateQueryCache()
-
-            # Must send notification to ensure cache invalidation occurs
-            yield self.notifyPropertyChanged()
-            yield shareeView.viewerHome().notifyChanged()
-
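-    # Sketch: accepting or declining a pending invite is just a status update
-    # applied from the owner's side (illustrative names only):
-    #
-    #   yield calendar.updateShare(shareeView, status=_BIND_STATUS_ACCEPTED)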
-
-    def _previousAcceptCount(self):
-        return succeed(1)
-
-
-    @inlineCallbacks
-    def _changedStatus(self, previouslyAcceptedCount):
-        if self._bindStatus == _BIND_STATUS_ACCEPTED:
-            yield self._initSyncToken()
-            yield self._initBindRevision()
-            self._home._children[self._name] = self
-            self._home._children[self._resourceID] = self
-        elif self._bindStatus in (_BIND_STATUS_INVITED, _BIND_STATUS_DECLINED):
-            yield self._deletedSyncToken(sharedRemoval=True)
-            self._home._children.pop(self._name, None)
-            self._home._children.pop(self._resourceID, None)
-
-
-    @inlineCallbacks
-    def removeShare(self, shareeView):
-        &quot;&quot;&quot;
-        Remove the shared version of this (owned) L{CommonHomeChild} from the
-        referenced L{CommonHome}.
-
-        @see: L{CommonHomeChild.shareWith}
-
-        @param shareeView: The shared resource being removed.
-
-        @return: a L{Deferred} which will fire with the previous shareUID
-        &quot;&quot;&quot;
-
-        # remove sync tokens
-        shareeHome = shareeView.viewerHome()
-        yield shareeView._deletedSyncToken(sharedRemoval=True)
-        shareeHome._children.pop(shareeView._name, None)
-        shareeHome._children.pop(shareeView._resourceID, None)
-
-        # Must send notification to ensure cache invalidation occurs
-        yield self.notifyPropertyChanged()
-        yield shareeHome.notifyChanged()
-
-        # delete binds including invites
-        yield self._deleteBindForResourceIDAndHomeID.on(
-            self._txn,
-            resourceID=self._resourceID,
-            homeID=shareeHome._resourceID,
-        )
-
-        yield shareeView.invalidateQueryCache()
-
-
-    @inlineCallbacks
-    def unshare(self):
-        &quot;&quot;&quot;
-        Unshares a collection, regardless of which &quot;direction&quot; it was shared.
-        &quot;&quot;&quot;
-        if self.owned():
-            # This collection may be shared to others
-            invites = yield self.sharingInvites()
-            for invite in invites:
-                shareeView = yield self.shareeView(invite.shareeUID)
-                yield self.removeShare(shareeView)
-        else:
-            # This collection is shared to me
-            ownerView = yield self.ownerView()
-            yield ownerView.removeShare(self)
-
-
-    @inlineCallbacks
-    def sharingInvites(self):
-        &quot;&quot;&quot;
-        Retrieve the list of all L{SharingInvitation}s for this L{CommonHomeChild}, irrespective of mode.
-
-        @return: L{SharingInvitation} objects
-        @rtype: a L{Deferred} which fires with a L{list} of L{SharingInvitation}s.
-        &quot;&quot;&quot;
-        if not self.owned():
-            returnValue([])
-
-        # get all binds for invitations
-        invitedRows = yield self._sharedInvitationBindForResourceID.on(
-            self._txn, resourceID=self._resourceID, homeID=self._home._resourceID
-        )
-
-        result = []
-        for homeUID, homeRID, _ignore_resourceID, resourceName, bindMode, bindStatus, bindMessage in invitedRows:
-            invite = SharingInvitation(
-                resourceName,
-                self.ownerHome().name(),
-                self.ownerHome().id(),
-                homeUID,
-                homeRID,
-                bindMode,
-                bindStatus,
-                bindMessage,
-            )
-            result.append(invite)
-        returnValue(result)
-
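-    # Sketch of inspecting current invites (attribute names as used elsewhere
-    # in this class; &quot;calendar&quot; is illustrative):
-    #
-    #   invites = yield calendar.sharingInvites()
-    #   shareeUIDs = [invite.shareeUID for invite in invites]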
-
-    @inlineCallbacks
-    def _initBindRevision(self):
-        yield self.syncToken() # init self._syncTokenRevision if None
-        self._bindRevision = self._syncTokenRevision
-
-        bind = self._bindSchema
-        yield self._updateBindColumnsQuery(
-            {bind.BIND_REVISION : Parameter(&quot;revision&quot;), }
-        ).on(
-            self._txn,
-            revision=self._bindRevision,
-            resourceID=self._resourceID,
-            homeID=self.viewerHome()._resourceID,
-        )
-        yield self.invalidateQueryCache()
-
-
-    def sharedResourceType(self):
-        &quot;&quot;&quot;
-        The sharing resource type. Needs to be overridden by each type of resource that can be shared.
-
-        @return: an identifier for the type of the share.
-        @rtype: C{str}
-        &quot;&quot;&quot;
-        return &quot;&quot;
-
-
-    def newShareName(self):
-        &quot;&quot;&quot;
-        Name used when creating a new share. By default this is a UUID.
-        &quot;&quot;&quot;
-        return str(uuid4())
-
-
-    def owned(self):
-        &quot;&quot;&quot;
-        @see: L{ICalendar.owned}
-        &quot;&quot;&quot;
-        return self._bindMode == _BIND_MODE_OWN
-
-
-    def isShared(self):
-        &quot;&quot;&quot;
-        For an owned collection, indicate whether it is shared.
-
-        @return: C{True} if shared, C{False} otherwise
-        @rtype: C{bool}
-        &quot;&quot;&quot;
-        return self.owned() and self._bindMessage == &quot;shared&quot;
-
-
-    @inlineCallbacks
-    def setShared(self, shared):
-        &quot;&quot;&quot;
-        Set an owned collection to the shared or unshared state. Strictly speaking this is not
-        needed, as &quot;shared&quot; really means the collection has invitees, but the current sharing spec
-        supports the notion of a shared collection that has not yet had invitees added. For the time
-        being we support that option by using a new MESSAGE value to indicate an owned collection
-        that is &quot;shared&quot;.
-
-        @param shared: whether or not the owned collection is &quot;shared&quot;
-        @type shared: C{bool}
-        &quot;&quot;&quot;
-        assert self.owned(), &quot;Cannot change share mode on a shared collection&quot;
-
-        # Only if change is needed
-        newMessage = &quot;shared&quot; if shared else None
-        if self._bindMessage == newMessage:
-            returnValue(None)
-
-        self._bindMessage = newMessage
-
-        bind = self._bindSchema
-        yield Update(
-            {bind.MESSAGE: self._bindMessage},
-            Where=(bind.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
-                bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)),
-        ).on(self._txn, resourceID=self._resourceID, homeID=self.viewerHome()._resourceID)
-
-        yield self.invalidateQueryCache()
-        yield self.notifyPropertyChanged()
-
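-    # Sketch (per the MESSAGE-column convention described above): an owned
-    # collection can be flagged &quot;shared&quot; before any invitees exist.
-    #
-    #   yield calendar.setShared(True)
-    #   assert calendar.isShared()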
-
-    def direct(self):
-        &quot;&quot;&quot;
-        Is this a &quot;direct&quot; share?
-
-        @return: a boolean indicating whether it's direct.
-        &quot;&quot;&quot;
-        return self._bindMode == _BIND_MODE_DIRECT
-
-
-    def indirect(self):
-        &quot;&quot;&quot;
-        Is this an &quot;indirect&quot; share?
-
-        @return: a boolean indicating whether it's indirect.
-        &quot;&quot;&quot;
-        return self._bindMode == _BIND_MODE_INDIRECT
-
-
-    def shareUID(self):
-        &quot;&quot;&quot;
-        @see: L{ICalendar.shareUID}
-        &quot;&quot;&quot;
-        return self.name()
-
-
-    def shareMode(self):
-        &quot;&quot;&quot;
-        @see: L{ICalendar.shareMode}
-        &quot;&quot;&quot;
-        return self._bindMode
-
-
-    def _effectiveShareMode(self, bindMode, viewerUID, txn):
-        &quot;&quot;&quot;
-        Get the effective share mode without a calendar object
-        &quot;&quot;&quot;
-        return bindMode
-
-
-    def effectiveShareMode(self):
-        &quot;&quot;&quot;
-        @see: L{ICalendar.shareMode}
-        &quot;&quot;&quot;
-        return self._bindMode
-
-
-    def shareName(self):
-        &quot;&quot;&quot;
-        This is a path-like name for the resource within the home being shared. For object resource
-        shares this will be a combination of the L{CommonHomeChild} name and the L{CommonObjectResource}
-        name. Otherwise it is just the L{CommonHomeChild} name. This is needed to expose a value to the
-        app layer so that it can construct a URI for the actual WebDAV resource being shared.
-        &quot;&quot;&quot;
-        name = self.name()
-        if self.sharedResourceType() == &quot;group&quot;:
-            name = self.parentCollection().name() + &quot;/&quot; + name
-        return name
-
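-    # For example (hypothetical values): a &quot;group&quot; share named &quot;team&quot; whose
-    # parent collection is named &quot;addressbook&quot; yields &quot;addressbook/team&quot;,
-    # while a calendar share is simply the calendar's own name.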
-
-    def shareStatus(self):
-        &quot;&quot;&quot;
-        @see: L{ICalendar.shareStatus}
-        &quot;&quot;&quot;
-        return self._bindStatus
-
-
-    def accepted(self):
-        &quot;&quot;&quot;
-        @see: L{ICalendar.shareStatus}
-        &quot;&quot;&quot;
-        return self._bindStatus == _BIND_STATUS_ACCEPTED
-
-
-    def shareMessage(self):
-        &quot;&quot;&quot;
-        @see: L{ICalendar.shareMessage}
-        &quot;&quot;&quot;
-        return self._bindMessage
-
-
-    def getInviteCopyProperties(self):
-        &quot;&quot;&quot;
-        Get a dictionary of property name/values (as strings) for properties that are shadowable and
-        need to be copied to a sharee's collection when an external (cross-pod) share is created.
-        Sub-classes should override to expose the properties they care about.
-        &quot;&quot;&quot;
-        return {}
-
-
-    def setInviteCopyProperties(self, props):
-        &quot;&quot;&quot;
-        Copy a set of shadowable properties (as name/value strings) onto this shared resource when
-        a cross-pod invite is processed. Sub-classes should override to expose the properties they
-        care about.
-        &quot;&quot;&quot;
-        pass
-
-
-    @classmethod
-    def metadataColumns(cls):
-        &quot;&quot;&quot;
-        Return a list of column names for retrieval of metadata. This allows
-        different child classes to have their own type-specific data, but still make use of the
-        common base logic.
-        &quot;&quot;&quot;
-
-        # Common behavior is to have created and modified
-
-        return (
-            cls._homeChildMetaDataSchema.CREATED,
-            cls._homeChildMetaDataSchema.MODIFIED,
-        )
-
-
-    @classmethod
-    def metadataAttributes(cls):
-        &quot;&quot;&quot;
-        Return a list of attribute names for retrieval of metadata. This allows
-        different child classes to have their own type-specific data, but still make use of the
-        common base logic.
-        &quot;&quot;&quot;
-
-        # Common behavior is to have created and modified
-
-        return (
-            &quot;_created&quot;,
-            &quot;_modified&quot;,
-        )
-
-
-    @classmethod
-    def bindColumns(cls):
-        &quot;&quot;&quot;
-        Return a list of column names for retrieval during creation. This allows
-        different child classes to have their own type-specific data, but still make use of the
-        common base logic.
-        &quot;&quot;&quot;
-
-        return (
-            cls._bindSchema.BIND_MODE,
-            cls._bindSchema.HOME_RESOURCE_ID,
-            cls._bindSchema.RESOURCE_ID,
-            cls._bindSchema.EXTERNAL_ID,
-            cls._bindSchema.RESOURCE_NAME,
-            cls._bindSchema.BIND_STATUS,
-            cls._bindSchema.BIND_REVISION,
-            cls._bindSchema.MESSAGE
-        )
-
-
-    @classmethod
-    def bindAttributes(cls):
-        &quot;&quot;&quot;
-        Return a list of attribute names for retrieval during creation. This allows
-        different child classes to have their own type-specific data, but still make use of the
-        common base logic.
-        &quot;&quot;&quot;
-
-        return (
-            &quot;_bindMode&quot;,
-            &quot;_homeResourceID&quot;,
-            &quot;_resourceID&quot;,
-            &quot;_externalID&quot;,
-            &quot;_name&quot;,
-            &quot;_bindStatus&quot;,
-            &quot;_bindRevision&quot;,
-            &quot;_bindMessage&quot;,
-        )
-
-    bindColumnCount = 8
-
-    @classmethod
-    def additionalBindColumns(cls):
-        &quot;&quot;&quot;
-        Return a list of column names for retrieval during creation. This allows
-        different child classes to have their own type-specific data, but still make use of the
-        common base logic.
-        &quot;&quot;&quot;
-
-        return ()
-
-
-    @classmethod
-    def additionalBindAttributes(cls):
-        &quot;&quot;&quot;
-        Return a list of attribute names for retrieval during creation. This allows
-        different child classes to have their own type-specific data, but still make use of the
-        common base logic.
-        &quot;&quot;&quot;
-
-        return ()
-
-
-    @classproperty
-    def _childrenAndMetadataForHomeID(cls):
-        bind = cls._bindSchema
-        child = cls._homeChildSchema
-        childMetaData = cls._homeChildMetaDataSchema
-
-        columns = cls.bindColumns() + cls.additionalBindColumns() + cls.metadataColumns()
-        return Select(
-            columns,
-            From=child.join(
-                bind, child.RESOURCE_ID == bind.RESOURCE_ID,
-                'left outer').join(
-                    childMetaData, childMetaData.RESOURCE_ID == bind.RESOURCE_ID,
-                    'left outer'),
-            Where=(bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)).And(
-                bind.BIND_STATUS == _BIND_STATUS_ACCEPTED)
-        )
-
-
-    @classmethod
-    def _revisionsForResourceIDs(cls, resourceIDs):
-        rev = cls._revisionsSchema
-        return Select(
-            [rev.RESOURCE_ID, Max(rev.REVISION)],
-            From=rev,
-            Where=rev.RESOURCE_ID.In(Parameter(&quot;resourceIDs&quot;, len(resourceIDs))).And(
-                (rev.RESOURCE_NAME != None).Or(rev.DELETED == False)),
-            GroupBy=rev.RESOURCE_ID
-        )
-
-
-    @inlineCallbacks
-    def invalidateQueryCache(self):
-        queryCacher = self._txn._queryCacher
-        if queryCacher is not None:
-            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForHomeChildMetaData(self._resourceID))
-            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithName(self._home._resourceID, self._name))
-            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID))
-            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithExternalID(self._home._resourceID, self._externalID))
-
-
-
</del><span class="cx"> class CommonHomeChild(FancyEqMixin, Memoizable, _SharedSyncLogic, HomeChildBase, SharingMixIn):
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx">     Common ancestor class of AddressBooks and Calendars.
</span><span class="lines">@@ -5590,6 +2862,11 @@
</span><span class="cx">     )
</span><span class="cx"> 
</span><span class="cx">     _externalClass = None
</span><ins>+    _homeRecordClass = None
+    _metadataRecordClass = None
+    _bindRecordClass = None
+    _bindHomeIDAttributeName = None
+    _bindResourceIDAttributeName = None
</ins><span class="cx">     _objectResourceClass = None
</span><span class="cx"> 
</span><span class="cx">     _bindSchema = None
</span><span class="lines">@@ -5623,7 +2900,7 @@
</span><span class="cx">         @rtype: L{CommonHomeChild}
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-        bindMode, _ignore_homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = bindData
</del><ins>+        _ignore_homeID, resourceID, name, bindMode, bindStatus, bindRevision, bindUID, bindMessage = bindData
</ins><span class="cx"> 
</span><span class="cx">         if ownerHome is None:
</span><span class="cx">             if bindMode == _BIND_MODE_OWN:
</span><span class="lines">@@ -5634,7 +2911,7 @@
</span><span class="cx">         else:
</span><span class="cx">             ownerName = None
</span><span class="cx"> 
</span><del>-        c = cls._externalClass if ownerHome.externalClass() else cls
</del><ins>+        c = cls._externalClass if ownerHome and ownerHome.externalClass() else cls
</ins><span class="cx">         child = c(
</span><span class="cx">             home=home,
</span><span class="cx">             name=name,
</span><span class="lines">@@ -5645,7 +2922,7 @@
</span><span class="cx">             message=bindMessage,
</span><span class="cx">             ownerHome=ownerHome,
</span><span class="cx">             ownerName=ownerName,
</span><del>-            externalID=externalID,
</del><ins>+            bindUID=bindUID,
</ins><span class="cx">         )
</span><span class="cx"> 
</span><span class="cx">         if additionalBindData:
</span><span class="lines">@@ -5658,7 +2935,7 @@
</span><span class="cx"> 
</span><span class="cx">         # We have to re-adjust the property store object to account for possible shared
</span><span class="cx">         # collections as previously we loaded them all as if they were owned
</span><del>-        if propstore and bindMode != _BIND_MODE_OWN:
</del><ins>+        if ownerHome and propstore and bindMode != _BIND_MODE_OWN:
</ins><span class="cx">             propstore._setDefaultUserUID(ownerHome.uid())
</span><span class="cx">         yield child._loadPropertyStore(propstore)
</span><span class="cx"> 
</span><span class="lines">@@ -5667,10 +2944,10 @@
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="cx">     @inlineCallbacks
</span><del>-    def _getDBData(cls, home, name, resourceID, externalID):
</del><ins>+    def _getDBData(cls, home, name, resourceID, bindUID):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Given a set of identifying information, load the data rows for the object. Only one of
</span><del>-        L{name}, L{resourceID} or L{externalID} is specified - others are C{None}.
</del><ins>+        L{name}, L{resourceID} or L{bindUID} is specified - others are C{None}.
</ins><span class="cx"> 
</span><span class="cx">         @param home: the parent home object
</span><span class="cx">         @type home: L{CommonHome}
</span><span class="lines">@@ -5678,8 +2955,8 @@
</span><span class="cx">         @type name: C{str}
</span><span class="cx">         @param resourceID: the resource ID
</span><span class="cx">         @type resourceID: C{int}
</span><del>-        @param externalID: the resource ID of the external (cross-pod) referenced item
-        @type externalID: C{int}
</del><ins>+        @param bindUID: the unique ID of the external (cross-pod) referenced item
+        @type bindUID: C{int}
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">         # Get the bind row data
</span><span class="lines">@@ -5692,8 +2969,8 @@
</span><span class="cx">                 cacheKey = queryCacher.keyForObjectWithName(home._resourceID, name)
</span><span class="cx">             elif resourceID:
</span><span class="cx">                 cacheKey = queryCacher.keyForObjectWithResourceID(home._resourceID, resourceID)
</span><del>-            elif externalID:
-                cacheKey = queryCacher.keyForObjectWithExternalID(home._resourceID, externalID)
</del><ins>+            elif bindUID:
+                cacheKey = queryCacher.keyForObjectWithBindUID(home._resourceID, bindUID)
</ins><span class="cx">             row = yield queryCacher.get(cacheKey)
</span><span class="cx"> 
</span><span class="cx">         if row is None:
</span><span class="lines">@@ -5702,8 +2979,8 @@
</span><span class="cx">                 rows = yield cls._bindForNameAndHomeID.on(home._txn, name=name, homeID=home._resourceID)
</span><span class="cx">             elif resourceID:
</span><span class="cx">                 rows = yield cls._bindForResourceIDAndHomeID.on(home._txn, resourceID=resourceID, homeID=home._resourceID)
</span><del>-            elif externalID:
-                rows = yield cls._bindForExternalIDAndHomeID.on(home._txn, externalID=externalID, homeID=home._resourceID)
</del><ins>+            elif bindUID:
+                rows = yield cls._bindForBindUIDAndHomeID.on(home._txn, bindUID=bindUID, homeID=home._resourceID)
</ins><span class="cx">             row = rows[0] if rows else None
</span><span class="cx"> 
</span><span class="cx">         if not row:
</span><span class="lines">@@ -5713,7 +2990,7 @@
</span><span class="cx">             # Cache the result
</span><span class="cx">             queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithName(home._resourceID, name), row)
</span><span class="cx">             queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithResourceID(home._resourceID, resourceID), row)
</span><del>-            queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithExternalID(home._resourceID, externalID), row)
</del><ins>+            queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithBindUID(home._resourceID, bindUID), row)
</ins><span class="cx"> 
</span><span class="cx">         bindData = row[:cls.bindColumnCount]
</span><span class="cx">         additionalBindData = row[cls.bindColumnCount:cls.bindColumnCount + len(cls.additionalBindColumns())]
</span><span class="lines">@@ -5736,15 +3013,15 @@
</span><span class="cx">         returnValue((bindData, additionalBindData, metadataData,))
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def __init__(self, home, name, resourceID, mode, status, revision=0, message=None, ownerHome=None, ownerName=None, externalID=None):
</del><ins>+    def __init__(self, home, name, resourceID, mode, status, revision=0, message=None, ownerHome=None, ownerName=None, bindUID=None):
</ins><span class="cx"> 
</span><span class="cx">         self._home = home
</span><span class="cx">         self._name = name
</span><span class="cx">         self._resourceID = resourceID
</span><del>-        self._externalID = externalID
</del><span class="cx">         self._bindMode = mode
</span><span class="cx">         self._bindStatus = status
</span><span class="cx">         self._bindRevision = revision
</span><ins>+        self._bindUID = bindUID
</ins><span class="cx">         self._bindMessage = message
</span><span class="cx">         self._ownerHome = home if ownerHome is None else ownerHome
</span><span class="cx">         self._ownerName = name if ownerName is None else ownerName
</span><span class="lines">@@ -5808,9 +3085,10 @@
</span><span class="cx">         # Load from the main table first
</span><span class="cx">         dataRows = (yield cls._childrenAndMetadataForHomeID.on(home._txn, homeID=home._resourceID))
</span><span class="cx"> 
</span><ins>+        resourceID_index = cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)
</ins><span class="cx">         if dataRows:
</span><span class="cx">             # Get property stores
</span><del>-            childResourceIDs = [dataRow[2] for dataRow in dataRows]
</del><ins>+            childResourceIDs = [dataRow[resourceID_index] for dataRow in dataRows]
</ins><span class="cx"> 
</span><span class="cx">             propertyStores = yield PropertyStore.forMultipleResourcesWithResourceIDs(
</span><span class="cx">                 home.uid(), None, None, home._txn, childResourceIDs
</span><span class="lines">@@ -5823,7 +3101,7 @@
</span><span class="cx">         # Create the actual objects merging in properties
</span><span class="cx">         for dataRow in dataRows:
</span><span class="cx">             bindData = dataRow[:cls.bindColumnCount]
</span><del>-            resourceID = bindData[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
</del><ins>+            resourceID = bindData[resourceID_index]
</ins><span class="cx">             additionalBindData = dataRow[cls.bindColumnCount:cls.bindColumnCount + len(cls.additionalBindColumns())]
</span><span class="cx">             metadataData = dataRow[cls.bindColumnCount + len(cls.additionalBindColumns()):]
</span><span class="cx">             propstore = propertyStores.get(resourceID, None)
</span><span class="lines">@@ -5846,13 +3124,13 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><del>-    def objectWithExternalID(cls, home, externalID, accepted=True):
-        return cls.objectWith(home, externalID=externalID, accepted=accepted)
</del><ins>+    def objectWithBindUID(cls, home, bindUID, accepted=True):
+        return cls.objectWith(home, bindUID=bindUID, accepted=accepted)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="cx">     @inlineCallbacks
</span><del>-    def objectWith(cls, home, name=None, resourceID=None, externalID=None, accepted=True):
</del><ins>+    def objectWith(cls, home, name=None, resourceID=None, bindUID=None, accepted=True):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Create the object using one of the specified arguments as the key to load it. One
</span><span class="cx">         and only one of the keyword arguments must be set.
</span><span class="lines">@@ -5872,7 +3150,7 @@
</span><span class="cx">         @rtype: C{CommonHomeChild}
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-        dbData = yield cls._getDBData(home, name, resourceID, externalID)
</del><ins>+        dbData = yield cls._getDBData(home, name, resourceID, bindUID)
</ins><span class="cx">         if dbData is None:
</span><span class="cx">             returnValue(None)
</span><span class="cx">         bindData, additionalBindData, metadataData = dbData
</span><span class="lines">@@ -5909,7 +3187,7 @@
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="cx">     @inlineCallbacks
</span><del>-    def create(cls, home, name, externalID=None):
</del><ins>+    def create(cls, home, name, bindUID=None):
</ins><span class="cx"> 
</span><span class="cx">         if (yield cls._bindForNameAndHomeID.on(home._txn, name=name, homeID=home._resourceID)):
</span><span class="cx">             raise HomeChildNameAlreadyExistsError(name)
</span><span class="lines">@@ -5924,7 +3202,7 @@
</span><span class="cx">         _created, _modified = (yield cls._insertHomeChildMetaData.on(home._txn, resourceID=resourceID))[0]
</span><span class="cx">         # Bind table needs entry
</span><span class="cx">         yield cls._bindInsertQuery.on(
</span><del>-            home._txn, homeID=home._resourceID, resourceID=resourceID, externalID=externalID,
</del><ins>+            home._txn, homeID=home._resourceID, resourceID=resourceID, bindUID=bindUID,
</ins><span class="cx">             name=name, mode=_BIND_MODE_OWN, bindStatus=_BIND_STATUS_ACCEPTED,
</span><span class="cx">             message=None,
</span><span class="cx">         )
</span><span class="lines">@@ -5961,15 +3239,6 @@
</span><span class="cx">         return self._resourceID
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def external_id(self):
-        &quot;&quot;&quot;
-        Retrieve the external store identifier for this collection.
-
-        @return: a string.
-        &quot;&quot;&quot;
-        return self._externalID
-
-
</del><span class="cx">     def external(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Is this an external home.
</span><span class="lines">@@ -5988,7 +3257,7 @@
</span><span class="cx">         return self.ownerHome().externalClass()
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def externalize(self):
</del><ins>+    def serialize(self):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Create a dictionary mapping key attributes so this object can be sent over a cross-pod call
</span><span class="cx">         and reconstituted at the other end. Note that the other end may have a different schema so
</span><span class="lines">@@ -6003,9 +3272,9 @@
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="cx">     @inlineCallbacks
</span><del>-    def internalize(cls, parent, mapping):
</del><ins>+    def deserialize(cls, parent, mapping):
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        Given a mapping generated by L{externalize}, convert the values into an array of database
</del><ins>+        Given a mapping generated by L{serialize}, convert the values into an array of database
</ins><span class="cx">         like items that conforms to the ordering of L{_allColumns} so it can be fed into L{makeClass}.
</span><span class="cx">         Note that there may be a schema mismatch with the external data, so treat missing items as
</span><span class="cx">         C{None} and ignore extra items.
</span><span class="lines">@@ -6117,6 +3386,13 @@
</span><span class="cx">         yield self._home.notifyChanged()
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def purge(self):
+        &quot;&quot;&quot;
+        Do a &quot;silent&quot; removal of this object resource.
+        &quot;&quot;&quot;
+        return self.remove()
+
+
</ins><span class="cx">     def ownerHome(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         @see: L{ICalendar.ownerCalendarHome}
</span><span class="lines">@@ -7238,7 +4514,7 @@
</span><span class="cx">         )
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def externalize(self):
</del><ins>+    def serialize(self):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Create a dictionary mapping key attributes so this object can be sent over a cross-pod call
</span><span class="cx">         and reconstituted at the other end. Note that the other end may have a different schema so
</span><span class="lines">@@ -7249,9 +4525,9 @@
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="cx">     @inlineCallbacks
</span><del>-    def internalize(cls, parent, mapping):
</del><ins>+    def deserialize(cls, parent, mapping):
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        Given a mapping generated by L{externalize}, convert the values into an array of database
</del><ins>+        Given a mapping generated by L{serialize}, convert the values into an array of database
</ins><span class="cx">         like items that conforms to the ordering of L{_allColumns} so it can be fed into L{makeClass}.
</span><span class="cx">         Note that there may be a schema mismatch with the external data, so treat missing items as
</span><span class="cx">         C{None} and ignore extra items.
</span><span class="lines">@@ -7440,6 +4716,13 @@
</span><span class="cx">         self._cachedComponent = None
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def purge(self):
+        &quot;&quot;&quot;
+        Do a &quot;silent&quot; removal of this object resource.
+        &quot;&quot;&quot;
+        return self.remove()
+
+
</ins><span class="cx">     def removeNotifyCategory(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Indicates what category to use when determining the priority of push
</span><span class="lines">@@ -7506,1045 +4789,3 @@
</span><span class="cx">                 raise ConcurrentModification()
</span><span class="cx">         else:
</span><span class="cx">             returnValue(self._textData)
</span><del>-
-
-
-class NotificationCollection(FancyEqMixin, _SharedSyncLogic):
-    log = Logger()
-
-    implements(INotificationCollection)
-
-    compareAttributes = (
-        &quot;_uid&quot;,
-        &quot;_resourceID&quot;,
-    )
-
-    _revisionsSchema = schema.NOTIFICATION_OBJECT_REVISIONS
-    _homeSchema = schema.NOTIFICATION_HOME
-
-
-    def __init__(self, txn, uid, resourceID):
-
-        self._txn = txn
-        self._uid = uid
-        self._resourceID = resourceID
-        self._dataVersion = None
-        self._notifications = {}
-        self._notificationNames = None
-        self._syncTokenRevision = None
-
-        # Make sure we have push notifications set up to push on this collection
-        # as well as the home it is in
-        self._notifiers = dict([(factory_name, factory.newNotifier(self),) for factory_name, factory in txn._notifierFactories.items()])
-
-    _resourceIDFromUIDQuery = Select(
-        [_homeSchema.RESOURCE_ID], From=_homeSchema,
-        Where=_homeSchema.OWNER_UID == Parameter(&quot;uid&quot;))
-
-    _UIDFromResourceIDQuery = Select(
-        [_homeSchema.OWNER_UID], From=_homeSchema,
-        Where=_homeSchema.RESOURCE_ID == Parameter(&quot;rid&quot;))
-
-    _provisionNewNotificationsQuery = Insert(
-        {_homeSchema.OWNER_UID: Parameter(&quot;uid&quot;)},
-        Return=_homeSchema.RESOURCE_ID
-    )
-
-
-    @property
-    def _home(self):
-        &quot;&quot;&quot;
-        L{NotificationCollection} serves as its own C{_home} for the purposes of
-        working with L{_SharedSyncLogic}.
-        &quot;&quot;&quot;
-        return self
-
-
-    @classmethod
-    @inlineCallbacks
-    def notificationsWithUID(cls, txn, uid, create):
-        &quot;&quot;&quot;
-        @param uid: the owner UID; assumed to be UTF-8 encoded bytes
-        &quot;&quot;&quot;
-        rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
-
-        if rows:
-            resourceID = rows[0][0]
-            created = False
-        elif create:
-            # Determine if the user is local or external
-            record = yield txn.directoryService().recordWithUID(uid.decode(&quot;utf-8&quot;))
-            if record is None:
-                raise DirectoryRecordNotFoundError(&quot;Cannot create home for UID since no directory record exists: {}&quot;.format(uid))
-
-            state = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
-            if state == _HOME_STATUS_EXTERNAL:
-                raise RecordNotAllowedError(&quot;Cannot store notifications for external user: {}&quot;.format(uid))
-
-            # Use savepoint so we can do a partial rollback if there is a race
-            # condition where this row has already been inserted
-            savepoint = SavepointAction(&quot;notificationsWithUID&quot;)
-            yield savepoint.acquire(txn)
-
-            try:
-                resourceID = str((
-                    yield cls._provisionNewNotificationsQuery.on(txn, uid=uid)
-                )[0][0])
-            except Exception:
-                # FIXME: Really want to trap the pg.DatabaseError but in a non-
-                # DB specific manner
-                yield savepoint.rollback(txn)
-
-                # Retry the query - row may exist now, if not re-raise
-                rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
-                if rows:
-                    resourceID = rows[0][0]
-                    created = False
-                else:
-                    raise
-            else:
-                created = True
-                yield savepoint.release(txn)
-        else:
-            returnValue(None)
-        collection = cls(txn, uid, resourceID)
-        yield collection._loadPropertyStore()
-        if created:
-            yield collection._initSyncToken()
-            yield collection.notifyChanged()
-        returnValue(collection)
-
-
-    @classmethod
-    @inlineCallbacks
-    def notificationsWithResourceID(cls, txn, rid):
-        rows = yield cls._UIDFromResourceIDQuery.on(txn, rid=rid)
-
-        if rows:
-            uid = rows[0][0]
-            result = (yield cls.notificationsWithUID(txn, uid, create=False))
-            returnValue(result)
-        else:
-            returnValue(None)
-
-
-    @inlineCallbacks
-    def _loadPropertyStore(self):
-        self._propertyStore = yield PropertyStore.load(
-            self._uid,
-            self._uid,
-            None,
-            self._txn,
-            self._resourceID,
-            notifyCallback=self.notifyChanged
-        )
-
-
-    def __repr__(self):
-        return &quot;&lt;%s: %s&gt;&quot; % (self.__class__.__name__, self._resourceID)
-
-
-    def id(self):
-        &quot;&quot;&quot;
-        Retrieve the store identifier for this collection.
-
-        @return: store identifier.
-        @rtype: C{int}
-        &quot;&quot;&quot;
-        return self._resourceID
-
-
-    @classproperty
-    def _dataVersionQuery(cls):
-        nh = cls._homeSchema
-        return Select(
-            [nh.DATAVERSION], From=nh,
-            Where=nh.RESOURCE_ID == Parameter(&quot;resourceID&quot;)
-        )
-
-
-    @inlineCallbacks
-    def dataVersion(self):
-        if self._dataVersion is None:
-            self._dataVersion = (yield self._dataVersionQuery.on(
-                self._txn, resourceID=self._resourceID))[0][0]
-        returnValue(self._dataVersion)
-
-
-    def name(self):
-        return &quot;notification&quot;
-
-
-    def uid(self):
-        return self._uid
-
-
-    def owned(self):
-        return True
-
-
-    def ownerHome(self):
-        return self._home
-
-
-    def viewerHome(self):
-        return self._home
-
-
-    @inlineCallbacks
-    def notificationObjects(self):
-        results = (yield NotificationObject.loadAllObjects(self))
-        for result in results:
-            self._notifications[result.uid()] = result
-        self._notificationNames = sorted([result.name() for result in results])
-        returnValue(results)
-
-    _notificationUIDsForHomeQuery = Select(
-        [schema.NOTIFICATION.NOTIFICATION_UID], From=schema.NOTIFICATION,
-        Where=schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID ==
-        Parameter(&quot;resourceID&quot;))
-
-
-    @inlineCallbacks
-    def listNotificationObjects(self):
-        if self._notificationNames is None:
-            rows = yield self._notificationUIDsForHomeQuery.on(
-                self._txn, resourceID=self._resourceID)
-            self._notificationNames = sorted([row[0] for row in rows])
-        returnValue(self._notificationNames)
-
-
-    # used by _SharedSyncLogic.resourceNamesSinceRevision()
-    def listObjectResources(self):
-        return self.listNotificationObjects()
-
-
-    def _nameToUID(self, name):
-        &quot;&quot;&quot;
-        Based on the file-backed implementation, the 'name' is just uid +
-        &quot;.xml&quot;.
-        &quot;&quot;&quot;
-        return name.rsplit(&quot;.&quot;, 1)[0]
-
-
-    def notificationObjectWithName(self, name):
-        return self.notificationObjectWithUID(self._nameToUID(name))
-
-
-    @memoizedKey(&quot;uid&quot;, &quot;_notifications&quot;)
-    @inlineCallbacks
-    def notificationObjectWithUID(self, uid):
-        &quot;&quot;&quot;
-        Create an empty notification object first, then have it initialize itself
-        from the store.
-        &quot;&quot;&quot;
-        no = NotificationObject(self, uid)
-        no = (yield no.initFromStore())
-        returnValue(no)
-
-
-    @inlineCallbacks
-    def writeNotificationObject(self, uid, notificationtype, notificationdata):
-
-        inserting = False
-        notificationObject = yield self.notificationObjectWithUID(uid)
-        if notificationObject is None:
-            notificationObject = NotificationObject(self, uid)
-            inserting = True
-        yield notificationObject.setData(uid, notificationtype, notificationdata, inserting=inserting)
-        if inserting:
-            yield self._insertRevision(&quot;%s.xml&quot; % (uid,))
-            if self._notificationNames is not None:
-                self._notificationNames.append(notificationObject.uid())
-        else:
-            yield self._updateRevision(&quot;%s.xml&quot; % (uid,))
-        yield self.notifyChanged()
-
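-    # Sketch of a caller, mirroring _sendInviteNotification above (the dict
-    # contents here are illustrative, not a complete payload):
-    #
-    #   notifications = yield txn.notificationsWithUID(shareeUID)
-    #   yield notifications.writeNotificationObject(
-    #       shareUID,
-    #       {&quot;notification-type&quot;: &quot;invite-notification&quot;},
-    #       {&quot;notification-type&quot;: &quot;invite-notification&quot;, &quot;summary&quot;: &quot;hi&quot;},
-    #   )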
-
-    def removeNotificationObjectWithName(self, name):
-        if self._notificationNames is not None:
-            self._notificationNames.remove(self._nameToUID(name))
-        return self.removeNotificationObjectWithUID(self._nameToUID(name))
-
-    _removeByUIDQuery = Delete(
-        From=schema.NOTIFICATION,
-        Where=(schema.NOTIFICATION.NOTIFICATION_UID == Parameter(&quot;uid&quot;)).And(
-            schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID
-            == Parameter(&quot;resourceID&quot;)))
-
-
-    @inlineCallbacks
-    def removeNotificationObjectWithUID(self, uid):
-        yield self._removeByUIDQuery.on(
-            self._txn, uid=uid, resourceID=self._resourceID)
-        self._notifications.pop(uid, None)
-        yield self._deleteRevision(&quot;%s.xml&quot; % (uid,))
-        yield self.notifyChanged()
-
-    _initSyncTokenQuery = Insert(
-        {
-            _revisionsSchema.HOME_RESOURCE_ID : Parameter(&quot;resourceID&quot;),
-            _revisionsSchema.RESOURCE_NAME    : None,
-            _revisionsSchema.REVISION         : schema.REVISION_SEQ,
-            _revisionsSchema.DELETED          : False
-        }, Return=_revisionsSchema.REVISION
-    )
-
-
-    @inlineCallbacks
-    def _initSyncToken(self):
-        self._syncTokenRevision = (yield self._initSyncTokenQuery.on(
-            self._txn, resourceID=self._resourceID))[0][0]
-
-    _syncTokenQuery = Select(
-        [Max(_revisionsSchema.REVISION)], From=_revisionsSchema,
-        Where=_revisionsSchema.HOME_RESOURCE_ID == Parameter(&quot;resourceID&quot;)
-    )
-
-
-    @inlineCallbacks
-    def syncToken(self):
-        if self._syncTokenRevision is None:
-            self._syncTokenRevision = (
-                yield self._syncTokenQuery.on(
-                    self._txn, resourceID=self._resourceID)
-            )[0][0]
-            if self._syncTokenRevision is None:
-                self._syncTokenRevision = int((yield self._txn.calendarserverValue(&quot;MIN-VALID-REVISION&quot;)))
-        returnValue(&quot;%s_%s&quot; % (self._resourceID, self._syncTokenRevision))
-
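-    # The returned token has the form &quot;&lt;resourceID&gt;_&lt;revision&gt;&quot;,
-    # e.g. &quot;42_107&quot;.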
-
-    def properties(self):
-        return self._propertyStore
-
-
-    def addNotifier(self, factory_name, notifier):
-        if self._notifiers is None:
-            self._notifiers = {}
-        self._notifiers[factory_name] = notifier
-
-
-    def getNotifier(self, factory_name):
-        return self._notifiers.get(factory_name)
-
-
-    def notifierID(self):
-        return (self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix, &quot;%s/notification&quot; % (self.ownerHome().uid(),),)
-
-
-    def parentNotifierID(self):
-        return (self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix, &quot;%s&quot; % (self.ownerHome().uid(),),)
-
-
-    @inlineCallbacks
-    def notifyChanged(self, category=ChangeCategory.default):
-        &quot;&quot;&quot;
-        Send notifications, change sync token and bump last modified because
-        the resource has changed. We ensure we only do this once per object
-        per transaction.
-        &quot;&quot;&quot;
-        if self._txn.isNotifiedAlready(self):
-            returnValue(None)
-        self._txn.notificationAddedForObject(self)
-
-        # Send notifications
-        if self._notifiers:
-            # cache notifiers run in post commit
-            notifier = self._notifiers.get(&quot;cache&quot;, None)
-            if notifier:
-                self._txn.postCommit(notifier.notify)
-            # push notifiers add their work items immediately
-            notifier = self._notifiers.get(&quot;push&quot;, None)
-            if notifier:
-                yield notifier.notify(self._txn, priority=category.value)
-
-        returnValue(None)
-
-
-    @classproperty
-    def _completelyNewRevisionQuery(cls):
-        rev = cls._revisionsSchema
-        return Insert({rev.HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
-                       # rev.RESOURCE_ID: Parameter(&quot;resourceID&quot;),
-                       rev.RESOURCE_NAME: Parameter(&quot;name&quot;),
-                       rev.REVISION: schema.REVISION_SEQ,
-                       rev.DELETED: False},
-                      Return=rev.REVISION)
-
-
-    def _maybeNotify(self):
-        &quot;&quot;&quot;
-        Emit a push notification after C{_changeRevision}.
-        &quot;&quot;&quot;
-        return self.notifyChanged()
-
-
-    @inlineCallbacks
-    def remove(self):
-        &quot;&quot;&quot;
-        Remove DB rows corresponding to this notification home.
-        &quot;&quot;&quot;
-        # Delete NOTIFICATION rows
-        no = schema.NOTIFICATION
-        kwds = {&quot;ResourceID&quot;: self._resourceID}
-        yield Delete(
-            From=no,
-            Where=(
-                no.NOTIFICATION_HOME_RESOURCE_ID == Parameter(&quot;ResourceID&quot;)
-            ),
-        ).on(self._txn, **kwds)
-
-        # Delete NOTIFICATION_HOME (will cascade to NOTIFICATION_OBJECT_REVISIONS)
-        nh = schema.NOTIFICATION_HOME
-        yield Delete(
-            From=nh,
-            Where=(
-                nh.RESOURCE_ID == Parameter(&quot;ResourceID&quot;)
-            ),
-        ).on(self._txn, **kwds)
-
-
-
-class NotificationObject(FancyEqMixin, object):
-    &quot;&quot;&quot;
-    This used to store XML data and an XML element for the type, but we are now switching it
-    to use JSON internally. The app layer will convert that to XML and fill in the &quot;blanks&quot; as
-    needed for the app.
-    &quot;&quot;&quot;
-    log = Logger()
-
-    implements(INotificationObject)
-
-    compareAttributes = (
-        &quot;_resourceID&quot;,
-        &quot;_home&quot;,
-    )
-
-    _objectSchema = schema.NOTIFICATION
-
-    def __init__(self, home, uid):
-        self._home = home
-        self._resourceID = None
-        self._uid = uid
-        self._md5 = None
-        self._size = None
-        self._created = None
-        self._modified = None
-        self._notificationType = None
-        self._notificationData = None
-
-
-    def __repr__(self):
-        return &quot;&lt;%s: %s&gt;&quot; % (self.__class__.__name__, self._resourceID)
-
-
-    @classproperty
-    def _allColumnsByHomeIDQuery(cls):
-        &quot;&quot;&quot;
-        DAL query to load all columns by home ID.
-        &quot;&quot;&quot;
-        obj = cls._objectSchema
-        return Select(
-            [obj.RESOURCE_ID, obj.NOTIFICATION_UID, obj.MD5,
-             Len(obj.NOTIFICATION_DATA), obj.NOTIFICATION_TYPE, obj.CREATED, obj.MODIFIED],
-            From=obj,
-            Where=(obj.NOTIFICATION_HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;))
-        )
-
-
-    @classmethod
-    @inlineCallbacks
-    def loadAllObjects(cls, parent):
-        &quot;&quot;&quot;
-        Load all child objects and return a list of them. This must create the
-        child classes and initialize them using &quot;batched&quot; SQL operations to keep
-        the query count constant with respect to the number of children. This is
-        an optimization for Depth:1 operations on the collection.
-        &quot;&quot;&quot;
-
-        results = []
-
-        # Load from the main table first
-        dataRows = (
-            yield cls._allColumnsByHomeIDQuery.on(parent._txn,
-                                                  homeID=parent._resourceID))
-
-        if dataRows:
-            # Get property stores for all these child resources (if any found)
-            propertyStores = (yield PropertyStore.forMultipleResources(
-                parent.uid(),
-                None,
-                None,
-                parent._txn,
-                schema.NOTIFICATION.RESOURCE_ID,
-                schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID,
-                parent._resourceID,
-            ))
-
-        # Create the actual objects merging in properties
-        for row in dataRows:
-            child = cls(parent, None)
-            (child._resourceID,
-             child._uid,
-             child._md5,
-             child._size,
-             child._notificationType,
-             child._created,
-             child._modified,) = tuple(row)
-            try:
-                child._notificationType = json.loads(child._notificationType)
-            except ValueError:
-                pass
-            if isinstance(child._notificationType, unicode):
-                child._notificationType = child._notificationType.encode(&quot;utf-8&quot;)
-            child._loadPropertyStore(
-                props=propertyStores.get(child._resourceID, None)
-            )
-            results.append(child)
-
-        returnValue(results)
-
-
-    @classproperty
-    def _oneNotificationQuery(cls):
-        no = cls._objectSchema
-        return Select(
-            [
-                no.RESOURCE_ID,
-                no.MD5,
-                Len(no.NOTIFICATION_DATA),
-                no.NOTIFICATION_TYPE,
-                no.CREATED,
-                no.MODIFIED
-            ],
-            From=no,
-            Where=(no.NOTIFICATION_UID ==
-                   Parameter(&quot;uid&quot;)).And(no.NOTIFICATION_HOME_RESOURCE_ID ==
-                                         Parameter(&quot;homeID&quot;)))
-
-
-    @inlineCallbacks
-    def initFromStore(self):
-        &quot;&quot;&quot;
-        Initialise this object from the store, based on its UID and home
-        resource ID. We read in and cache all the extra metadata from the DB to
-        avoid having to do DB queries for those individually later.
-
-        @return: L{self} if object exists in the DB, else C{None}
-        &quot;&quot;&quot;
-        rows = (yield self._oneNotificationQuery.on(
-            self._txn, uid=self._uid, homeID=self._home._resourceID))
-        if rows:
-            (self._resourceID,
-             self._md5,
-             self._size,
-             self._notificationType,
-             self._created,
-             self._modified,) = tuple(rows[0])
-            try:
-                self._notificationType = json.loads(self._notificationType)
-            except ValueError:
-                pass
-            if isinstance(self._notificationType, unicode):
-                self._notificationType = self._notificationType.encode(&quot;utf-8&quot;)
-            self._loadPropertyStore()
-            returnValue(self)
-        else:
-            returnValue(None)
-
-
-    def _loadPropertyStore(self, props=None, created=False):
-        if props is None:
-            props = NonePropertyStore(self._home.uid())
-        self._propertyStore = props
-
-
-    def properties(self):
-        return self._propertyStore
-
-
-    def id(self):
-        &quot;&quot;&quot;
-        Retrieve the store identifier for this object.
-
-        @return: store identifier.
-        @rtype: C{int}
-        &quot;&quot;&quot;
-        return self._resourceID
-
-
-    @property
-    def _txn(self):
-        return self._home._txn
-
-
-    def notificationCollection(self):
-        return self._home
-
-
-    def uid(self):
-        return self._uid
-
-
-    def name(self):
-        return self.uid() + &quot;.xml&quot;
-
-
-    @classproperty
-    def _newNotificationQuery(cls):
-        no = cls._objectSchema
-        return Insert(
-            {
-                no.NOTIFICATION_HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
-                no.NOTIFICATION_UID: Parameter(&quot;uid&quot;),
-                no.NOTIFICATION_TYPE: Parameter(&quot;notificationType&quot;),
-                no.NOTIFICATION_DATA: Parameter(&quot;notificationData&quot;),
-                no.MD5: Parameter(&quot;md5&quot;),
-            },
-            Return=[no.RESOURCE_ID, no.CREATED, no.MODIFIED]
-        )
-
-
-    @classproperty
-    def _updateNotificationQuery(cls):
-        no = cls._objectSchema
-        return Update(
-            {
-                no.NOTIFICATION_TYPE: Parameter(&quot;notificationType&quot;),
-                no.NOTIFICATION_DATA: Parameter(&quot;notificationData&quot;),
-                no.MD5: Parameter(&quot;md5&quot;),
-            },
-            Where=(no.NOTIFICATION_HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)).And(
-                no.NOTIFICATION_UID == Parameter(&quot;uid&quot;)),
-            Return=no.MODIFIED
-        )
-
-
-    @inlineCallbacks
-    def setData(self, uid, notificationtype, notificationdata, inserting=False):
-        &quot;&quot;&quot;
-        Set the object resource data and update the cached metadata.
-        &quot;&quot;&quot;
-
-        notificationtext = json.dumps(notificationdata)
-        self._notificationType = notificationtype
-        self._md5 = hashlib.md5(notificationtext).hexdigest()
-        self._size = len(notificationtext)
-        if inserting:
-            rows = yield self._newNotificationQuery.on(
-                self._txn, homeID=self._home._resourceID, uid=uid,
-                notificationType=json.dumps(self._notificationType),
-                notificationData=notificationtext, md5=self._md5
-            )
-            self._resourceID, self._created, self._modified = rows[0]
-            self._loadPropertyStore()
-        else:
-            rows = yield self._updateNotificationQuery.on(
-                self._txn, homeID=self._home._resourceID, uid=uid,
-                notificationType=json.dumps(self._notificationType),
-                notificationData=notificationtext, md5=self._md5
-            )
-            self._modified = rows[0][0]
-        self._notificationData = notificationdata
-
-    _notificationDataFromID = Select(
-        [_objectSchema.NOTIFICATION_DATA], From=_objectSchema,
-        Where=_objectSchema.RESOURCE_ID == Parameter(&quot;resourceID&quot;))
-
-
-    @inlineCallbacks
-    def notificationData(self):
-        if self._notificationData is None:
-            self._notificationData = (yield self._notificationDataFromID.on(self._txn, resourceID=self._resourceID))[0][0]
-            try:
-                self._notificationData = json.loads(self._notificationData)
-            except ValueError:
-                pass
-            if isinstance(self._notificationData, unicode):
-                self._notificationData = self._notificationData.encode(&quot;utf-8&quot;)
-        returnValue(self._notificationData)
-
-
-    def contentType(self):
-        &quot;&quot;&quot;
-        The content type of NotificationObjects is text/xml.
-        &quot;&quot;&quot;
-        return MimeType.fromString(&quot;text/xml&quot;)
-
-
-    def md5(self):
-        return self._md5
-
-
-    def size(self):
-        return self._size
-
-
-    def notificationType(self):
-        return self._notificationType
-
-
-    def created(self):
-        return datetimeMktime(parseSQLTimestamp(self._created))
-
-
-    def modified(self):
-        return datetimeMktime(parseSQLTimestamp(self._modified))
-
-
-
-def determineNewest(uid, homeType):
-    &quot;&quot;&quot;
-    Construct a query to determine the modification time of the newest object
-    in a given home.
-
-    @param uid: the UID of the home to scan.
-    @type uid: C{str}
-
-    @param homeType: The type of home to scan; C{ECALENDARTYPE},
-        C{ENOTIFICATIONTYPE}, or C{EADDRESSBOOKTYPE}.
-    @type homeType: C{int}
-
-    @return: A select query that will return a single row containing a single
-        column which is the maximum value.
-    @rtype: L{Select}
-    &quot;&quot;&quot;
-    if homeType == ENOTIFICATIONTYPE:
-        return Select(
-            [Max(schema.NOTIFICATION.MODIFIED)],
-            From=schema.NOTIFICATION_HOME.join(
-                schema.NOTIFICATION,
-                on=schema.NOTIFICATION_HOME.RESOURCE_ID ==
-                schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID),
-            Where=schema.NOTIFICATION_HOME.OWNER_UID == uid
-        )
-    homeTypeName = {ECALENDARTYPE: &quot;CALENDAR&quot;,
-                    EADDRESSBOOKTYPE: &quot;ADDRESSBOOK&quot;}[homeType]
-    home = getattr(schema, homeTypeName + &quot;_HOME&quot;)
-    bind = getattr(schema, homeTypeName + &quot;_BIND&quot;)
-    child = getattr(schema, homeTypeName)
-    obj = getattr(schema, homeTypeName + &quot;_OBJECT&quot;)
-    return Select(
-        [Max(obj.MODIFIED)],
-        From=home.join(bind, on=bind.HOME_RESOURCE_ID == home.RESOURCE_ID).join(
-            child, on=child.RESOURCE_ID == bind.RESOURCE_ID).join(
-            obj, on=obj.PARENT_RESOURCE_ID == child.RESOURCE_ID),
-        Where=(bind.BIND_MODE == 0).And(home.OWNER_UID == uid)
-    )
-
-
-
-@inlineCallbacks
-def mergeHomes(sqlTxn, one, other, homeType):
-    &quot;&quot;&quot;
-    Merge two homes together.  This determines which of C{one} or C{other} is
-    newer - that is, has been modified more recently - and pulls all the data
-    from the older into the newer home.  Then it renames the older home to its
-    own UID, normalized and prefixed with &quot;old.&quot;, and renames the newer
-    home to its own UID, normalized.
-
-    Because the UIDs of both homes have changed, B{both C{one} and C{other}
-    will be invalid to all other callers from the start of the invocation of
-    this function}.
-
-    @param sqlTxn: the transaction to use
-    @type sqlTxn: A L{CommonTransaction}
-
-    @param one: A calendar home.
-    @type one: L{ICalendarHome}
-
-    @param other: Another, different calendar home.
-    @type other: L{ICalendarHome}
-
-    @param homeType: The type of home to scan; L{ECALENDARTYPE} or
-        L{EADDRESSBOOKTYPE}.
-    @type homeType: C{int}
-
-    @return: a L{Deferred} which fires with the newer of C{one} or C{other},
-        into which the data from the other home has been merged, when the merge
-        is complete.
-    &quot;&quot;&quot;
-    from txdav.caldav.datastore.util import migrateHome as migrateCalendarHome
-    from txdav.carddav.datastore.util import migrateHome as migrateABHome
-    migrateHome = {EADDRESSBOOKTYPE: migrateABHome,
-                   ECALENDARTYPE: migrateCalendarHome,
-                   ENOTIFICATIONTYPE: _dontBotherWithNotifications}[homeType]
-    homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
-                 ECALENDARTYPE: schema.CALENDAR_HOME,
-                 ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
-    both = []
-    both.append([one,
-                 (yield determineNewest(one.uid(), homeType).on(sqlTxn))])
-    both.append([other,
-                 (yield determineNewest(other.uid(), homeType).on(sqlTxn))])
-    both.sort(key=lambda x: x[1])
-
-    older = both[0][0]
-    newer = both[1][0]
-    yield migrateHome(older, newer, merge=True)
-    # Rename the old one to 'old.&lt;correct-guid&gt;'
-    newNormalized = normalizeUUIDOrNot(newer.uid())
-    oldNormalized = normalizeUUIDOrNot(older.uid())
-    yield _renameHome(sqlTxn, homeTable, older.uid(), &quot;old.&quot; + oldNormalized)
-    # Rename the new one to '&lt;correct-guid&gt;'
-    if newer.uid() != newNormalized:
-        yield _renameHome(sqlTxn, homeTable, newer.uid(), newNormalized)
-    returnValue(newer)
-
-
-
-def _renameHome(txn, table, oldUID, newUID):
-    &quot;&quot;&quot;
-    Rename a calendar, addressbook, or notification home.  Note that this
-    function is only safe in transactions that have had caching disabled, and
-    more specifically should only ever be used during upgrades.  Running this
-    in a normal transaction will have unpredictable consequences, especially
-    with respect to memcache.
-
-    @param txn: an SQL transaction to use for this update
-    @type txn: L{twext.enterprise.ienterprise.IAsyncTransaction}
-
-    @param table: the storage table of the desired home type
-    @type table: L{TableSyntax}
-
-    @param oldUID: the old UID, the existing home's UID
-    @type oldUID: L{str}
-
-    @param newUID: the new UID, to change the UID to
-    @type newUID: L{str}
-
-    @return: a L{Deferred} which fires when the home is renamed.
-    &quot;&quot;&quot;
-    return Update({table.OWNER_UID: newUID},
-                  Where=table.OWNER_UID == oldUID).on(txn)
-
-
-
-def _dontBotherWithNotifications(older, newer, merge):
-    &quot;&quot;&quot;
-    Notifications are more transient and can be easily worked around; don't
-    bother to migrate all of them when there is a UUID case mismatch.
-    &quot;&quot;&quot;
-    pass
-
-
-
-@inlineCallbacks
-def _normalizeHomeUUIDsIn(t, homeType):
-    &quot;&quot;&quot;
-    Normalize the UUIDs in the given L{txdav.common.datastore.CommonStore}.
-
-    This changes the case of the UUIDs in the calendar home.
-
-    @param t: the transaction to normalize all the UUIDs in.
-    @type t: L{CommonStoreTransaction}
-
-    @param homeType: The type of home to scan, L{ECALENDARTYPE},
-        L{EADDRESSBOOKTYPE}, or L{ENOTIFICATIONTYPE}.
-    @type homeType: C{int}
-
-    @return: a L{Deferred} which fires with C{None} when the UUID normalization
-        is complete.
-    &quot;&quot;&quot;
-    from txdav.caldav.datastore.util import fixOneCalendarHome
-    homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
-                 ECALENDARTYPE: schema.CALENDAR_HOME,
-                 ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
-    homeTypeName = homeTable.model.name.split(&quot;_&quot;)[0]
-
-    allUIDs = yield Select([homeTable.OWNER_UID],
-                           From=homeTable,
-                           OrderBy=homeTable.OWNER_UID).on(t)
-    total = len(allUIDs)
-    allElapsed = []
-    for n, [UID] in enumerate(allUIDs):
-        start = time.time()
-        if allElapsed:
-            estimate = &quot;%0.3d&quot; % ((sum(allElapsed) / len(allElapsed)) *
-                                  (total - n))
-        else:
-            estimate = &quot;unknown&quot;
-        log.info(
-            &quot;Scanning UID {uid} [{homeType}] &quot;
-            &quot;({pct:0.2f}%, {estimate} seconds remaining)...&quot;,
-            uid=UID, pct=(n / float(total)) * 100, estimate=estimate,
-            homeType=homeTypeName
-        )
-        other = None
-        this = yield _getHome(t, homeType, UID)
-        if homeType == ECALENDARTYPE:
-            fixedThisHome = yield fixOneCalendarHome(this)
-        else:
-            fixedThisHome = 0
-        fixedOtherHome = 0
-        if this is None:
-            log.info(
-                &quot;{uid!r} appears to be missing; assuming it was already processed&quot;, uid=UID
-            )
-        try:
-            uuidobj = UUID(UID)
-        except ValueError:
-            pass
-        else:
-            newname = str(uuidobj).upper()
-            if UID != newname:
-                log.info(
-                    &quot;Detected case variance: {uid} {newuid}[{homeType}]&quot;,
-                    uid=UID, newuid=newname, homeType=homeTypeName
-                )
-                other = yield _getHome(t, homeType, newname)
-                if other is None:
-                    # No duplicate: just fix the name.
-                    yield _renameHome(t, homeTable, UID, newname)
-                else:
-                    if homeType == ECALENDARTYPE:
-                        fixedOtherHome = yield fixOneCalendarHome(other)
-                    this = yield mergeHomes(t, this, other, homeType)
-                # NOTE: WE MUST NOT TOUCH EITHER HOME OBJECT AFTER THIS POINT.
-                # THE UIDS HAVE CHANGED AND ALL OPERATIONS WILL FAIL.
-
-        end = time.time()
-        elapsed = end - start
-        allElapsed.append(elapsed)
-        log.info(
-            &quot;Scanned UID {uid}; {elapsed} seconds elapsed,&quot;
-            &quot; {fixes} properties fixed ({duplicate} fixed in the duplicate home).&quot;,
-            uid=UID, elapsed=elapsed, fixes=fixedThisHome,
-            duplicate=fixedOtherHome
-        )
-    returnValue(None)
-
-
-
-def _getHome(txn, homeType, uid):
-    &quot;&quot;&quot;
-    Like L{CommonHome.homeWithUID} but also honoring ENOTIFICATIONTYPE which
-    isn't I{really} a type of home.
-
-    @param txn: the transaction to retrieve the home from
-    @type txn: L{CommonStoreTransaction}
-
-    @param homeType: L{ENOTIFICATIONTYPE}, L{ECALENDARTYPE}, or
-        L{EADDRESSBOOKTYPE}.
-
-    @param uid: the UID of the home to retrieve.
-    @type uid: L{str}
-
-    @return: a L{Deferred} that fires with the L{CommonHome} or
-        L{NotificationHome} when it has been retrieved.
-    &quot;&quot;&quot;
-    if homeType == ENOTIFICATIONTYPE:
-        return txn.notificationsWithUID(uid, create=False)
-    else:
-        return txn.homeWithUID(homeType, uid)
-
-
-
-@inlineCallbacks
-def _normalizeColumnUUIDs(txn, column):
-    &quot;&quot;&quot;
-    Upper-case the UUIDs in the given SQL DAL column.
-
-    @param txn: The transaction.
-    @type txn: L{CommonStoreTransaction}
-
-    @param column: the column, which may contain UIDs, to normalize.
-    @type column: L{ColumnSyntax}
-
-    @return: A L{Deferred} that will fire when the UUID normalization of the
-        given column has completed.
-    &quot;&quot;&quot;
-    tableModel = column.model.table
-    # Get a primary key made of column syntax objects for querying and
-    # comparison later.
-    pkey = [ColumnSyntax(columnModel)
-            for columnModel in tableModel.primaryKey]
-    for row in (yield Select([column] + pkey,
-                             From=TableSyntax(tableModel)).on(txn)):
-        before = row[0]
-        pkeyparts = row[1:]
-        after = normalizeUUIDOrNot(before)
-        if after != before:
-            where = _AndNothing
-            # Build a where clause out of the primary key and the parts of the
-            # primary key that were found.
-            for pkeycol, pkeypart in zip(pkey, pkeyparts):
-                where = where.And(pkeycol == pkeypart)
-            yield Update({column: after}, Where=where).on(txn)
-
-
-
-class _AndNothing(object):
-    &quot;&quot;&quot;
-    Simple placeholder for iteratively generating a 'Where' clause; the 'And'
-    just returns its argument, so it can be used at the start of the loop.
-    &quot;&quot;&quot;
-    @staticmethod
-    def And(self):
-        &quot;&quot;&quot;
-        Return the argument.
-        &quot;&quot;&quot;
-        return self
-
-
-
-@inlineCallbacks
-def _needsNormalizationUpgrade(txn):
-    &quot;&quot;&quot;
-    Determine whether a given store requires a UUID normalization data upgrade.
-
-    @param txn: the transaction to use
-    @type txn: L{CommonStoreTransaction}
-
-    @return: a L{Deferred} that fires with C{True} or C{False} depending on
-        whether we need the normalization upgrade or not.
-    &quot;&quot;&quot;
-    for x in [schema.CALENDAR_HOME, schema.ADDRESSBOOK_HOME,
-              schema.NOTIFICATION_HOME]:
-        slct = Select([x.OWNER_UID], From=x,
-                      Where=x.OWNER_UID != Upper(x.OWNER_UID))
-        rows = yield slct.on(txn)
-        if rows:
-            for [uid] in rows:
-                if normalizeUUIDOrNot(uid) != uid:
-                    returnValue(True)
-    returnValue(False)
-
-
-
-@inlineCallbacks
-def fixUUIDNormalization(store):
-    &quot;&quot;&quot;
-    Fix all UUIDs in the given SQL store to be in a canonical form;
-    00000000-0000-0000-0000-000000000000 format and upper-case.
-    &quot;&quot;&quot;
-    t = store.newTransaction(disableCache=True)
-
-    # First, let's see if there are any calendar, addressbook, or notification
-    # homes that have a de-normalized OWNER_UID.  If there are none, then we can
-    # early-out and avoid the tedious and potentially expensive inspection of
-    # oodles of calendar data.
-    if not (yield _needsNormalizationUpgrade(t)):
-        log.info(&quot;No potentially denormalized UUIDs detected, &quot;
-                 &quot;skipping normalization upgrade.&quot;)
-        yield t.abort()
-        returnValue(None)
-    try:
-        yield _normalizeHomeUUIDsIn(t, ECALENDARTYPE)
-        yield _normalizeHomeUUIDsIn(t, EADDRESSBOOKTYPE)
-        yield _normalizeHomeUUIDsIn(t, ENOTIFICATIONTYPE)
-        yield _normalizeColumnUUIDs(t, schema.RESOURCE_PROPERTY.VIEWER_UID)
-        yield _normalizeColumnUUIDs(t, schema.APN_SUBSCRIPTIONS.SUBSCRIBER_GUID)
-    except:
-        log.failure(&quot;Unable to normalize UUIDs&quot;)
-        yield t.abort()
-        # There are many possible problems here that are very hard to test
-        # for individually: unexpected data might cause constraint violations
-        # under one of the manipulations done by _normalizeHomeUUIDsIn. Since
-        # this upgrade does not come with a schema version bump and may be
-        # re-attempted at any time, just log the failure and abort so that we
-        # can try again later; the service will survive for everyone _not_
-        # affected by this somewhat obscure bug.
-    else:
-        yield t.commit()
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_apnpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_apnpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_apn.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_apn.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_apn.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_apn.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,121 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.python.log import Logger
+from twisted.internet.defer import inlineCallbacks
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.icommondatastore import InvalidSubscriptionValues
+
+log = Logger()
+
+&quot;&quot;&quot;
+Classes and methods that relate to APN objects in the SQL store.
+&quot;&quot;&quot;
+
+class APNSubscriptionsRecord(SerializableRecord, fromTable(schema.APN_SUBSCRIPTIONS)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.APN_SUBSCRIPTIONS}.
+    &quot;&quot;&quot;
+    pass
+
+
+
+class APNSubscriptionsMixin(object):
+    &quot;&quot;&quot;
+    A mixin for L{CommonStoreTransaction} that covers the APN API.
+    &quot;&quot;&quot;
+
+    @inlineCallbacks
+    def addAPNSubscription(
+        self, token, key, timestamp, subscriber,
+        userAgent, ipAddr
+    ):
+        if not (token and key and timestamp and subscriber):
+            raise InvalidSubscriptionValues()
+
+        # Cap these values at 255 characters
+        userAgent = userAgent[:255]
+        ipAddr = ipAddr[:255]
+
+        records = yield APNSubscriptionsRecord.querysimple(
+            self,
+            token=token, resourceKey=key
+        )
+        if not records:  # Subscription does not yet exist
+            try:
+                yield APNSubscriptionsRecord.create(
+                    self,
+                    token=token,
+                    resourceKey=key,
+                    modified=timestamp,
+                    subscriberGUID=subscriber,
+                    userAgent=userAgent,
+                    ipAddr=ipAddr
+                )
+            except Exception:
+                # Subscription may have been added by someone else, which is fine
+                pass
+
+        else:  # Subscription exists, so update with new timestamp and subscriber
+            try:
+                yield records[0].update(
+                    modified=timestamp,
+                    subscriberGUID=subscriber,
+                    userAgent=userAgent,
+                    ipAddr=ipAddr,
+                )
+            except Exception:
+                # Subscription may have been updated by someone else, which is fine
+                pass
+
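+    # Illustrative usage sketch, not part of this changeset: `txn`,
+    # `deviceToken`, and the sample values below are assumptions.
+    #
+    #     yield txn.addAPNSubscription(
+    #         token=deviceToken,                   # 64-char hex string from APNs
+    #         key=&quot;/CalDAV/example.com/user01/&quot;,   # resource being watched
+    #         timestamp=int(time.time()),
+    #         subscriber=&quot;9AF69CDD-F5B2-4B9C-A1C3-B839E3E475A7&quot;,
+    #         userAgent=&quot;iOS/8.2&quot;,
+    #         ipAddr=&quot;10.0.1.12&quot;,
+    #     )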
+
+    def removeAPNSubscription(self, token, key):
+        return APNSubscriptionsRecord.deletesimple(
+            self,
+            token=token,
+            resourceKey=key
+        )
+
+
+    def purgeOldAPNSubscriptions(self, olderThan):
+        return APNSubscriptionsRecord.deletesome(
+            self,
+            APNSubscriptionsRecord.modified &lt; olderThan,
+        )
+
+
+    def apnSubscriptionsByToken(self, token):
+        return APNSubscriptionsRecord.querysimple(
+            self,
+            token=token,
+        )
+
+
+    def apnSubscriptionsByKey(self, key):
+        return APNSubscriptionsRecord.querysimple(
+            self,
+            resourceKey=key,
+        )
+
+
+    def apnSubscriptionsBySubscriber(self, guid):
+        return APNSubscriptionsRecord.querysimple(
+            self,
+            subscriberGUID=guid,
+        )
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_directorypyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_directorypy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_directory.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_directory.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_directory.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_directory.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,848 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.enterprise.dal.syntax import SavepointAction, Select
+from twext.python.log import Logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.icommondatastore import AllRetriesFailed, NotFoundError
+import datetime
+import hashlib
+from txdav.who.delegates import Delegates
+
+log = Logger()
+
+&quot;&quot;&quot;
+Classes and methods that relate to directory objects in the SQL store, e.g.
+delegates, groups, etc.
+&quot;&quot;&quot;
+
+class GroupsRecord(SerializableRecord, fromTable(schema.GROUPS)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.GROUPS}.
+    &quot;&quot;&quot;
+
+    @classmethod
+    def groupsForMember(cls, txn, memberUID):
+
+        return GroupsRecord.query(
+            txn,
+            GroupsRecord.groupID.In(
+                GroupMembershipRecord.queryExpr(
+                    GroupMembershipRecord.memberUID == memberUID.encode(&quot;utf-8&quot;),
+                    attributes=(GroupMembershipRecord.groupID,),
+                )
+            ),
+        )
+
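+    # Membership rows are stored pre-flattened (see groupMemberUIDs below), so
+    # this resolves membership via nested groups as well. Illustrative call
+    # (`txn` and the UID are assumptions):
+    #
+    #     groups = yield GroupsRecord.groupsForMember(txn, u&quot;user01&quot;)
+    #     groupUIDs = set(g.groupUID for g in groups)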
+
+
+class GroupMembershipRecord(SerializableRecord, fromTable(schema.GROUP_MEMBERSHIP)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.GROUP_MEMBERSHIP}.
+    &quot;&quot;&quot;
+    pass
+
+
+
+class DelegateRecord(SerializableRecord, fromTable(schema.DELEGATES)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.DELEGATES}.
+    &quot;&quot;&quot;
+    pass
+
+
+
+class DelegateGroupsRecord(SerializableRecord, fromTable(schema.DELEGATE_GROUPS)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.DELEGATE_GROUPS}.
+    &quot;&quot;&quot;
+
+    @classmethod
+    def allGroupDelegates(cls, txn):
+        &quot;&quot;&quot;
+        Get all groups that have been delegated to, for any delegator.
+        &quot;&quot;&quot;
+
+        return GroupsRecord.query(
+            txn,
+            GroupsRecord.groupID.In(
+                DelegateGroupsRecord.queryExpr(
+                    None,
+                    attributes=(DelegateGroupsRecord.groupID,),
+                )
+            ),
+        )
+
+
+    @classmethod
+    def delegateGroups(cls, txn, delegator, readWrite):
+        &quot;&quot;&quot;
+        Get the groups directly delegated to by the specified delegator.
+        &quot;&quot;&quot;
+
+        return GroupsRecord.query(
+            txn,
+            GroupsRecord.groupID.In(
+                DelegateGroupsRecord.queryExpr(
+                    (DelegateGroupsRecord.delegator == delegator.encode(&quot;utf-8&quot;)).And(
+                        DelegateGroupsRecord.readWrite == (1 if readWrite else 0)
+                    ),
+                    attributes=(DelegateGroupsRecord.groupID,),
+                )
+            ),
+        )
+
+
+    @classmethod
+    def indirectDelegators(cls, txn, delegate, readWrite):
+        &quot;&quot;&quot;
+        Get delegators who have delegated to groups the delegate is a member of.
+        &quot;&quot;&quot;
+
+        return cls.query(
+            txn,
+            cls.groupID.In(
+                GroupMembershipRecord.queryExpr(
+                    GroupMembershipRecord.memberUID == delegate.encode(&quot;utf-8&quot;),
+                    attributes=(GroupMembershipRecord.groupID,),
+                )
+            ).And(cls.readWrite == (1 if readWrite else 0)),
+        )
+
+
+    @classmethod
+    def indirectDelegates(cls, txn, delegator, readWrite):
+        &quot;&quot;&quot;
+        Get delegates who are in groups which have been delegated to.
+        &quot;&quot;&quot;
+
+        return GroupMembershipRecord.query(
+            txn,
+            GroupMembershipRecord.groupID.In(
+                DelegateGroupsRecord.queryExpr(
+                    (DelegateGroupsRecord.delegator == delegator.encode(&quot;utf-8&quot;)).And(
+                        DelegateGroupsRecord.readWrite == (1 if readWrite else 0)
+                    ),
+                    attributes=(DelegateGroupsRecord.groupID,),
+                )
+            ),
+        )
+
+
+    @classmethod
+    @inlineCallbacks
+    def delegatorGroups(cls, txn, delegator):
+        &quot;&quot;&quot;
+        Get delegator/group pairs for the specified delegator.
+        &quot;&quot;&quot;
+
+        # Do a join to get what we need
+        rows = yield Select(
+            list(DelegateGroupsRecord.table) + list(GroupsRecord.table),
+            From=DelegateGroupsRecord.table.join(GroupsRecord.table, DelegateGroupsRecord.groupID == GroupsRecord.groupID),
+            Where=(DelegateGroupsRecord.delegator == delegator.encode(&quot;utf-8&quot;))
+        ).on(txn)
+
+        results = []
+        delegatorNames = [DelegateGroupsRecord.__colmap__[column] for column in list(DelegateGroupsRecord.table)]
+        groupsNames = [GroupsRecord.__colmap__[column] for column in list(GroupsRecord.table)]
+        split_point = len(delegatorNames)
+        for row in rows:
+            delegatorRow = row[:split_point]
+            delegatorRecord = DelegateGroupsRecord()
+            delegatorRecord._attributesFromRow(zip(delegatorNames, delegatorRow))
+            delegatorRecord.transaction = txn
+            groupsRow = row[split_point:]
+            groupsRecord = GroupsRecord()
+            groupsRecord._attributesFromRow(zip(groupsNames, groupsRow))
+            groupsRecord.transaction = txn
+            results.append((delegatorRecord, groupsRecord,))
+
+        returnValue(results)
+
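+    # The positional split above relies on the SELECT column list being the
+    # two tables' columns concatenated: the first split_point values hydrate a
+    # DelegateGroupsRecord and the remainder a GroupsRecord. A minimal sketch
+    # of the same idea (the column ordering is an assumption):
+    #
+    #     row = delegateGroupsValues + groupsValues
+    #     left, right = row[:split_point], row[split_point:]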
+
+
+class ExternalDelegateGroupsRecord(SerializableRecord, fromTable(schema.EXTERNAL_DELEGATE_GROUPS)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.EXTERNAL_DELEGATE_GROUPS}.
+    &quot;&quot;&quot;
+    pass
+
+
+
+class GroupsAPIMixin(object):
+    &quot;&quot;&quot;
+    A mixin for L{CommonStoreTransaction} that covers the groups API.
+    &quot;&quot;&quot;
+
+    @inlineCallbacks
+    def addGroup(self, groupUID, name, membershipHash):
+        &quot;&quot;&quot;
+        @type groupUID: C{unicode}
+        @type name: C{unicode}
+        @type membershipHash: C{str}
+        &quot;&quot;&quot;
+        record = yield self.directoryService().recordWithUID(groupUID)
+        if record is None:
+            returnValue(None)
+
+        group = yield GroupsRecord.create(
+            self,
+            name=name.encode(&quot;utf-8&quot;),
+            groupUID=groupUID.encode(&quot;utf-8&quot;),
+            membershipHash=membershipHash,
+        )
+
+        yield self.refreshGroup(group, record)
+        returnValue(group)
+
+
+    @inlineCallbacks
+    def updateGroup(self, groupUID, name, membershipHash, extant=True):
+        &quot;&quot;&quot;
+        @type groupUID: C{unicode}
+        @type name: C{unicode}
+        @type membershipHash: C{str}
+        @type extant: C{boolean}
+        &quot;&quot;&quot;
+        timestamp = datetime.datetime.utcnow()
+        group = yield self.groupByUID(groupUID, create=False)
+        if group is not None:
+            yield group.update(
+                name=name.encode(&quot;utf-8&quot;),
+                membershipHash=membershipHash,
+                extant=(1 if extant else 0),
+                modified=timestamp,
+            )
+
+
+    @inlineCallbacks
+    def groupByUID(self, groupUID, create=True):
+        &quot;&quot;&quot;
+        Return or create a record for the group UID.
+
+        @type groupUID: C{unicode}
+
+        @return: Deferred firing with the matching L{GroupsRecord}, or C{None}
+            if the group does not exist in the directory
+        &quot;&quot;&quot;
+        results = yield GroupsRecord.query(
+            self,
+            GroupsRecord.groupUID == groupUID.encode(&quot;utf-8&quot;)
+        )
+        if results:
+            returnValue(results[0])
+        elif create:
+            savepoint = SavepointAction(&quot;groupByUID&quot;)
+            yield savepoint.acquire(self)
+            try:
+                group = yield self.addGroup(groupUID, u&quot;&quot;, &quot;&quot;)
+                if group is None:
+                    # The record does not actually exist within the directory
+                    yield savepoint.release(self)
+                    returnValue(None)
+
+            except Exception:
+                yield savepoint.rollback(self)
+                results = yield GroupsRecord.query(
+                    self,
+                    GroupsRecord.groupUID == groupUID.encode(&quot;utf-8&quot;)
+                )
+                returnValue(results[0] if results else None)
+            else:
+                yield savepoint.release(self)
+                returnValue(group)
+        else:
+            returnValue(None)
+
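+    # The savepoint guards a create-or-fetch race: if a concurrent transaction
+    # inserts the same group first, the create fails, we roll back to the
+    # savepoint, and re-read the row the winner inserted. Illustrative usage
+    # (the UID is an assumption):
+    #
+    #     group = yield txn.groupByUID(u&quot;29B6C503-11DF-43EC-8CCA-40C7003149CE&quot;)
+    #     if group is not None:
+    #         memberUIDs = yield txn.groupMemberUIDs(group.groupID)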
+
+    @inlineCallbacks
+    def groupByID(self, groupID):
+        &quot;&quot;&quot;
+        Given a group ID, return the matching group record, or raise
+        L{NotFoundError}.
+
+        @type groupID: C{int}
+        @return: Deferred firing with the matching L{GroupsRecord}
+        &quot;&quot;&quot;
+        results = yield GroupsRecord.query(
+            self,
+            GroupsRecord.groupID == groupID,
+        )
+        if results:
+            returnValue(results[0])
+        else:
+            raise NotFoundError
+
+
+
+class GroupCacherAPIMixin(object):
+    &quot;&quot;&quot;
+    A mixin for L{CommonStoreTransaction} that covers the group cacher API.
+    &quot;&quot;&quot;
+
+    def addMemberToGroup(self, memberUID, groupID):
+        return GroupMembershipRecord.create(self, groupID=groupID, memberUID=memberUID.encode(&quot;utf-8&quot;))
+
+
+    def removeMemberFromGroup(self, memberUID, groupID):
+        return GroupMembershipRecord.deletesimple(
+            self, groupID=groupID, memberUID=memberUID.encode(&quot;utf-8&quot;)
+        )
+
+
+    @inlineCallbacks
+    def groupMemberUIDs(self, groupID):
+        &quot;&quot;&quot;
+        Returns the cached set of UIDs for members of the given groupID.
+        Sub-groups are not returned in the results but their members are,
+        because the group membership has already been expanded/flattened
+        before storing in the db.
+
+        @param groupID: the group ID
+        @type groupID: C{int}
+        @return: the set of member UIDs
+        @rtype: a Deferred which fires with a set() of C{str} UIDs
+        &quot;&quot;&quot;
+
+        members = yield GroupMembershipRecord.query(self, GroupMembershipRecord.groupID == groupID)
+        returnValue(set([record.memberUID.decode(&quot;utf-8&quot;) for record in members]))
+
+
+    @inlineCallbacks
+    def refreshGroup(self, group, record):
+        &quot;&quot;&quot;
+        @param group: the group record
+        @type group: L{GroupsRecord}
+        @param record: the directory record
+        @type record: C{iDirectoryRecord}
+
+        @return: Deferred firing with a tuple of membershipChanged C{boolean},
+            the set of added member UIDs, and the set of removed member UIDs
+            (the latter two are C{None} when membership is unchanged)
+
+        &quot;&quot;&quot;
+
+        if record is not None:
+            memberUIDs = yield record.expandedMemberUIDs()
+            name = record.displayName
+            extant = True
+        else:
+            memberUIDs = frozenset()
+            name = group.name
+            extant = False
+
+        membershipHashContent = hashlib.md5()
+        for memberUID in sorted(memberUIDs):
+            membershipHashContent.update(str(memberUID))
+        membershipHash = membershipHashContent.hexdigest()
+
+        if group.membershipHash != membershipHash:
+            membershipChanged = True
+            log.debug(
+                &quot;Group '{group}' changed&quot;, group=name
+            )
+        else:
+            membershipChanged = False
+
+        if membershipChanged or extant != group.extant:
+            # also updates group mod date
+            yield group.update(
+                name=name,
+                membershipHash=membershipHash,
+                extant=(1 if extant else 0),
+            )
+
+        if membershipChanged:
+            addedUIDs, removedUIDs = yield self.synchronizeMembers(group.groupID, set(memberUIDs))
+        else:
+            addedUIDs = removedUIDs = None
+
+        returnValue((membershipChanged, addedUIDs, removedUIDs,))
+
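+    # The change detector is an MD5 digest over the sorted member UIDs; the
+    # same computation stands alone as follows (the sample UIDs are made up):
+    #
+    #     import hashlib
+    #     h = hashlib.md5()
+    #     for uid in sorted([u&quot;uid2&quot;, u&quot;uid1&quot;]):
+    #         h.update(str(uid))
+    #     membershipHash = h.hexdigest()  # stable for any ordering of members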
+
+    @inlineCallbacks
+    def synchronizeMembers(self, groupID, newMemberUIDs):
+        &quot;&quot;&quot;
+        Update the group membership table in the database to match the new membership list. This
+        method will diff the existing set with the new set and apply the changes. It also calls out
+        to a groupChanged() method with the set of added and removed members so that other modules
+        that depend on groups can monitor the changes.
+
+        @param groupID: group id of group to update
+        @type groupID: C{int}
+        @param newMemberUIDs: set of new member UIDs in the group
+        @type newMemberUIDs: L{set} of L{str}
+        &quot;&quot;&quot;
+        cachedMemberUIDs = yield self.groupMemberUIDs(groupID)
+
+        removed = cachedMemberUIDs - newMemberUIDs
+        for memberUID in removed:
+            yield self.removeMemberFromGroup(memberUID, groupID)
+
+        added = newMemberUIDs - cachedMemberUIDs
+        for memberUID in added:
+            yield self.addMemberToGroup(memberUID, groupID)
+
+        yield self.groupChanged(groupID, added, removed)
+
+        returnValue((added, removed,))
+
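+    # The reconciliation is plain set arithmetic: for example, with cached
+    # members {a, b} and new members {b, c}, removed == {a} and added == {c},
+    # so exactly one DELETE and one INSERT are issued.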
+
+    @inlineCallbacks
+    def groupChanged(self, groupID, addedUIDs, removedUIDs):
+        &quot;&quot;&quot;
+        Called when membership of a group changes.
+
+        @param groupID: group id of group that changed
+        @type groupID: C{int}
+        @param addedUIDs: set of new member UIDs added to the group
+        @type addedUIDs: L{set} of L{str}
+        @param removedUIDs: set of old member UIDs removed from the group
+        @type removedUIDs: L{set} of L{str}
+        &quot;&quot;&quot;
+        yield Delegates.groupChanged(self, groupID, addedUIDs, removedUIDs)
+
+
+    @inlineCallbacks
+    def groupMembers(self, groupID):
+        &quot;&quot;&quot;
+        The members of the given group as recorded in the db
+        &quot;&quot;&quot;
+        members = set()
+        memberUIDs = (yield self.groupMemberUIDs(groupID))
+        for uid in memberUIDs:
+            record = (yield self.directoryService().recordWithUID(uid))
+            if record is not None:
+                members.add(record)
+        returnValue(members)
+
+
+    @inlineCallbacks
+    def groupUIDsFor(self, uid):
+        &quot;&quot;&quot;
+        Returns the cached set of UIDs for the groups this given uid is
+        a member of.
+
+        @param uid: the uid
+        @type uid: C{unicode}
+        @return: the set of group IDs
+        @rtype: a Deferred which fires with a set() of C{int} group IDs
+        &quot;&quot;&quot;
+        groups = yield GroupsRecord.groupsForMember(self, uid)
+        returnValue(set([group.groupUID.decode(&quot;utf-8&quot;) for group in groups]))
+
+
+
+class DelegatesAPIMixin(object):
+    &quot;&quot;&quot;
+    A mixin for L{CommonStoreTransaction} that covers the delegates API.
+    &quot;&quot;&quot;
+
+    @inlineCallbacks
+    def addDelegate(self, delegator, delegate, readWrite):
+        &quot;&quot;&quot;
+        Adds a row to the DELEGATES table.  The delegate should not be a
+        group.  To delegate to a group, call addDelegateGroup() instead.
+
+        @param delegator: the UID of the delegator
+        @type delegator: C{unicode}
+        @param delegate: the UID of the delegate
+        @type delegate: C{unicode}
+        @param readWrite: grant read and write access if True, otherwise
+            read-only access
+        @type readWrite: C{boolean}
+        &quot;&quot;&quot;
+
+        def _addDelegate(subtxn):
+            return DelegateRecord.create(
+                subtxn,
+                delegator=delegator.encode(&quot;utf-8&quot;),
+                delegate=delegate.encode(&quot;utf-8&quot;),
+                readWrite=1 if readWrite else 0
+            )
+
+        try:
+            yield self.subtransaction(_addDelegate, retries=0, failureOK=True)
+        except AllRetriesFailed:
+            pass
+
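+    # The subtransaction with retries=0 and failureOK=True tolerates a
+    # duplicate insert: if a concurrent transaction already added the same
+    # delegate row, the constraint failure is swallowed instead of aborting
+    # the caller. Illustrative call (the UIDs are assumptions):
+    #
+    #     yield txn.addDelegate(u&quot;boss&quot;, u&quot;assistant&quot;, readWrite=True)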
+
+    @inlineCallbacks
+    def addDelegateGroup(self, delegator, delegateGroupID, readWrite,
+                         isExternal=False):
+        &quot;&quot;&quot;
+        Adds a row to the DELEGATE_GROUPS table.  The delegate should be a
+        group.  To delegate to a person, call addDelegate() instead.
+
+        @param delegator: the UID of the delegator
+        @type delegator: C{unicode}
+        @param delegateGroupID: the GROUP_ID of the delegate group
+        @type delegateGroupID: C{int}
+        @param readWrite: grant read and write access if True, otherwise
+            read-only access
+        @type readWrite: C{boolean}
+        &quot;&quot;&quot;
+
+        def _addDelegateGroup(subtxn):
+            return DelegateGroupsRecord.create(
+                subtxn,
+                delegator=delegator.encode(&quot;utf-8&quot;),
+                groupID=delegateGroupID,
+                readWrite=1 if readWrite else 0,
+                isExternal=1 if isExternal else 0
+            )
+
+        try:
+            yield self.subtransaction(_addDelegateGroup, retries=0, failureOK=True)
+        except AllRetriesFailed:
+            pass
+
+
+    def removeDelegate(self, delegator, delegate, readWrite):
+        &quot;&quot;&quot;
+        Removes a row from the DELEGATES table.  The delegate should not be a
+        group.  To remove a delegate group, call removeDelegateGroup() instead.
+
+        @param delegator: the UID of the delegator
+        @type delegator: C{unicode}
+        @param delegate: the UID of the delegate
+        @type delegate: C{unicode}
+        @param readWrite: remove read and write access if True, otherwise
+            read-only access
+        @type readWrite: C{boolean}
+        &quot;&quot;&quot;
+        return DelegateRecord.deletesimple(
+            self,
+            delegator=delegator.encode(&quot;utf-8&quot;),
+            delegate=delegate.encode(&quot;utf-8&quot;),
+            readWrite=(1 if readWrite else 0),
+        )
+
+
+    def removeDelegates(self, delegator, readWrite):
+        &quot;&quot;&quot;
+        Removes all rows for this delegator/readWrite combination from the
+        DELEGATES table.
+
+        @param delegator: the UID of the delegator
+        @type delegator: C{unicode}
+        @param readWrite: remove read and write access if True, otherwise
+            read-only access
+        @type readWrite: C{boolean}
+        &quot;&quot;&quot;
+        return DelegateRecord.deletesimple(
+            self,
+            delegator=delegator.encode(&quot;utf-8&quot;),
+            readWrite=(1 if readWrite else 0)
+        )
+
+
+    def removeDelegateGroup(self, delegator, delegateGroupID, readWrite):
+        &quot;&quot;&quot;
+        Removes a row from the DELEGATE_GROUPS table.  The delegate should be a
+        group.  To remove a delegate person, call removeDelegate() instead.
+
+        @param delegator: the UID of the delegator
+        @type delegator: C{unicode}
+        @param delegateGroupID: the GROUP_ID of the delegate group
+        @type delegateGroupID: C{int}
+        @param readWrite: remove read and write access if True, otherwise
+            read-only access
+        @type readWrite: C{boolean}
+        &quot;&quot;&quot;
+        return DelegateGroupsRecord.deletesimple(
+            self,
+            delegator=delegator.encode(&quot;utf-8&quot;),
+            groupID=delegateGroupID,
+            readWrite=(1 if readWrite else 0),
+        )
+
+
+    def removeDelegateGroups(self, delegator, readWrite):
+        &quot;&quot;&quot;
+        Removes all rows for this delegator/readWrite combination from the
+        DELEGATE_GROUPS table.
+
+        @param delegator: the UID of the delegator
+        @type delegator: C{unicode}
+        @param readWrite: remove read and write access if True, otherwise
+            read-only access
+        @type readWrite: C{boolean}
+        &quot;&quot;&quot;
+        return DelegateGroupsRecord.deletesimple(
+            self,
+            delegator=delegator.encode(&quot;utf-8&quot;),
+            readWrite=(1 if readWrite else 0),
+        )
+
+
+    @inlineCallbacks
+    def delegates(self, delegator, readWrite, expanded=False):
+        &quot;&quot;&quot;
+        Returns the UIDs of all delegates for the given delegator.  If
+        expanded is False, only the direct delegates (users and groups)
+        are returned.  If expanded is True, the expanded membership is
+        returned, not including the groups themselves.
+
+        @param delegator: the UID of the delegator
+        @type delegator: C{unicode}
+        @param readWrite: the access-type to check for; read and write
+            access if True, otherwise read-only access
+        @type readWrite: C{boolean}
+        @returns: the UIDs of the delegates (for the specified access
+            type)
+        @rtype: a Deferred resulting in a set
+        &quot;&quot;&quot;
+        delegates = set()
+        delegatorU = delegator.encode(&quot;utf-8&quot;)
+
+        # First get the direct delegates
+        results = yield DelegateRecord.query(
+            self,
+            (DelegateRecord.delegator == delegatorU).And(
+                DelegateRecord.readWrite == (1 if readWrite else 0)
+            )
+        )
+        delegates.update([record.delegate.decode(&quot;utf-8&quot;) for record in results])
+
+        if expanded:
+            # Get those who are in groups which have been delegated to
+            results = yield DelegateGroupsRecord.indirectDelegates(
+                self, delegator, readWrite
+            )
+            # Skip the delegator if they are in one of the groups
+            delegates.update([record.memberUID.decode(&quot;utf-8&quot;) for record in results if record.memberUID != delegatorU])
+
+        else:
+            # Get the directly-delegated-to groups
+            results = yield DelegateGroupsRecord.delegateGroups(
+                self, delegator, readWrite,
+            )
+            delegates.update([record.groupUID.decode(&quot;utf-8&quot;) for record in results])
+
+        returnValue(delegates)
+
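+    # Illustrative usage (the UID is an assumption): with expanded=False the
+    # result mixes user UIDs and delegated-to group UIDs; with expanded=True
+    # groups are flattened into their member UIDs:
+    #
+    #     direct = yield txn.delegates(u&quot;user01&quot;, True)
+    #     flattened = yield txn.delegates(u&quot;user01&quot;, True, expanded=True)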
+
+    @inlineCallbacks
+    def delegators(self, delegate, readWrite):
+        &quot;&quot;&quot;
+        Returns the UIDs of all delegators which have granted access to
+        the given delegate, either directly or indirectly via groups.
+
+        @param delegate: the UID of the delegate
+        @type delegate: C{unicode}
+        @param readWrite: the access-type to check for; read and write
+            access if True, otherwise read-only access
+        @type readWrite: C{boolean}
+        @returns: the UIDs of the delegators (for the specified access
+            type)
+        @rtype: a Deferred resulting in a set
+        &quot;&quot;&quot;
+        delegators = set()
+        delegateU = delegate.encode(&quot;utf-8&quot;)
+
+        # First get the direct delegators
+        results = yield DelegateRecord.query(
+            self,
+            (DelegateRecord.delegate == delegateU).And(
+                DelegateRecord.readWrite == (1 if readWrite else 0)
+            )
+        )
+        delegators.update([record.delegator.decode(&quot;utf-8&quot;) for record in results])
+
+        # Finally get those who have delegated to groups the delegate
+        # is a member of
+        results = yield DelegateGroupsRecord.indirectDelegators(
+            self, delegate, readWrite
+        )
+        # Skip the delegate themselves if they are a member of a group they delegated to
+        delegators.update([record.delegator.decode(&quot;utf-8&quot;) for record in results if record.delegator != delegateU])
+
+        returnValue(delegators)
+
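+    # Mirror of delegates(): direct DELEGATES rows plus anyone who delegated
+    # to a group containing this delegate. For example (UIDs assumed), if
+    # u&quot;boss&quot; delegated to a group that includes u&quot;user01&quot;, then
+    # (yield txn.delegators(u&quot;user01&quot;, True)) contains u&quot;boss&quot;.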
+
+    @inlineCallbacks
+    def delegatorsToGroup(self, delegateGroupID, readWrite):
+        &quot;&quot;&quot;
+        Return the UIDs of those who have delegated to the given group with the
+        given access level.
+
+        @param delegateGroupID: the group ID of the delegate group
+        @type delegateGroupID: C{int}
+        @param readWrite: the access-type to check for; read and write
+            access if True, otherwise read-only access
+        @type readWrite: C{boolean}
+        @returns: the UIDs of the delegators (for the specified access
+            type)
+        @rtype: a Deferred resulting in a set
+
+        &quot;&quot;&quot;
+        results = yield DelegateGroupsRecord.query(
+            self,
+            (DelegateGroupsRecord.groupID == delegateGroupID).And(
+                DelegateGroupsRecord.readWrite == (1 if readWrite else 0)
+            )
+        )
+        delegators = set([record.delegator.decode(&quot;utf-8&quot;) for record in results])
+        returnValue(delegators)
+
+
+    @inlineCallbacks
+    def allGroupDelegates(self):
+        &quot;&quot;&quot;
+        Return the UIDs of all groups which have been delegated to.  Useful
+        for obtaining the set of groups which need to be synchronized from
+        the directory.
+
+        @returns: the UIDs of all delegated-to groups
+        @rtype: a Deferred resulting in a set
+        &quot;&quot;&quot;
+
+        results = yield DelegateGroupsRecord.allGroupDelegates(self)
+        delegates = set([record.groupUID.decode(&quot;utf-8&quot;) for record in results])
+
+        returnValue(delegates)
+
+
+    @inlineCallbacks
+    def externalDelegates(self):
+        &quot;&quot;&quot;
+        Returns a dictionary mapping delegator UIDs to (read-group, write-group)
+        tuples, including only those assignments that originated from the
+        directory.
+
+        @returns: dictionary mapping delegator uid to (readDelegateUID,
+            writeDelegateUID) tuples
+        @rtype: a Deferred resulting in a dictionary
+        &quot;&quot;&quot;
+        delegates = {}
+
+        # Get the externally managed delegates (which are all groups)
+        results = yield ExternalDelegateGroupsRecord.all(self)
+        for record in results:
+            delegates[record.delegator.encode(&quot;utf-8&quot;)] = (
+                record.groupUIDRead.encode(&quot;utf-8&quot;) if record.groupUIDRead else None,
+                record.groupUIDWrite.encode(&quot;utf-8&quot;) if record.groupUIDWrite else None
+            )
+
+        returnValue(delegates)
+
+
+    @inlineCallbacks
+    def assignExternalDelegates(
+        self, delegator, readDelegateGroupID, writeDelegateGroupID,
+        readDelegateUID, writeDelegateUID
+    ):
+        &quot;&quot;&quot;
+        Update the external delegate group table so we can quickly identify
+        diffs next time, and update the delegate group table itself.
+
+        @param delegator: the UID of the delegator
+        @type delegator: C{UUID}
+        &quot;&quot;&quot;
+
+        # Delete existing external assignments for the delegator
+        yield DelegateGroupsRecord.deletesimple(
+            self,
+            delegator=str(delegator),
+            isExternal=1,
+        )
+
+        # Remove from the external comparison table
+        yield ExternalDelegateGroupsRecord.deletesimple(
+            self,
+            delegator=str(delegator),
+        )
+
+        # Store new assignments in the external comparison table
+        if readDelegateUID or writeDelegateUID:
+            readDelegateForDB = (
+                readDelegateUID.encode(&quot;utf-8&quot;) if readDelegateUID else &quot;&quot;
+            )
+            writeDelegateForDB = (
+                writeDelegateUID.encode(&quot;utf-8&quot;) if writeDelegateUID else &quot;&quot;
+            )
+            yield ExternalDelegateGroupsRecord.create(
+                self,
+                delegator=str(delegator),
+                groupUIDRead=readDelegateForDB,
+                groupUIDWrite=writeDelegateForDB,
+            )
+
+        # Apply new assignments
+        if readDelegateGroupID is not None:
+            yield self.addDelegateGroup(
+                delegator, readDelegateGroupID, False, isExternal=True
+            )
+        if writeDelegateGroupID is not None:
+            yield self.addDelegateGroup(
+                delegator, writeDelegateGroupID, True, isExternal=True
+            )
+
+
+    def dumpIndividualDelegatesLocal(self, delegator):
+        &quot;&quot;&quot;
+        Get the L{DelegateRecord} for all delegates associated with this delegator.
+        &quot;&quot;&quot;
+        return DelegateRecord.querysimple(self, delegator=delegator.encode(&quot;utf-8&quot;))
+
+
+    @inlineCallbacks
+    def dumpIndividualDelegatesExternal(self, delegator):
+        &quot;&quot;&quot;
+        Get the L{DelegateRecord} for all delegates associated with this delegator.
+        &quot;&quot;&quot;
+        raw_results = yield self.store().conduit.send_dump_individual_delegates(self, delegator)
+        returnValue([DelegateRecord.deserialize(row) for row in raw_results])
+
+
+    def dumpGroupDelegatesLocal(self, delegator):
+        &quot;&quot;&quot;
+        Get the L{DelegateGroupsRecord}, L{GroupsRecord} for all group delegates associated with this delegator.
+        &quot;&quot;&quot;
+        return DelegateGroupsRecord.delegatorGroups(self, delegator)
+
+
+    @inlineCallbacks
+    def dumpGroupDelegatesExternal(self, delegator):
+        &quot;&quot;&quot;
+        Get the L{DelegateGroupsRecord}, L{GroupsRecord} for all group delegates associated with this delegator.
+        &quot;&quot;&quot;
+        raw_results = yield self.store().conduit.send_dump_group_delegates(self, delegator)
+        returnValue([(DelegateGroupsRecord.deserialize(row[0]), GroupsRecord.deserialize(row[1]),) for row in raw_results])
+
+
+    def dumpExternalDelegatesLocal(self, delegator):
+        &quot;&quot;&quot;
+        Get the L{ExternalDelegateGroupsRecord} for all delegates associated with this delegator.
+        &quot;&quot;&quot;
+        return ExternalDelegateGroupsRecord.querysimple(self, delegator=delegator.encode(&quot;utf-8&quot;))
+
+
+    @inlineCallbacks
+    def dumpExternalDelegatesExternal(self, delegator):
+        &quot;&quot;&quot;
+        Get the L{ExternalDelegateGroupsRecord} for all delegates associated with this delegator.
+        &quot;&quot;&quot;
+        raw_results = yield self.store().conduit.send_dump_external_delegates(self, delegator)
+        returnValue([ExternalDelegateGroupsRecord.deserialize(row) for row in raw_results])
</ins></span></pre></div>
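<p>As an illustration only (not part of this changeset), the delegate API added above might be driven like this, assuming a store transaction <code>txn</code> carrying this mixin; the delegator UUID, group row IDs and group UIDs below are hypothetical:</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue
from uuid import UUID

@inlineCallbacks
def refreshExternalDelegates(txn):
    # Hypothetical delegator and directory-sourced delegate groups.
    delegator = UUID('44234316-8971-4982-8114-22abdc8a7d02')

    # Replace any existing externally managed assignments with the new
    # read-only and read-write delegate groups.
    yield txn.assignExternalDelegates(
        delegator,
        readDelegateGroupID=12,            # GROUPS row ID (hypothetical)
        writeDelegateGroupID=13,           # GROUPS row ID (hypothetical)
        readDelegateUID=u'group-read-01',  # group UIDs kept for later diffing
        writeDelegateUID=u'group-write-01',
    )

    # All delegated-to group UIDs, e.g. for directory synchronization.
    groupUIDs = yield txn.allGroupDelegates()
    returnValue(groupUIDs)
</pre>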
<a id="CalendarServertrunktxdavcommondatastoresql_externalpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/sql_external.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_external.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/sql_external.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -26,6 +26,8 @@
</span><span class="cx"> from txdav.base.propertystore.sql import PropertyStore
</span><span class="cx"> from txdav.common.datastore.sql import CommonHome, CommonHomeChild, \
</span><span class="cx">     CommonObjectResource
</span><ins>+from txdav.common.datastore.sql_notification import NotificationCollection, \
+    NotificationObjectRecord
</ins><span class="cx"> from txdav.common.datastore.sql_tables import _HOME_STATUS_EXTERNAL
</span><span class="cx"> from txdav.common.icommondatastore import NonExistentExternalShare, \
</span><span class="cx">     ExternalShareFailed
</span><span class="lines">@@ -40,19 +42,63 @@
</span><span class="cx">     are all stubbed out since no data for the user is actually hosted in this store.
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-    def __init__(self, transaction, ownerUID, resourceID):
-        super(CommonHomeExternal, self).__init__(transaction, ownerUID)
-        self._resourceID = resourceID
-        self._status = _HOME_STATUS_EXTERNAL
</del><ins>+    @classmethod
+    def makeSyntheticExternalHome(cls, transaction, diruid, resourceID):
+        &quot;&quot;&quot;
+        During migration we need to refer to the remote home as an external home, but without having a local representation
+        of it in the store. There will be a new local store home for the migrating user that will operate on local store
+        objects. The synthetic home operates only on remote objects.
</ins><span class="cx"> 
</span><ins>+        @param diruid: directory UID of user
+        @type diruid: L{str}
+        @param resourceID: resource ID in the remote store
+        @type resourceID: L{int}
+        &quot;&quot;&quot;
+        attrMap = {
+            &quot;_resourceID&quot;: resourceID,
+            &quot;_ownerUID&quot;: diruid,
+            &quot;_status&quot;: _HOME_STATUS_EXTERNAL,
+        }
+        homeData = [attrMap.get(attr) for attr in cls.homeAttributes()]
+        result = cls(transaction, homeData)
+        result._childClass = result._childClass._externalClass
+        return result
</ins><span class="cx"> 
</span><del>-    def initFromStore(self, no_cache=False):
</del><ins>+
+    def __init__(self, transaction, homeData):
+        super(CommonHomeExternal, self).__init__(transaction, homeData)
+
+
+    def initFromStore(self):
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        Never called - this should be done by CommonHome.initFromStore only.
</del><ins>+        No-op for an external share, as it has no metadata or properties.
</ins><span class="cx">         &quot;&quot;&quot;
</span><del>-        raise AssertionError(&quot;CommonHomeExternal: not supported&quot;)
</del><ins>+        return succeed(self)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    @inlineCallbacks
+    def readMetaData(self):
+        &quot;&quot;&quot;
+        Read the home metadata from the remote home and save it as attributes on this object.
+        &quot;&quot;&quot;
+        mapping = yield self._txn.store().conduit.send_home_metadata(self)
+        self.deserialize(mapping)
+
+
+    def setStatus(self, newStatus):
+        return self._txn.store().conduit.send_home_set_status(self, newStatus)
+
+
+    def setLocalStatus(self, newStatus):
+        &quot;&quot;&quot;
+        Set the status on the object in the local store, not the remote one.
+
+        @param newStatus: the new status to set
+        @type newStatus: L{int}
+        &quot;&quot;&quot;
+        return super(CommonHomeExternal, self).setStatus(newStatus)
+
+
</ins><span class="cx">     def external(self):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Is this an external home.
</span><span class="lines">@@ -78,13 +124,13 @@
</span><span class="cx"> 
</span><span class="cx">     @memoizedKey(&quot;name&quot;, &quot;_children&quot;)
</span><span class="cx">     @inlineCallbacks
</span><del>-    def createChildWithName(self, name, externalID=None):
</del><ins>+    def createChildWithName(self, name, bindUID=None):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         No real children - only external ones.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        if externalID is None:
</del><ins>+        if bindUID is None:
</ins><span class="cx">             raise AssertionError(&quot;CommonHomeExternal: not supported&quot;)
</span><del>-        child = yield super(CommonHomeExternal, self).createChildWithName(name, externalID)
</del><ins>+        child = yield super(CommonHomeExternal, self).createChildWithName(name, bindUID)
</ins><span class="cx">         returnValue(child)
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -101,7 +147,7 @@
</span><span class="cx">         Remove an external child. Check that it is invalid or unused before calling this because if there
</span><span class="cx">         are valid references to it, removing will break things.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        if child._externalID is None:
</del><ins>+        if child._bindUID is None:
</ins><span class="cx">             raise AssertionError(&quot;CommonHomeExternal: not supported&quot;)
</span><span class="cx">         yield super(CommonHomeExternal, self).removeChildWithName(child.name())
</span><span class="cx"> 
</span><span class="lines">@@ -175,11 +221,17 @@
</span><span class="cx">         raise AssertionError(&quot;CommonHomeExternal: not supported&quot;)
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-#    def ownerHomeAndChildNameForChildID(self, resourceID):
-#        &quot;&quot;&quot;
-#        No children.
-#        &quot;&quot;&quot;
-#        raise AssertionError(&quot;CommonHomeExternal: not supported&quot;)
</del><ins>+    @inlineCallbacks
+    def sharedToBindRecords(self):
+        results = yield self._txn.store().conduit.send_home_shared_to_records(self)
+        returnValue(dict([(
+            k,
+            (
+                self._childClass._bindRecordClass.deserialize(v[0]),
+                self._childClass._bindRecordClass.deserialize(v[1]),
+                self._childClass._metadataRecordClass.deserialize(v[2]),
+            ),
+        ) for k, v in results.items()]))
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -190,7 +242,6 @@
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><del>-    @inlineCallbacks
</del><span class="cx">     def listObjects(cls, home):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Retrieve the names of the children that exist in the given home.
</span><span class="lines">@@ -198,8 +249,7 @@
</span><span class="cx">         @return: an iterable of C{str}s.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-        results = yield home._txn.store().conduit.send_homechild_listobjects(home)
-        returnValue(results)
</del><ins>+        return home._txn.store().conduit.send_homechild_listobjects(home)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="lines">@@ -209,18 +259,18 @@
</span><span class="cx"> 
</span><span class="cx">         results = []
</span><span class="cx">         for mapping in raw_results:
</span><del>-            child = yield cls.internalize(home, mapping)
</del><ins>+            child = yield cls.deserialize(home, mapping)
</ins><span class="cx">             results.append(child)
</span><span class="cx">         returnValue(results)
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="cx">     @inlineCallbacks
</span><del>-    def objectWith(cls, home, name=None, resourceID=None, externalID=None, accepted=True):
-        mapping = yield home._txn.store().conduit.send_homechild_objectwith(home, name, resourceID, externalID, accepted)
</del><ins>+    def objectWith(cls, home, name=None, resourceID=None, bindUID=None, accepted=True):
+        mapping = yield home._txn.store().conduit.send_homechild_objectwith(home, name, resourceID, bindUID, accepted)
</ins><span class="cx"> 
</span><span class="cx">         if mapping:
</span><del>-            child = yield cls.internalize(home, mapping)
</del><ins>+            child = yield cls.deserialize(home, mapping)
</ins><span class="cx">             returnValue(child)
</span><span class="cx">         else:
</span><span class="cx">             returnValue(None)
</span><span class="lines">@@ -310,15 +360,14 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><del>-    def syncToken(self):
</del><ins>+    def syncTokenRevision(self):
</ins><span class="cx">         if self._syncTokenRevision is None:
</span><span class="cx">             try:
</span><del>-                token = yield self._txn.store().conduit.send_homechild_synctoken(self)
-                self._syncTokenRevision = self.revisionFromToken(token)
</del><ins>+                revision = yield self._txn.store().conduit.send_homechild_synctokenrevision(self)
</ins><span class="cx">             except NonExistentExternalShare:
</span><span class="cx">                 yield self.fixNonExistentExternalShare()
</span><span class="cx">                 raise ExternalShareFailed(&quot;External share does not exist&quot;)
</span><del>-        returnValue((&quot;%s_%s&quot; % (self._externalID, self._syncTokenRevision,)))
</del><ins>+        returnValue(revision)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="lines">@@ -343,7 +392,17 @@
</span><span class="cx">         returnValue(results)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    @inlineCallbacks
+    def sharingBindRecords(self):
+        results = yield self._txn.store().conduit.send_homechild_sharing_records(self)
+        returnValue(dict([(k, self._bindRecordClass.deserialize(v),) for k, v in results.items()]))
</ins><span class="cx"> 
</span><ins>+
+    def migrateBindRecords(self, bindUID):
+        return self._txn.store().conduit.send_homechild_migrate_sharing_records(self, bindUID)
+
+
+
</ins><span class="cx"> class CommonObjectResourceExternal(CommonObjectResource):
</span><span class="cx">     &quot;&quot;&quot;
</span><span class="cx">     A CommonObjectResource for a resource not hosted on this system, but on another pod. This will forward
</span><span class="lines">@@ -358,7 +417,7 @@
</span><span class="cx">         results = []
</span><span class="cx">         if mapping_list:
</span><span class="cx">             for mapping in mapping_list:
</span><del>-                child = yield cls.internalize(parent, mapping)
</del><ins>+                child = yield cls.deserialize(parent, mapping)
</ins><span class="cx">                 results.append(child)
</span><span class="cx">         returnValue(results)
</span><span class="cx"> 
</span><span class="lines">@@ -371,23 +430,19 @@
</span><span class="cx">         results = []
</span><span class="cx">         if mapping_list:
</span><span class="cx">             for mapping in mapping_list:
</span><del>-                child = yield cls.internalize(parent, mapping)
</del><ins>+                child = yield cls.deserialize(parent, mapping)
</ins><span class="cx">                 results.append(child)
</span><span class="cx">         returnValue(results)
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><del>-    @inlineCallbacks
</del><span class="cx">     def listObjects(cls, parent):
</span><del>-        results = yield parent._txn.store().conduit.send_objectresource_listobjects(parent)
-        returnValue(results)
</del><ins>+        return parent._txn.store().conduit.send_objectresource_listobjects(parent)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><del>-    @inlineCallbacks
</del><span class="cx">     def countObjects(cls, parent):
</span><del>-        result = yield parent._txn.store().conduit.send_objectresource_countobjects(parent)
-        returnValue(result)
</del><ins>+        return parent._txn.store().conduit.send_objectresource_countobjects(parent)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="lines">@@ -396,24 +451,20 @@
</span><span class="cx">         mapping = yield parent._txn.store().conduit.send_objectresource_objectwith(parent, name, uid, resourceID)
</span><span class="cx"> 
</span><span class="cx">         if mapping:
</span><del>-            child = yield cls.internalize(parent, mapping)
</del><ins>+            child = yield cls.deserialize(parent, mapping)
</ins><span class="cx">             returnValue(child)
</span><span class="cx">         else:
</span><span class="cx">             returnValue(None)
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><del>-    @inlineCallbacks
</del><span class="cx">     def resourceNameForUID(cls, parent, uid):
</span><del>-        result = yield parent._txn.store().conduit.send_objectresource_resourcenameforuid(parent, uid)
-        returnValue(result)
</del><ins>+        return parent._txn.store().conduit.send_objectresource_resourcenameforuid(parent, uid)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><del>-    @inlineCallbacks
</del><span class="cx">     def resourceUIDForName(cls, parent, name):
</span><del>-        result = yield parent._txn.store().conduit.send_objectresource_resourceuidforname(parent, name)
-        returnValue(result)
</del><ins>+        return parent._txn.store().conduit.send_objectresource_resourceuidforname(parent, name)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="lines">@@ -422,7 +473,7 @@
</span><span class="cx">         mapping = yield parent._txn.store().conduit.send_objectresource_create(parent, name, str(component), options=options)
</span><span class="cx"> 
</span><span class="cx">         if mapping:
</span><del>-            child = yield cls.internalize(parent, mapping)
</del><ins>+            child = yield cls.deserialize(parent, mapping)
</ins><span class="cx">             returnValue(child)
</span><span class="cx">         else:
</span><span class="cx">             returnValue(None)
</span><span class="lines">@@ -444,6 +495,46 @@
</span><span class="cx">         returnValue(self._cachedComponent)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def remove(self):
+        return self._txn.store().conduit.send_objectresource_remove(self)
+
+
+
+class NotificationCollectionExternal(NotificationCollection):
+    &quot;&quot;&quot;
+    A NotificationCollection for a resource not hosted on this system, but on another pod. This will forward
+    specific APIs to the other pod using cross-pod requests.
+    &quot;&quot;&quot;
+
+    @classmethod
+    def notificationsWithUID(cls, txn, uid, create=False):
+        return super(NotificationCollectionExternal, cls).notificationsWithUID(txn, uid, status=_HOME_STATUS_EXTERNAL, create=create)
+
+
+    def initFromStore(self):
+        &quot;&quot;&quot;
+        No-op for an external share, as it has no properties.
+        &quot;&quot;&quot;
+        return succeed(self)
+
+
</ins><span class="cx">     @inlineCallbacks
</span><del>-    def remove(self):
-        yield self._txn.store().conduit.send_objectresource_remove(self)
</del><ins>+    def notificationObjectRecords(self):
+        results = yield self._txn.store().conduit.send_notification_all_records(self)
+        returnValue(map(NotificationObjectRecord.deserialize, results))
+
+
+    def setStatus(self, newStatus):
+        return self._txn.store().conduit.send_notification_set_status(self, newStatus)
+
+
+    def setLocalStatus(self, newStatus):
+        &quot;&quot;&quot;
+        Set the status on the object in the local store, not the remote one.
+
+        @param newStatus: the new status to set
+        @type newStatus: L{int}
+        &quot;&quot;&quot;
+        return super(NotificationCollectionExternal, self).setStatus(newStatus)
+
+NotificationCollection._externalClass = NotificationCollectionExternal
</ins></span></pre></div>
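<p>A minimal sketch of how the new synthetic external home could be used during migration, assuming <code>homeClass</code> is a concrete <code>CommonHomeExternal</code> subclass and the remote resource ID is already known; this is not code from the changeset:</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def syntheticRemoteHome(txn, homeClass, diruid, remoteResourceID):
    # Build an external-home stub whose operations are proxied to the
    # remote pod; nothing is read from or written to the local store.
    home = homeClass.makeSyntheticExternalHome(txn, diruid, remoteResourceID)

    # Pull the remote home's metadata onto the stub before using it.
    yield home.readMetaData()
    returnValue(home)
</pre>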
<a id="CalendarServertrunktxdavcommondatastoresql_imippyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_imippy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_imip.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_imip.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_imip.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_imip.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,102 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.enterprise.dal.syntax import utcNowSQL
+from twext.python.log import Logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.icommondatastore import InvalidIMIPTokenValues
+from uuid import uuid4
+
+log = Logger()
+
+&quot;&quot;&quot;
+Classes and methods that relate to iMIP objects in the SQL store.
+&quot;&quot;&quot;
+
+class iMIPTokensRecord(SerializableRecord, fromTable(schema.IMIP_TOKENS)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.IMIP_TOKENS}.
+    &quot;&quot;&quot;
+    pass
+
+
+
+class imipAPIMixin(object):
+    &quot;&quot;&quot;
+    A mixin for L{CommonStoreTransaction} that covers the iMIP API.
+    &quot;&quot;&quot;
+
+    # Create IMIP token
+    @inlineCallbacks
+    def imipCreateToken(self, organizer, attendee, icaluid, token=None):
+        if not (organizer and attendee and icaluid):
+            raise InvalidIMIPTokenValues()
+
+        if token is None:
+            token = str(uuid4())
+
+        try:
+            record = yield iMIPTokensRecord.create(
+                self,
+                token=token,
+                organizer=organizer,
+                attendee=attendee,
+                icaluid=icaluid
+            )
+        except Exception:
+            # TODO: is it okay if someone else created the same row just now?
+            record = yield self.imipGetToken(organizer, attendee, icaluid)
+        returnValue(record)
+
+
+    # Lookup IMIP organizer+attendee+icaluid for token
+    def imipLookupByToken(self, token):
+        return iMIPTokensRecord.querysimple(self, token=token)
+
+
+    # Lookup IMIP token for organizer+attendee+icaluid
+    @inlineCallbacks
+    def imipGetToken(self, organizer, attendee, icaluid):
+        records = yield iMIPTokensRecord.querysimple(
+            self,
+            organizer=organizer,
+            attendee=attendee,
+            icaluid=icaluid,
+        )
+        if records:
+            # update the timestamp
+            record = records[0]
+            yield record.update(accessed=utcNowSQL)
+        else:
+            record = None
+        returnValue(record)
+
+
+    # Remove IMIP token
+    def imipRemoveToken(self, token):
+        return iMIPTokensRecord.deletesimple(self, token=token)
+
+
+    # Purge old IMIP tokens
+    def purgeOldIMIPTokens(self, olderThan):
+        &quot;&quot;&quot;
+        @type olderThan: datetime
+        &quot;&quot;&quot;
+        return iMIPTokensRecord.delete(self, iMIPTokensRecord.accessed &lt; olderThan)
</ins></span></pre></div>
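<p>A usage sketch for the iMIP token API above, assuming a transaction that mixes in <code>imipAPIMixin</code>; the organizer, attendee and iCalendar UID values are made up:</p>
<pre>
from datetime import datetime, timedelta
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def imipTokenRoundTrip(txn):
    # Create a token for the (organizer, attendee, icaluid) triple; on a
    # duplicate-row race the existing record is fetched instead.
    record = yield txn.imipCreateToken(
        u'urn:x-uid:organizer01',
        u'mailto:attendee@example.com',
        u'event-uid-1234',
    )

    # Map a token back to its triple (returns a list of matching records).
    matches = yield txn.imipLookupByToken(record.token)

    # The reverse lookup also refreshes the ACCESSED timestamp.
    refreshed = yield txn.imipGetToken(
        u'urn:x-uid:organizer01',
        u'mailto:attendee@example.com',
        u'event-uid-1234',
    )

    # Reap tokens that have not been accessed for 60 days.
    yield txn.purgeOldIMIPTokens(datetime.utcnow() - timedelta(days=60))
    returnValue((matches, refreshed))
</pre>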
<a id="CalendarServertrunktxdavcommondatastoresql_notificationpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_notificationpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_notification.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_notification.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_notification.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_notification.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,884 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.enterprise.dal.syntax import Select, Parameter, Insert, \
+    SavepointAction, Delete, Max, Len, Update
+from twext.enterprise.util import parseSQLTimestamp
+from twext.internet.decorate import memoizedKey
+from twext.python.clsprop import classproperty
+from twext.python.log import Logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.util import FancyEqMixin
+from twistedcaldav.dateops import datetimeMktime
+from txdav.base.propertystore.sql import PropertyStore
+from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_NORMAL, \
+    _HOME_STATUS_EXTERNAL, _HOME_STATUS_DISABLED, _HOME_STATUS_MIGRATING
+from txdav.common.datastore.sql_util import _SharedSyncLogic
+from txdav.common.icommondatastore import RecordNotAllowedError
+from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
+from txdav.common.inotifications import INotificationCollection, \
+    INotificationObject
+from txdav.idav import ChangeCategory
+from txweb2.dav.noneprops import NonePropertyStore
+from txweb2.http_headers import MimeType
+from zope.interface.declarations import implements
+import hashlib
+import json
+
+&quot;&quot;&quot;
+Classes and methods that relate to the Notification collection in the SQL store.
+&quot;&quot;&quot;
+class NotificationCollection(FancyEqMixin, _SharedSyncLogic):
+    log = Logger()
+
+    implements(INotificationCollection)
+
+    compareAttributes = (
+        &quot;_ownerUID&quot;,
+        &quot;_resourceID&quot;,
+    )
+
+    _revisionsSchema = schema.NOTIFICATION_OBJECT_REVISIONS
+    _homeSchema = schema.NOTIFICATION_HOME
+
+    _externalClass = None
+
+
+    @classmethod
+    def makeClass(cls, transaction, homeData):
+        &quot;&quot;&quot;
+        Build the actual home class taking into account the possibility that we might need to
+        switch in the external version of the class.
+
+        @param transaction: transaction
+        @type transaction: L{CommonStoreTransaction}
+        @param homeData: home table column data
+        @type homeData: C{list}
+        &quot;&quot;&quot;
+
+        status = homeData[cls.homeColumns().index(cls._homeSchema.STATUS)]
+        if status == _HOME_STATUS_EXTERNAL:
+            home = cls._externalClass(transaction, homeData)
+        else:
+            home = cls(transaction, homeData)
+        return home.initFromStore()
+
+
+    @classmethod
+    def homeColumns(cls):
+        &quot;&quot;&quot;
+        Return a list of column names to retrieve when doing an ownerUID-&gt;home lookup.
+        &quot;&quot;&quot;
+
+        # Common behavior is to have created and modified
+
+        return (
+            cls._homeSchema.RESOURCE_ID,
+            cls._homeSchema.OWNER_UID,
+            cls._homeSchema.STATUS,
+        )
+
+
+    @classmethod
+    def homeAttributes(cls):
+        &quot;&quot;&quot;
+        Return a list of attributes names to map L{homeColumns} to.
+        &quot;&quot;&quot;
+
+        # Common behavior is to have created and modified
+
+        return (
+            &quot;_resourceID&quot;,
+            &quot;_ownerUID&quot;,
+            &quot;_status&quot;,
+        )
+
+
+    def __init__(self, txn, homeData):
+
+        self._txn = txn
+
+        for attr, value in zip(self.homeAttributes(), homeData):
+            setattr(self, attr, value)
+
+        self._dataVersion = None
+        self._notifications = {}
+        self._notificationNames = None
+        self._syncTokenRevision = None
+
+        # Make sure we have push notifications setup to push on this collection
+        # as well as the home it is in
+        self._notifiers = dict([(factory_name, factory.newNotifier(self),) for factory_name, factory in txn._notifierFactories.items()])
+
+
+    @inlineCallbacks
+    def initFromStore(self):
+        &quot;&quot;&quot;
+        Initialize this object from the store.
+        &quot;&quot;&quot;
+
+        yield self._loadPropertyStore()
+        returnValue(self)
+
+
+    @property
+    def _home(self):
+        &quot;&quot;&quot;
+        L{NotificationCollection} serves as its own C{_home} for the purposes of
+        working with L{_SharedSyncLogic}.
+        &quot;&quot;&quot;
+        return self
+
+
+    @classmethod
+    def notificationsWithUID(cls, txn, uid, status=None, create=False):
+        return cls.notificationsWith(txn, None, uid, status=status, create=create)
+
+
+    @classmethod
+    def notificationsWithResourceID(cls, txn, rid):
+        return cls.notificationsWith(txn, rid, None)
+
+
+    @classmethod
+    @inlineCallbacks
+    def notificationsWith(cls, txn, rid, uid, status=None, create=False):
+        &quot;&quot;&quot;
+        @param uid: the owner UID, assumed to be UTF-8 encoded bytes
+        &quot;&quot;&quot;
+        if rid is not None:
+            query = cls._homeSchema.RESOURCE_ID == rid
+        elif uid is not None:
+            query = cls._homeSchema.OWNER_UID == uid
+            if status is not None:
+                query = query.And(cls._homeSchema.STATUS == status)
+            else:
+                statusSet = (_HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL,)
+                if txn._allowDisabled:
+                    statusSet += (_HOME_STATUS_DISABLED,)
+                query = query.And(cls._homeSchema.STATUS.In(statusSet))
+        else:
+            raise AssertionError(&quot;One of rid or uid must be set&quot;)
+
+        results = yield Select(
+            cls.homeColumns(),
+            From=cls._homeSchema,
+            Where=query,
+        ).on(txn)
+
+        if len(results) &gt; 1:
+            # Pick the best one in order: normal, disabled and external
+            byStatus = dict([(result[cls.homeColumns().index(cls._homeSchema.STATUS)], result) for result in results])
+            result = byStatus.get(_HOME_STATUS_NORMAL)
+            if result is None:
+                result = byStatus.get(_HOME_STATUS_DISABLED)
+            if result is None:
+                result = byStatus.get(_HOME_STATUS_EXTERNAL)
+        elif results:
+            result = results[0]
+        else:
+            result = None
+
+        if result:
+            # Return object that already exists in the store
+            homeObject = yield cls.makeClass(txn, result)
+            returnValue(homeObject)
+        else:
+            # Can only create when uid is specified
+            if not create or uid is None:
+                returnValue(None)
+
+            # Determine if the user is local or external
+            record = yield txn.directoryService().recordWithUID(uid.decode(&quot;utf-8&quot;))
+            if record is None:
+                raise DirectoryRecordNotFoundError(&quot;Cannot create home for UID since no directory record exists: {}&quot;.format(uid))
+
+            if status is None:
+                createStatus = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
+            elif status == _HOME_STATUS_MIGRATING:
+                if record.thisServer():
+                    raise RecordNotAllowedError(&quot;Cannot migrate user data for a user already hosted on this server&quot;)
+                createStatus = status
+            elif status in (_HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL,):
+                createStatus = status
+            else:
+                raise RecordNotAllowedError(&quot;Cannot create home with status {}: {}&quot;.format(status, uid))
+
+            # Use savepoint so we can do a partial rollback if there is a race
+            # condition where this row has already been inserted
+            savepoint = SavepointAction(&quot;notificationsWithUID&quot;)
+            yield savepoint.acquire(txn)
+
+            try:
+                resourceid = (yield Insert(
+                    {
+                        cls._homeSchema.OWNER_UID: uid,
+                        cls._homeSchema.STATUS: createStatus,
+                    },
+                    Return=cls._homeSchema.RESOURCE_ID
+                ).on(txn))[0][0]
+            except Exception:
+                # FIXME: Really want to trap the pg.DatabaseError but in a non-
+                # DB specific manner
+                yield savepoint.rollback(txn)
+
+                # Retry the query - row may exist now, if not re-raise
+                results = yield Select(
+                    cls.homeColumns(),
+                    From=cls._homeSchema,
+                    Where=query,
+                ).on(txn)
+                if results:
+                    homeObject = yield cls.makeClass(txn, results[0])
+                    returnValue(homeObject)
+                else:
+                    raise
+            else:
+                yield savepoint.release(txn)
+
+                # Note that we must not cache the owner_uid-&gt;resource_id
+                # mapping in the query cacher when creating as we don't want that to appear
+                # until AFTER the commit
+                results = yield Select(
+                    cls.homeColumns(),
+                    From=cls._homeSchema,
+                    Where=cls._homeSchema.RESOURCE_ID == resourceid,
+                ).on(txn)
+                homeObject = yield cls.makeClass(txn, results[0])
+                if homeObject.normal():
+                    yield homeObject._initSyncToken()
+                    yield homeObject.notifyChanged()
+                returnValue(homeObject)
+
+
+    @inlineCallbacks
+    def _loadPropertyStore(self):
+        self._propertyStore = yield PropertyStore.load(
+            self._ownerUID,
+            self._ownerUID,
+            None,
+            self._txn,
+            self._resourceID,
+            notifyCallback=self.notifyChanged
+        )
+
+
+    def __repr__(self):
+        return &quot;&lt;%s: %s&gt;&quot; % (self.__class__.__name__, self._resourceID)
+
+
+    def id(self):
+        &quot;&quot;&quot;
+        Retrieve the store identifier for this collection.
+
+        @return: store identifier.
+        @rtype: C{int}
+        &quot;&quot;&quot;
+        return self._resourceID
+
+
+    @classproperty
+    def _dataVersionQuery(cls):
+        nh = cls._homeSchema
+        return Select(
+            [nh.DATAVERSION], From=nh,
+            Where=nh.RESOURCE_ID == Parameter(&quot;resourceID&quot;)
+        )
+
+
+    @inlineCallbacks
+    def dataVersion(self):
+        if self._dataVersion is None:
+            self._dataVersion = (yield self._dataVersionQuery.on(
+                self._txn, resourceID=self._resourceID))[0][0]
+        returnValue(self._dataVersion)
+
+
+    def name(self):
+        return &quot;notification&quot;
+
+
+    def uid(self):
+        return self._ownerUID
+
+
+    def status(self):
+        return self._status
+
+
+    @inlineCallbacks
+    def setStatus(self, newStatus):
+        &quot;&quot;&quot;
+        Set the status of this notification home (e.g. to mark it as purged).
+        &quot;&quot;&quot;
+        # Only if different
+        if self._status != newStatus:
+            yield Update(
+                {self._homeSchema.STATUS: newStatus},
+                Where=(self._homeSchema.RESOURCE_ID == self._resourceID),
+            ).on(self._txn)
+            self._status = newStatus
+
+
+    def normal(self):
+        &quot;&quot;&quot;
+        Is this a normal (internal) home.
+
+        @return: a L{bool}.
+        &quot;&quot;&quot;
+        return self._status == _HOME_STATUS_NORMAL
+
+
+    def external(self):
+        &quot;&quot;&quot;
+        Is this an external home.
+
+        @return: a L{bool}.
+        &quot;&quot;&quot;
+        return self._status == _HOME_STATUS_EXTERNAL
+
+
+    def owned(self):
+        return True
+
+
+    def ownerHome(self):
+        return self._home
+
+
+    def viewerHome(self):
+        return self._home
+
+
+    def notificationObjectRecords(self):
+        return NotificationObjectRecord.querysimple(self._txn, notificationHomeResourceID=self.id())
+
+
+    @inlineCallbacks
+    def notificationObjects(self):
+        results = (yield NotificationObject.loadAllObjects(self))
+        for result in results:
+            self._notifications[result.uid()] = result
+        self._notificationNames = sorted([result.name() for result in results])
+        returnValue(results)
+
+    _notificationUIDsForHomeQuery = Select(
+        [schema.NOTIFICATION.NOTIFICATION_UID], From=schema.NOTIFICATION,
+        Where=schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID ==
+        Parameter(&quot;resourceID&quot;))
+
+
+    @inlineCallbacks
+    def listNotificationObjects(self):
+        if self._notificationNames is None:
+            rows = yield self._notificationUIDsForHomeQuery.on(
+                self._txn, resourceID=self._resourceID)
+            self._notificationNames = sorted([row[0] for row in rows])
+        returnValue(self._notificationNames)
+
+
+    # used by _SharedSyncLogic.resourceNamesSinceRevision()
+    def listObjectResources(self):
+        return self.listNotificationObjects()
+
+
+    def _nameToUID(self, name):
+        &quot;&quot;&quot;
+        Based on the file-backed implementation, the 'name' is just uid +
+        &quot;.xml&quot;.
+        &quot;&quot;&quot;
+        return name.rsplit(&quot;.&quot;, 1)[0]
+
+
+    def notificationObjectWithName(self, name):
+        return self.notificationObjectWithUID(self._nameToUID(name))
+
+
+    @memoizedKey(&quot;uid&quot;, &quot;_notifications&quot;)
+    @inlineCallbacks
+    def notificationObjectWithUID(self, uid):
+        &quot;&quot;&quot;
+        Create an empty notification object first, then have it initialize itself
+        from the store.
+        &quot;&quot;&quot;
+        no = NotificationObject(self, uid)
+        no = (yield no.initFromStore())
+        returnValue(no)
+
+
+    @inlineCallbacks
+    def writeNotificationObject(self, uid, notificationtype, notificationdata):
+
+        inserting = False
+        notificationObject = yield self.notificationObjectWithUID(uid)
+        if notificationObject is None:
+            notificationObject = NotificationObject(self, uid)
+            inserting = True
+        yield notificationObject.setData(uid, notificationtype, notificationdata, inserting=inserting)
+        if inserting:
+            yield self._insertRevision(&quot;%s.xml&quot; % (uid,))
+            if self._notificationNames is not None:
+                self._notificationNames.append(notificationObject.uid())
+        else:
+            yield self._updateRevision(&quot;%s.xml&quot; % (uid,))
+        yield self.notifyChanged()
+        returnValue(notificationObject)
+
+
+    def removeNotificationObjectWithName(self, name):
+        if self._notificationNames is not None:
+            self._notificationNames.remove(self._nameToUID(name))
+        return self.removeNotificationObjectWithUID(self._nameToUID(name))
+
+    _removeByUIDQuery = Delete(
+        From=schema.NOTIFICATION,
+        Where=(schema.NOTIFICATION.NOTIFICATION_UID == Parameter(&quot;uid&quot;)).And(
+            schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID
+            == Parameter(&quot;resourceID&quot;)))
+
+
+    @inlineCallbacks
+    def removeNotificationObjectWithUID(self, uid):
+        yield self._removeByUIDQuery.on(
+            self._txn, uid=uid, resourceID=self._resourceID)
+        self._notifications.pop(uid, None)
+        yield self._deleteRevision(&quot;%s.xml&quot; % (uid,))
+        yield self.notifyChanged()
+
+    _initSyncTokenQuery = Insert(
+        {
+            _revisionsSchema.HOME_RESOURCE_ID : Parameter(&quot;resourceID&quot;),
+            _revisionsSchema.RESOURCE_NAME    : None,
+            _revisionsSchema.REVISION         : schema.REVISION_SEQ,
+            _revisionsSchema.DELETED          : False
+        }, Return=_revisionsSchema.REVISION
+    )
+
+
+    @inlineCallbacks
+    def _initSyncToken(self):
+        self._syncTokenRevision = (yield self._initSyncTokenQuery.on(
+            self._txn, resourceID=self._resourceID))[0][0]
+
+    _syncTokenQuery = Select(
+        [Max(_revisionsSchema.REVISION)], From=_revisionsSchema,
+        Where=_revisionsSchema.HOME_RESOURCE_ID == Parameter(&quot;resourceID&quot;)
+    )
+
+
+    @inlineCallbacks
+    def syncToken(self):
+        if self._syncTokenRevision is None:
+            self._syncTokenRevision = yield self.syncTokenRevision()
+        returnValue(&quot;%s_%s&quot; % (self._resourceID, self._syncTokenRevision))
+
+
+    @inlineCallbacks
+    def syncTokenRevision(self):
+        revision = (yield self._syncTokenQuery.on(self._txn, resourceID=self._resourceID))[0][0]
+        if revision is None:
+            revision = int((yield self._txn.calendarserverValue(&quot;MIN-VALID-REVISION&quot;)))
+        returnValue(revision)
+
+
+    def properties(self):
+        return self._propertyStore
+
+
+    def addNotifier(self, factory_name, notifier):
+        if self._notifiers is None:
+            self._notifiers = {}
+        self._notifiers[factory_name] = notifier
+
+
+    def getNotifier(self, factory_name):
+        return self._notifiers.get(factory_name)
+
+
+    def notifierID(self):
+        return (self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix, &quot;%s/notification&quot; % (self.ownerHome().uid(),),)
+
+
+    def parentNotifierID(self):
+        return (self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix, &quot;%s&quot; % (self.ownerHome().uid(),),)
+
+
+    @inlineCallbacks
+    def notifyChanged(self, category=ChangeCategory.default):
+        &quot;&quot;&quot;
+        Send notifications, change sync token and bump last modified because
+        the resource has changed.  We ensure we only do this once per object
+        per transaction.
+        &quot;&quot;&quot;
+        if self._txn.isNotifiedAlready(self):
+            returnValue(None)
+        self._txn.notificationAddedForObject(self)
+
+        # Send notifications
+        if self._notifiers:
+            # cache notifiers run in post commit
+            notifier = self._notifiers.get(&quot;cache&quot;, None)
+            if notifier:
+                self._txn.postCommit(notifier.notify)
+            # push notifiers add their work items immediately
+            notifier = self._notifiers.get(&quot;push&quot;, None)
+            if notifier:
+                yield notifier.notify(self._txn, priority=category.value)
+
+        returnValue(None)
+
+
+    @classproperty
+    def _completelyNewRevisionQuery(cls):
+        rev = cls._revisionsSchema
+        return Insert({rev.HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
+                       # rev.RESOURCE_ID: Parameter(&quot;resourceID&quot;),
+                       rev.RESOURCE_NAME: Parameter(&quot;name&quot;),
+                       rev.REVISION: schema.REVISION_SEQ,
+                       rev.DELETED: False},
+                      Return=rev.REVISION)
+
+
+    def _maybeNotify(self):
+        &quot;&quot;&quot;
+        Emit a push notification after C{_changeRevision}.
+        &quot;&quot;&quot;
+        return self.notifyChanged()
+
+
+    @inlineCallbacks
+    def remove(self):
+        &quot;&quot;&quot;
+        Remove DB rows corresponding to this notification home.
+        &quot;&quot;&quot;
+        # Delete NOTIFICATION rows
+        no = schema.NOTIFICATION
+        kwds = {&quot;ResourceID&quot;: self._resourceID}
+        yield Delete(
+            From=no,
+            Where=(
+                no.NOTIFICATION_HOME_RESOURCE_ID == Parameter(&quot;ResourceID&quot;)
+            ),
+        ).on(self._txn, **kwds)
+
+        # Delete NOTIFICATION_HOME (will cascade to NOTIFICATION_OBJECT_REVISIONS)
+        nh = schema.NOTIFICATION_HOME
+        yield Delete(
+            From=nh,
+            Where=(
+                nh.RESOURCE_ID == Parameter(&quot;ResourceID&quot;)
+            ),
+        ).on(self._txn, **kwds)
+
+
+
+class NotificationObjectRecord(SerializableRecord, fromTable(schema.NOTIFICATION)):
+    &quot;&quot;&quot;
+    @DynamicAttrs
+    L{Record} for L{schema.NOTIFICATION}.
+    &quot;&quot;&quot;
+    pass
+
+
+
+class NotificationObject(FancyEqMixin, object):
+    &quot;&quot;&quot;
+    This used to store XML data and an XML element for the type, but we are now
+    switching it to use JSON internally. The app layer will convert that to XML
+    and fill in the &quot;blanks&quot; as needed.
+    &quot;&quot;&quot;
+    log = Logger()
+
+    implements(INotificationObject)
+
+    compareAttributes = (
+        &quot;_resourceID&quot;,
+        &quot;_home&quot;,
+    )
+
+    _objectSchema = schema.NOTIFICATION
+
+    def __init__(self, home, uid):
+        self._home = home
+        self._resourceID = None
+        self._uid = uid
+        self._md5 = None
+        self._size = None
+        self._created = None
+        self._modified = None
+        self._notificationType = None
+        self._notificationData = None
+
+
+    def __repr__(self):
+        return &quot;&lt;%s: %s&gt;&quot; % (self.__class__.__name__, self._resourceID)
+
+
+    @classproperty
+    def _allColumnsByHomeIDQuery(cls):
+        &quot;&quot;&quot;
+        DAL query to load all columns by home ID.
+        &quot;&quot;&quot;
+        obj = cls._objectSchema
+        return Select(
+            [obj.RESOURCE_ID, obj.NOTIFICATION_UID, obj.MD5,
+             Len(obj.NOTIFICATION_DATA), obj.NOTIFICATION_TYPE, obj.CREATED, obj.MODIFIED],
+            From=obj,
+            Where=(obj.NOTIFICATION_HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;))
+        )
+
+
+    @classmethod
+    @inlineCallbacks
+    def loadAllObjects(cls, parent):
+        &quot;&quot;&quot;
+        Load all child objects and return a list of them. This must create the
+        child classes and initialize them using &quot;batched&quot; SQL operations to keep
+        this constant with respect to the number of children. This is an optimization for
+        Depth:1 operations on the collection.
+        &quot;&quot;&quot;
+
+        results = []
+
+        # Load from the main table first
+        dataRows = (
+            yield cls._allColumnsByHomeIDQuery.on(parent._txn,
+                                                  homeID=parent._resourceID))
+
+        if dataRows:
+            # Get property stores for all these child resources (if any found)
+            propertyStores = (yield PropertyStore.forMultipleResources(
+                parent.uid(),
+                None,
+                None,
+                parent._txn,
+                schema.NOTIFICATION.RESOURCE_ID,
+                schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID,
+                parent._resourceID,
+            ))
+
+        # Create the actual objects merging in properties
+        for row in dataRows:
+            child = cls(parent, None)
+            (child._resourceID,
+             child._uid,
+             child._md5,
+             child._size,
+             child._notificationType,
+             child._created,
+             child._modified,) = tuple(row)
+            try:
+                child._notificationType = json.loads(child._notificationType)
+            except ValueError:
+                pass
+            if isinstance(child._notificationType, unicode):
+                child._notificationType = child._notificationType.encode(&quot;utf-8&quot;)
+            child._loadPropertyStore(
+                props=propertyStores.get(child._resourceID, None)
+            )
+            results.append(child)
+
+        returnValue(results)
+
+
+    @classproperty
+    def _oneNotificationQuery(cls):
+        no = cls._objectSchema
+        return Select(
+            [
+                no.RESOURCE_ID,
+                no.MD5,
+                Len(no.NOTIFICATION_DATA),
+                no.NOTIFICATION_TYPE,
+                no.CREATED,
+                no.MODIFIED
+            ],
+            From=no,
+            Where=(no.NOTIFICATION_UID ==
+                   Parameter(&quot;uid&quot;)).And(no.NOTIFICATION_HOME_RESOURCE_ID ==
+                                         Parameter(&quot;homeID&quot;)))
+
+
+    @inlineCallbacks
+    def initFromStore(self):
+        &quot;&quot;&quot;
+        Initialize this object from the store, based on its UID and home
+        resource ID. We read in and cache all the extra metadata from the DB to
+        avoid having to do DB queries for those individually later.
+
+        @return: L{self} if object exists in the DB, else C{None}
+        &quot;&quot;&quot;
+        rows = (yield self._oneNotificationQuery.on(
+            self._txn, uid=self._uid, homeID=self._home._resourceID))
+        if rows:
+            (self._resourceID,
+             self._md5,
+             self._size,
+             self._notificationType,
+             self._created,
+             self._modified,) = tuple(rows[0])
+            try:
+                self._notificationType = json.loads(self._notificationType)
+            except ValueError:
+                pass
+            if isinstance(self._notificationType, unicode):
+                self._notificationType = self._notificationType.encode(&quot;utf-8&quot;)
+            self._loadPropertyStore()
+            returnValue(self)
+        else:
+            returnValue(None)
+
+
+    def _loadPropertyStore(self, props=None, created=False):
+        if props is None:
+            props = NonePropertyStore(self._home.uid())
+        self._propertyStore = props
+
+
+    def properties(self):
+        return self._propertyStore
+
+
+    def id(self):
+        &quot;&quot;&quot;
+        Retrieve the store identifier for this object.
+
+        @return: store identifier.
+        @rtype: C{int}
+        &quot;&quot;&quot;
+        return self._resourceID
+
+
+    @property
+    def _txn(self):
+        return self._home._txn
+
+
+    def notificationCollection(self):
+        return self._home
+
+
+    def uid(self):
+        return self._uid
+
+
+    def name(self):
+        return self.uid() + &quot;.xml&quot;
+
+
+    @classproperty
+    def _newNotificationQuery(cls):
+        no = cls._objectSchema
+        return Insert(
+            {
+                no.NOTIFICATION_HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
+                no.NOTIFICATION_UID: Parameter(&quot;uid&quot;),
+                no.NOTIFICATION_TYPE: Parameter(&quot;notificationType&quot;),
+                no.NOTIFICATION_DATA: Parameter(&quot;notificationData&quot;),
+                no.MD5: Parameter(&quot;md5&quot;),
+            },
+            Return=[no.RESOURCE_ID, no.CREATED, no.MODIFIED]
+        )
+
+
+    @classproperty
+    def _updateNotificationQuery(cls):
+        no = cls._objectSchema
+        return Update(
+            {
+                no.NOTIFICATION_TYPE: Parameter(&quot;notificationType&quot;),
+                no.NOTIFICATION_DATA: Parameter(&quot;notificationData&quot;),
+                no.MD5: Parameter(&quot;md5&quot;),
+            },
+            Where=(no.NOTIFICATION_HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)).And(
+                no.NOTIFICATION_UID == Parameter(&quot;uid&quot;)),
+            Return=no.MODIFIED
+        )
+
+
+    @inlineCallbacks
+    def setData(self, uid, notificationtype, notificationdata, inserting=False):
+        &quot;&quot;&quot;
+        Set the object resource data and update the cached metadata.
+        &quot;&quot;&quot;
+
+        notificationtext = json.dumps(notificationdata)
+        self._notificationType = notificationtype
+        self._md5 = hashlib.md5(notificationtext).hexdigest()
+        self._size = len(notificationtext)
+        if inserting:
+            rows = yield self._newNotificationQuery.on(
+                self._txn, homeID=self._home._resourceID, uid=uid,
+                notificationType=json.dumps(self._notificationType),
+                notificationData=notificationtext, md5=self._md5
+            )
+            self._resourceID, self._created, self._modified = rows[0]
+            self._loadPropertyStore()
+        else:
+            rows = yield self._updateNotificationQuery.on(
+                self._txn, homeID=self._home._resourceID, uid=uid,
+                notificationType=json.dumps(self._notificationType),
+                notificationData=notificationtext, md5=self._md5
+            )
+            self._modified = rows[0][0]
+        self._notificationData = notificationdata
+
+    _notificationDataFromID = Select(
+        [_objectSchema.NOTIFICATION_DATA], From=_objectSchema,
+        Where=_objectSchema.RESOURCE_ID == Parameter(&quot;resourceID&quot;))
+
+
+    @inlineCallbacks
+    def notificationData(self):
+        if self._notificationData is None:
+            self._notificationData = (yield self._notificationDataFromID.on(self._txn, resourceID=self._resourceID))[0][0]
+            try:
+                self._notificationData = json.loads(self._notificationData)
+            except ValueError:
+                pass
+            if isinstance(self._notificationData, unicode):
+                self._notificationData = self._notificationData.encode(&quot;utf-8&quot;)
+        returnValue(self._notificationData)
+
+
+    def contentType(self):
+        &quot;&quot;&quot;
+        The content type of NotificationObjects is text/xml.
+        &quot;&quot;&quot;
+        return MimeType.fromString(&quot;text/xml&quot;)
+
+
+    def md5(self):
+        return self._md5
+
+
+    def size(self):
+        return self._size
+
+
+    def notificationType(self):
+        return self._notificationType
+
+
+    def created(self):
+        return datetimeMktime(parseSQLTimestamp(self._created))
+
+
+    def modified(self):
+        return datetimeMktime(parseSQLTimestamp(self._modified))
</ins></span></pre></div>
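<p>To show how the notification classes above fit together, here is a small sketch (hypothetical owner UID and payload; the JSON payload shape is illustrative, not prescribed by the changeset):</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def postNotification(txn, ownerUID):
    # Find or create the owner's notification collection.
    notifications = yield NotificationCollection.notificationsWithUID(
        txn, ownerUID, create=True
    )

    # Both the type and the data are stored as JSON; the app layer
    # renders XML from them later.
    yield notifications.writeNotificationObject(
        u'uid-0001',
        {'notification-type': 'invite-notification'},
        {'dtstamp': '20150306T214249Z', 'summary': 'example'},
    )

    # The sync token reflects the new revision within this transaction.
    token = yield notifications.syncToken()
    returnValue(token)
</pre>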
<a id="CalendarServertrunktxdavcommondatastoresql_schemacurrentoracledialectsql"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -29,9 +29,10 @@
</span><span class="cx"> 
</span><span class="cx"> create table CALENDAR_HOME (
</span><span class="cx">     &quot;RESOURCE_ID&quot; integer primary key,
</span><del>-    &quot;OWNER_UID&quot; nvarchar2(255) unique,
</del><ins>+    &quot;OWNER_UID&quot; nvarchar2(255),
</ins><span class="cx">     &quot;STATUS&quot; integer default 0 not null,
</span><del>-    &quot;DATAVERSION&quot; integer default 0 not null
</del><ins>+    &quot;DATAVERSION&quot; integer default 0 not null, 
+    unique (&quot;OWNER_UID&quot;, &quot;STATUS&quot;)
</ins><span class="cx"> );
</span><span class="cx"> 
</span><span class="cx"> create table HOME_STATUS (
</span><span class="lines">@@ -42,6 +43,8 @@
</span><span class="cx"> insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
</span><span class="cx"> insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
</span><span class="cx"> insert into HOME_STATUS (DESCRIPTION, ID) values ('purging', 2);
</span><ins>+insert into HOME_STATUS (DESCRIPTION, ID) values ('migrating', 3);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('disabled', 4);
</ins><span class="cx"> create table CALENDAR (
</span><span class="cx">     &quot;RESOURCE_ID&quot; integer primary key
</span><span class="cx"> );
</span><span class="lines">@@ -68,11 +71,20 @@
</span><span class="cx">     &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
</span><span class="cx"> );
</span><span class="cx"> 
</span><ins>+create table CALENDAR_MIGRATION (
+    &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer references CALENDAR_HOME on delete cascade,
+    &quot;REMOTE_RESOURCE_ID&quot; integer not null,
+    &quot;LOCAL_RESOURCE_ID&quot; integer references CALENDAR on delete cascade,
+    &quot;LAST_SYNC_TOKEN&quot; nvarchar2(255), 
+    primary key (&quot;CALENDAR_HOME_RESOURCE_ID&quot;, &quot;REMOTE_RESOURCE_ID&quot;)
+);
+
</ins><span class="cx"> create table NOTIFICATION_HOME (
</span><span class="cx">     &quot;RESOURCE_ID&quot; integer primary key,
</span><del>-    &quot;OWNER_UID&quot; nvarchar2(255) unique,
</del><ins>+    &quot;OWNER_UID&quot; nvarchar2(255),
</ins><span class="cx">     &quot;STATUS&quot; integer default 0 not null,
</span><del>-    &quot;DATAVERSION&quot; integer default 0 not null
</del><ins>+    &quot;DATAVERSION&quot; integer default 0 not null, 
+    unique (&quot;OWNER_UID&quot;, &quot;STATUS&quot;)
</ins><span class="cx"> );
</span><span class="cx"> 
</span><span class="cx"> create table NOTIFICATION (
</span><span class="lines">@@ -90,11 +102,11 @@
</span><span class="cx"> create table CALENDAR_BIND (
</span><span class="cx">     &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME,
</span><span class="cx">     &quot;CALENDAR_RESOURCE_ID&quot; integer not null references CALENDAR on delete cascade,
</span><del>-    &quot;EXTERNAL_ID&quot; integer default null,
</del><span class="cx">     &quot;CALENDAR_RESOURCE_NAME&quot; nvarchar2(255),
</span><span class="cx">     &quot;BIND_MODE&quot; integer not null,
</span><span class="cx">     &quot;BIND_STATUS&quot; integer not null,
</span><span class="cx">     &quot;BIND_REVISION&quot; integer default 0 not null,
</span><ins>+    &quot;BIND_UID&quot; nvarchar2(36) default null,
</ins><span class="cx">     &quot;MESSAGE&quot; nclob,
</span><span class="cx">     &quot;TRANSP&quot; integer default 0 not null,
</span><span class="cx">     &quot;ALARM_VEVENT_TIMED&quot; nclob default null,
</span><span class="lines">@@ -208,6 +220,13 @@
</span><span class="cx">     primary key (&quot;TIME_RANGE_INSTANCE_ID&quot;, &quot;USER_ID&quot;)
</span><span class="cx"> );
</span><span class="cx"> 
</span><ins>+create table CALENDAR_OBJECT_MIGRATION (
+    &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer references CALENDAR_HOME on delete cascade,
+    &quot;REMOTE_RESOURCE_ID&quot; integer not null,
+    &quot;LOCAL_RESOURCE_ID&quot; integer references CALENDAR_OBJECT on delete cascade, 
+    primary key (&quot;CALENDAR_HOME_RESOURCE_ID&quot;, &quot;REMOTE_RESOURCE_ID&quot;)
+);
+
</ins><span class="cx"> create table ATTACHMENT (
</span><span class="cx">     &quot;ATTACHMENT_ID&quot; integer primary key,
</span><span class="cx">     &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME,
</span><span class="lines">@@ -228,6 +247,13 @@
</span><span class="cx">     unique (&quot;MANAGED_ID&quot;, &quot;CALENDAR_OBJECT_RESOURCE_ID&quot;)
</span><span class="cx"> );
</span><span class="cx"> 
</span><ins>+create table ATTACHMENT_MIGRATION (
+    &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer references CALENDAR_HOME on delete cascade,
+    &quot;REMOTE_RESOURCE_ID&quot; integer not null,
+    &quot;LOCAL_RESOURCE_ID&quot; integer references ATTACHMENT on delete cascade, 
+    primary key (&quot;CALENDAR_HOME_RESOURCE_ID&quot;, &quot;REMOTE_RESOURCE_ID&quot;)
+);
+
</ins><span class="cx"> create table RESOURCE_PROPERTY (
</span><span class="cx">     &quot;RESOURCE_ID&quot; integer not null,
</span><span class="cx">     &quot;NAME&quot; nvarchar2(255),
</span><span class="lines">@@ -239,9 +265,10 @@
</span><span class="cx"> create table ADDRESSBOOK_HOME (
</span><span class="cx">     &quot;RESOURCE_ID&quot; integer primary key,
</span><span class="cx">     &quot;ADDRESSBOOK_PROPERTY_STORE_ID&quot; integer not null,
</span><del>-    &quot;OWNER_UID&quot; nvarchar2(255) unique,
</del><ins>+    &quot;OWNER_UID&quot; nvarchar2(255),
</ins><span class="cx">     &quot;STATUS&quot; integer default 0 not null,
</span><del>-    &quot;DATAVERSION&quot; integer default 0 not null
</del><ins>+    &quot;DATAVERSION&quot; integer default 0 not null, 
+    unique (&quot;OWNER_UID&quot;, &quot;STATUS&quot;)
</ins><span class="cx"> );
</span><span class="cx"> 
</span><span class="cx"> create table ADDRESSBOOK_HOME_METADATA (
</span><span class="lines">@@ -254,11 +281,11 @@
</span><span class="cx"> create table SHARED_ADDRESSBOOK_BIND (
</span><span class="cx">     &quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot; integer not null references ADDRESSBOOK_HOME,
</span><span class="cx">     &quot;OWNER_HOME_RESOURCE_ID&quot; integer not null references ADDRESSBOOK_HOME on delete cascade,
</span><del>-    &quot;EXTERNAL_ID&quot; integer default null,
</del><span class="cx">     &quot;ADDRESSBOOK_RESOURCE_NAME&quot; nvarchar2(255),
</span><span class="cx">     &quot;BIND_MODE&quot; integer not null,
</span><span class="cx">     &quot;BIND_STATUS&quot; integer not null,
</span><span class="cx">     &quot;BIND_REVISION&quot; integer default 0 not null,
</span><ins>+    &quot;BIND_UID&quot; nvarchar2(36) default null,
</ins><span class="cx">     &quot;MESSAGE&quot; nclob, 
</span><span class="cx">     primary key (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;OWNER_HOME_RESOURCE_ID&quot;), 
</span><span class="cx">     unique (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;ADDRESSBOOK_RESOURCE_NAME&quot;)
</span><span class="lines">@@ -308,11 +335,11 @@
</span><span class="cx"> create table SHARED_GROUP_BIND (
</span><span class="cx">     &quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot; integer not null references ADDRESSBOOK_HOME,
</span><span class="cx">     &quot;GROUP_RESOURCE_ID&quot; integer not null references ADDRESSBOOK_OBJECT on delete cascade,
</span><del>-    &quot;EXTERNAL_ID&quot; integer default null,
</del><span class="cx">     &quot;GROUP_ADDRESSBOOK_NAME&quot; nvarchar2(255),
</span><span class="cx">     &quot;BIND_MODE&quot; integer not null,
</span><span class="cx">     &quot;BIND_STATUS&quot; integer not null,
</span><span class="cx">     &quot;BIND_REVISION&quot; integer default 0 not null,
</span><ins>+    &quot;BIND_UID&quot; nvarchar2(36) default null,
</ins><span class="cx">     &quot;MESSAGE&quot; nclob, 
</span><span class="cx">     primary key (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;GROUP_RESOURCE_ID&quot;), 
</span><span class="cx">     unique (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;GROUP_ADDRESSBOOK_NAME&quot;)
</span><span class="lines">@@ -607,7 +634,7 @@
</span><span class="cx">     &quot;VALUE&quot; nvarchar2(255)
</span><span class="cx"> );
</span><span class="cx"> 
</span><del>-insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '51');
</del><ins>+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '52');
</ins><span class="cx"> insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '6');
</span><span class="cx"> insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
</span><span class="cx"> insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
</span><span class="lines">@@ -624,6 +651,10 @@
</span><span class="cx">     DEFAULT_POLLS
</span><span class="cx"> );
</span><span class="cx"> 
</span><ins>+create index CALENDAR_MIGRATION_LO_0525c72b on CALENDAR_MIGRATION (
+    LOCAL_RESOURCE_ID
+);
+
</ins><span class="cx"> create index NOTIFICATION_NOTIFICA_f891f5f9 on NOTIFICATION (
</span><span class="cx">     NOTIFICATION_HOME_RESOURCE_ID
</span><span class="cx"> );
</span><span class="lines">@@ -659,6 +690,15 @@
</span><span class="cx">     CALENDAR_OBJECT_RESOURCE_ID
</span><span class="cx"> );
</span><span class="cx"> 
</span><ins>+create index CALENDAR_OBJECT_MIGRA_0502cbef on CALENDAR_OBJECT_MIGRATION (
+    CALENDAR_HOME_RESOURCE_ID,
+    LOCAL_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_MIGRA_3577efd9 on CALENDAR_OBJECT_MIGRATION (
+    LOCAL_RESOURCE_ID
+);
+
</ins><span class="cx"> create index ATTACHMENT_CALENDAR_H_0078845c on ATTACHMENT (
</span><span class="cx">     CALENDAR_HOME_RESOURCE_ID
</span><span class="cx"> );
</span><span class="lines">@@ -671,6 +711,15 @@
</span><span class="cx">     CALENDAR_OBJECT_RESOURCE_ID
</span><span class="cx"> );
</span><span class="cx"> 
</span><ins>+create index ATTACHMENT_MIGRATION__804bf85e on ATTACHMENT_MIGRATION (
+    CALENDAR_HOME_RESOURCE_ID,
+    LOCAL_RESOURCE_ID
+);
+
+create index ATTACHMENT_MIGRATION__816947fe on ATTACHMENT_MIGRATION (
+    LOCAL_RESOURCE_ID
+);
+
</ins><span class="cx"> create index SHARED_ADDRESSBOOK_BI_e9a2e6d4 on SHARED_ADDRESSBOOK_BIND (
</span><span class="cx">     OWNER_HOME_RESOURCE_ID
</span><span class="cx"> );
</span></span></pre></div>
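<p>Both dialects replace the single-column unique on OWNER_UID with the composite unique (OWNER_UID, STATUS). That is the change which lets one owner hold a 'normal' home and a 'migrating' replica at the same time during pod-to-pod migration. A runnable sketch of the constraint's behavior, illustrative only and using in-memory SQLite in place of Oracle or PostgreSQL:</p>
<pre>
import sqlite3

db = sqlite3.connect(':memory:')
db.execute('''
    create table CALENDAR_HOME (
        RESOURCE_ID integer primary key,
        OWNER_UID   varchar(255) not null,
        STATUS      integer default 0 not null,
        DATAVERSION integer default 0 not null,
        unique (OWNER_UID, STATUS)
    )''')

NORMAL, MIGRATING = 0, 3  # HOME_STATUS ids from the schema above

# One owner may now have both a normal home and a migrating replica...
db.execute('insert into CALENDAR_HOME (OWNER_UID, STATUS) values (?, ?)',
           ('user01', NORMAL))
db.execute('insert into CALENDAR_HOME (OWNER_UID, STATUS) values (?, ?)',
           ('user01', MIGRATING))

# ...but a second home with the same (owner, status) pair is rejected.
try:
    db.execute('insert into CALENDAR_HOME (OWNER_UID, STATUS) values (?, ?)',
               ('user01', NORMAL))
except sqlite3.IntegrityError as e:
    print 'rejected duplicate:', e
</pre>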
<a id="CalendarServertrunktxdavcommondatastoresql_schemacurrentsql"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -70,9 +70,11 @@
</span><span class="cx"> 
</span><span class="cx"> create table CALENDAR_HOME (
</span><span class="cx">   RESOURCE_ID      integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
</span><del>-  OWNER_UID        varchar(255) not null unique,                                -- implicit index
</del><ins>+  OWNER_UID        varchar(255) not null,
</ins><span class="cx">   STATUS           integer      default 0 not null,                             -- enum HOME_STATUS
</span><del>-  DATAVERSION      integer      default 0 not null
</del><ins>+  DATAVERSION      integer      default 0 not null,
+  
+  unique (OWNER_UID, STATUS)        -- implicit index
</ins><span class="cx"> );
</span><span class="cx"> 
</span><span class="cx"> -- Enumeration of statuses
</span><span class="lines">@@ -85,6 +87,8 @@
</span><span class="cx"> insert into HOME_STATUS values (0, 'normal' );
</span><span class="cx"> insert into HOME_STATUS values (1, 'external');
</span><span class="cx"> insert into HOME_STATUS values (2, 'purging');
</span><ins>+insert into HOME_STATUS values (3, 'migrating');
+insert into HOME_STATUS values (4, 'disabled');
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> --------------
</span><span class="lines">@@ -135,15 +139,34 @@
</span><span class="cx"> );
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+------------------------
+-- Calendar Migration --
+------------------------
+
+create table CALENDAR_MIGRATION (
+  CALENDAR_HOME_RESOURCE_ID  integer       references CALENDAR_HOME on delete cascade,
+  REMOTE_RESOURCE_ID         integer       not null,
+  LOCAL_RESOURCE_ID          integer       references CALENDAR on delete cascade,
+  LAST_SYNC_TOKEN            varchar(255),
+
+  primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index CALENDAR_MIGRATION_LOCAL_RESOURCE_ID on
+  CALENDAR_MIGRATION(LOCAL_RESOURCE_ID);
+
+
</ins><span class="cx"> ---------------------------
</span><span class="cx"> -- Sharing Notifications --
</span><span class="cx"> ---------------------------
</span><span class="cx"> 
</span><span class="cx"> create table NOTIFICATION_HOME (
</span><span class="cx">   RESOURCE_ID integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
</span><del>-  OWNER_UID   varchar(255) not null unique,                                -- implicit index
</del><ins>+  OWNER_UID   varchar(255) not null,
</ins><span class="cx">   STATUS      integer      default 0 not null,                             -- enum HOME_STATUS
</span><del>-  DATAVERSION integer      default 0 not null
</del><ins>+  DATAVERSION integer      default 0 not null,
+    
+  unique (OWNER_UID, STATUS)        -- implicit index
</ins><span class="cx"> );
</span><span class="cx"> 
</span><span class="cx"> create table NOTIFICATION (
</span><span class="lines">@@ -172,11 +195,11 @@
</span><span class="cx"> create table CALENDAR_BIND (
</span><span class="cx">   CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
</span><span class="cx">   CALENDAR_RESOURCE_ID      integer      not null references CALENDAR on delete cascade,
</span><del>-  EXTERNAL_ID               integer      default null,
</del><span class="cx">   CALENDAR_RESOURCE_NAME    varchar(255) not null,
</span><span class="cx">   BIND_MODE                 integer      not null, -- enum CALENDAR_BIND_MODE
</span><span class="cx">   BIND_STATUS               integer      not null, -- enum CALENDAR_BIND_STATUS
</span><span class="cx">   BIND_REVISION             integer      default 0 not null,
</span><ins>+  BIND_UID                  varchar(36)  default null,
</ins><span class="cx">   MESSAGE                   text,
</span><span class="cx">   TRANSP                    integer      default 0 not null, -- enum CALENDAR_TRANSP
</span><span class="cx">   ALARM_VEVENT_TIMED        text         default null,
</span><span class="lines">@@ -363,6 +386,24 @@
</span><span class="cx"> );
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+-------------------------------
+-- Calendar Object Migration --
+-------------------------------
+
+create table CALENDAR_OBJECT_MIGRATION (
+  CALENDAR_HOME_RESOURCE_ID  integer references CALENDAR_HOME on delete cascade,
+  REMOTE_RESOURCE_ID         integer not null,
+  LOCAL_RESOURCE_ID          integer references CALENDAR_OBJECT on delete cascade,
+
+  primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index CALENDAR_OBJECT_MIGRATION_HOME_LOCAL on
+  CALENDAR_OBJECT_MIGRATION(CALENDAR_HOME_RESOURCE_ID, LOCAL_RESOURCE_ID);
+create index CALENDAR_OBJECT_MIGRATION_LOCAL_RESOURCE_ID on
+  CALENDAR_OBJECT_MIGRATION(LOCAL_RESOURCE_ID);
+
+
</ins><span class="cx"> ----------------
</span><span class="cx"> -- Attachment --
</span><span class="cx"> ----------------
</span><span class="lines">@@ -400,6 +441,24 @@
</span><span class="cx"> create index ATTACHMENT_CALENDAR_OBJECT_CALENDAR_OBJECT_RESOURCE_ID on
</span><span class="cx">   ATTACHMENT_CALENDAR_OBJECT(CALENDAR_OBJECT_RESOURCE_ID);
</span><span class="cx"> 
</span><ins>+-----------------------------------
+-- Calendar Attachment Migration --
+-----------------------------------
+
+create table ATTACHMENT_MIGRATION (
+  CALENDAR_HOME_RESOURCE_ID  integer references CALENDAR_HOME on delete cascade,
+  REMOTE_RESOURCE_ID         integer not null,
+  LOCAL_RESOURCE_ID          integer references ATTACHMENT on delete cascade,
+
+  primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index ATTACHMENT_MIGRATION_HOME_LOCAL on
+  ATTACHMENT_MIGRATION(CALENDAR_HOME_RESOURCE_ID, LOCAL_RESOURCE_ID);
+create index ATTACHMENT_MIGRATION_LOCAL_RESOURCE_ID on
+  ATTACHMENT_MIGRATION(LOCAL_RESOURCE_ID);
+
+
</ins><span class="cx"> -----------------------
</span><span class="cx"> -- Resource Property --
</span><span class="cx"> -----------------------
</span><span class="lines">@@ -421,9 +480,11 @@
</span><span class="cx"> create table ADDRESSBOOK_HOME (
</span><span class="cx">   RESOURCE_ID                   integer         primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
</span><span class="cx">   ADDRESSBOOK_PROPERTY_STORE_ID integer         default nextval('RESOURCE_ID_SEQ') not null,    -- implicit index
</span><del>-  OWNER_UID                     varchar(255)    not null unique,                                -- implicit index
</del><ins>+  OWNER_UID                     varchar(255)    not null,
</ins><span class="cx">   STATUS                        integer         default 0 not null,                             -- enum HOME_STATUS
</span><del>-  DATAVERSION                   integer         default 0 not null
</del><ins>+  DATAVERSION                   integer         default 0 not null,
+    
+  unique (OWNER_UID, STATUS)        -- implicit index
</ins><span class="cx"> );
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -448,11 +509,11 @@
</span><span class="cx"> create table SHARED_ADDRESSBOOK_BIND (
</span><span class="cx">   ADDRESSBOOK_HOME_RESOURCE_ID          integer         not null references ADDRESSBOOK_HOME,
</span><span class="cx">   OWNER_HOME_RESOURCE_ID                integer         not null references ADDRESSBOOK_HOME on delete cascade,
</span><del>-  EXTERNAL_ID                           integer         default null,
</del><span class="cx">   ADDRESSBOOK_RESOURCE_NAME             varchar(255)    not null,
</span><span class="cx">   BIND_MODE                             integer         not null, -- enum CALENDAR_BIND_MODE
</span><span class="cx">   BIND_STATUS                           integer         not null, -- enum CALENDAR_BIND_STATUS
</span><span class="cx">   BIND_REVISION                         integer         default 0 not null,
</span><ins>+  BIND_UID                              varchar(36)     default null,
</ins><span class="cx">   MESSAGE                               text,                     -- FIXME: xml?
</span><span class="cx"> 
</span><span class="cx">   primary key (ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID), -- implicit index
</span><span class="lines">@@ -549,11 +610,11 @@
</span><span class="cx"> create table SHARED_GROUP_BIND (
</span><span class="cx">   ADDRESSBOOK_HOME_RESOURCE_ID      integer      not null references ADDRESSBOOK_HOME,
</span><span class="cx">   GROUP_RESOURCE_ID                 integer      not null references ADDRESSBOOK_OBJECT on delete cascade,
</span><del>-  EXTERNAL_ID                       integer      default null,
</del><span class="cx">   GROUP_ADDRESSBOOK_NAME            varchar(255) not null,
</span><span class="cx">   BIND_MODE                         integer      not null, -- enum CALENDAR_BIND_MODE
</span><span class="cx">   BIND_STATUS                       integer      not null, -- enum CALENDAR_BIND_STATUS
</span><span class="cx">   BIND_REVISION                     integer      default 0 not null,
</span><ins>+  BIND_UID                          varchar(36)  default null,
</ins><span class="cx">   MESSAGE                           text,                  -- FIXME: xml?
</span><span class="cx"> 
</span><span class="cx">   primary key (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_RESOURCE_ID), -- implicit index
</span><span class="lines">@@ -873,7 +934,7 @@
</span><span class="cx">   DELEGATOR                     varchar(255) not null,
</span><span class="cx">   GROUP_ID                      integer      not null references GROUPS on delete cascade,
</span><span class="cx">   READ_WRITE                    integer      not null, -- 1 = ReadWrite, 0 = ReadOnly
</span><del>-  IS_EXTERNAL                   integer      not null, -- 1 = ReadWrite, 0 = ReadOnly
</del><ins>+  IS_EXTERNAL                   integer      not null, -- 1 = External, 0 = Internal
</ins><span class="cx"> 
</span><span class="cx">   primary key (DELEGATOR, READ_WRITE, GROUP_ID)
</span><span class="cx"> );
</span><span class="lines">@@ -1150,7 +1211,7 @@
</span><span class="cx">   VALUE                         varchar(255)
</span><span class="cx"> );
</span><span class="cx"> 
</span><del>-insert into CALENDARSERVER values ('VERSION', '51');
</del><ins>+insert into CALENDARSERVER values ('VERSION', '52');
</ins><span class="cx"> insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '6');
</span><span class="cx"> insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
</span><span class="cx"> insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');
</span></span></pre></div>
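<p>The three new *_MIGRATION tables are bookkeeping maps for pod-to-pod migration: each row ties a resource id on the remote pod to its local replica within one home, and CALENDAR_MIGRATION also carries the last sync token so repeated passes can pull changes incrementally. The sketch below shows that usage pattern; the helper functions are hypothetical (not the data-store API) and SQLite stands in for the real database:</p>
<pre>
import sqlite3

db = sqlite3.connect(':memory:')
db.execute('''
    create table CALENDAR_MIGRATION (
        CALENDAR_HOME_RESOURCE_ID  integer,
        REMOTE_RESOURCE_ID         integer not null,
        LOCAL_RESOURCE_ID          integer,
        LAST_SYNC_TOKEN            varchar(255),
        primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID)
    )''')

def record_calendar(home_id, remote_id, local_id, token):
    # First pass over a calendar: remember which local replica mirrors it.
    db.execute('insert into CALENDAR_MIGRATION values (?, ?, ?, ?)',
               (home_id, remote_id, local_id, token))

def local_replica(home_id, remote_id):
    # Later passes look the replica up instead of re-creating it.
    return db.execute(
        'select LOCAL_RESOURCE_ID, LAST_SYNC_TOKEN from CALENDAR_MIGRATION '
        'where CALENDAR_HOME_RESOURCE_ID = ? and REMOTE_RESOURCE_ID = ?',
        (home_id, remote_id)).fetchone()

record_calendar(42, 1001, 7, 'sync-token-1')
print local_replica(42, 1001)  # prints (7, u'sync-token-1')
</pre>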
<a id="CalendarServertrunktxdavcommondatastoresql_schemaoldoracledialectv51sqlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_schemaoldoracledialectv51sql"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v51.sql (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/old/oracle-dialect/v51.sql) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v51.sql                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v51.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,939 @@
</span><ins>+create sequence RESOURCE_ID_SEQ;
+create sequence JOB_SEQ;
+create sequence INSTANCE_ID_SEQ;
+create sequence ATTACHMENT_ID_SEQ;
+create sequence REVISION_SEQ;
+create sequence WORKITEM_SEQ;
+create table NODE_INFO (
+    &quot;HOSTNAME&quot; nvarchar2(255),
+    &quot;PID&quot; integer not null,
+    &quot;PORT&quot; integer not null,
+    &quot;TIME&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC' not null, 
+    primary key (&quot;HOSTNAME&quot;, &quot;PORT&quot;)
+);
+
+create table NAMED_LOCK (
+    &quot;LOCK_NAME&quot; nvarchar2(255) primary key
+);
+
+create table JOB (
+    &quot;JOB_ID&quot; integer primary key,
+    &quot;WORK_TYPE&quot; nvarchar2(255),
+    &quot;PRIORITY&quot; integer default 0,
+    &quot;WEIGHT&quot; integer default 0,
+    &quot;NOT_BEFORE&quot; timestamp not null,
+    &quot;ASSIGNED&quot; timestamp default null,
+    &quot;OVERDUE&quot; timestamp default null,
+    &quot;FAILED&quot; integer default 0
+);
+
+create table CALENDAR_HOME (
+    &quot;RESOURCE_ID&quot; integer primary key,
+    &quot;OWNER_UID&quot; nvarchar2(255) unique,
+    &quot;STATUS&quot; integer default 0 not null,
+    &quot;DATAVERSION&quot; integer default 0 not null
+);
+
+create table HOME_STATUS (
+    &quot;ID&quot; integer primary key,
+    &quot;DESCRIPTION&quot; nvarchar2(16) unique
+);
+
+insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('purging', 2);
+create table CALENDAR (
+    &quot;RESOURCE_ID&quot; integer primary key
+);
+
+create table CALENDAR_HOME_METADATA (
+    &quot;RESOURCE_ID&quot; integer primary key references CALENDAR_HOME on delete cascade,
+    &quot;QUOTA_USED_BYTES&quot; integer default 0 not null,
+    &quot;DEFAULT_EVENTS&quot; integer default null references CALENDAR on delete set null,
+    &quot;DEFAULT_TASKS&quot; integer default null references CALENDAR on delete set null,
+    &quot;DEFAULT_POLLS&quot; integer default null references CALENDAR on delete set null,
+    &quot;ALARM_VEVENT_TIMED&quot; nclob default null,
+    &quot;ALARM_VEVENT_ALLDAY&quot; nclob default null,
+    &quot;ALARM_VTODO_TIMED&quot; nclob default null,
+    &quot;ALARM_VTODO_ALLDAY&quot; nclob default null,
+    &quot;AVAILABILITY&quot; nclob default null,
+    &quot;CREATED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_METADATA (
+    &quot;RESOURCE_ID&quot; integer primary key references CALENDAR on delete cascade,
+    &quot;SUPPORTED_COMPONENTS&quot; nvarchar2(255) default null,
+    &quot;CREATED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table NOTIFICATION_HOME (
+    &quot;RESOURCE_ID&quot; integer primary key,
+    &quot;OWNER_UID&quot; nvarchar2(255) unique,
+    &quot;STATUS&quot; integer default 0 not null,
+    &quot;DATAVERSION&quot; integer default 0 not null
+);
+
+create table NOTIFICATION (
+    &quot;RESOURCE_ID&quot; integer primary key,
+    &quot;NOTIFICATION_HOME_RESOURCE_ID&quot; integer not null references NOTIFICATION_HOME,
+    &quot;NOTIFICATION_UID&quot; nvarchar2(255),
+    &quot;NOTIFICATION_TYPE&quot; nvarchar2(255),
+    &quot;NOTIFICATION_DATA&quot; nclob,
+    &quot;MD5&quot; nchar(32),
+    &quot;CREATED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique (&quot;NOTIFICATION_UID&quot;, &quot;NOTIFICATION_HOME_RESOURCE_ID&quot;)
+);
+
+create table CALENDAR_BIND (
+    &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME,
+    &quot;CALENDAR_RESOURCE_ID&quot; integer not null references CALENDAR on delete cascade,
+    &quot;EXTERNAL_ID&quot; integer default null,
+    &quot;CALENDAR_RESOURCE_NAME&quot; nvarchar2(255),
+    &quot;BIND_MODE&quot; integer not null,
+    &quot;BIND_STATUS&quot; integer not null,
+    &quot;BIND_REVISION&quot; integer default 0 not null,
+    &quot;MESSAGE&quot; nclob,
+    &quot;TRANSP&quot; integer default 0 not null,
+    &quot;ALARM_VEVENT_TIMED&quot; nclob default null,
+    &quot;ALARM_VEVENT_ALLDAY&quot; nclob default null,
+    &quot;ALARM_VTODO_TIMED&quot; nclob default null,
+    &quot;ALARM_VTODO_ALLDAY&quot; nclob default null,
+    &quot;TIMEZONE&quot; nclob default null, 
+    primary key (&quot;CALENDAR_HOME_RESOURCE_ID&quot;, &quot;CALENDAR_RESOURCE_ID&quot;), 
+    unique (&quot;CALENDAR_HOME_RESOURCE_ID&quot;, &quot;CALENDAR_RESOURCE_NAME&quot;)
+);
+
+create table CALENDAR_BIND_MODE (
+    &quot;ID&quot; integer primary key,
+    &quot;DESCRIPTION&quot; nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('own', 0);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('write', 2);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('direct', 3);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('indirect', 4);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('group', 5);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('group_read', 6);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('group_write', 7);
+create table CALENDAR_BIND_STATUS (
+    &quot;ID&quot; integer primary key,
+    &quot;DESCRIPTION&quot; nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invited', 0);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('accepted', 1);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('declined', 2);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invalid', 3);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('deleted', 4);
+create table CALENDAR_TRANSP (
+    &quot;ID&quot; integer primary key,
+    &quot;DESCRIPTION&quot; nvarchar2(16) unique
+);
+
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('opaque', 0);
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('transparent', 1);
+create table CALENDAR_OBJECT (
+    &quot;RESOURCE_ID&quot; integer primary key,
+    &quot;CALENDAR_RESOURCE_ID&quot; integer not null references CALENDAR on delete cascade,
+    &quot;RESOURCE_NAME&quot; nvarchar2(255),
+    &quot;ICALENDAR_TEXT&quot; nclob,
+    &quot;ICALENDAR_UID&quot; nvarchar2(255),
+    &quot;ICALENDAR_TYPE&quot; nvarchar2(255),
+    &quot;ATTACHMENTS_MODE&quot; integer default 0 not null,
+    &quot;DROPBOX_ID&quot; nvarchar2(255),
+    &quot;ORGANIZER&quot; nvarchar2(255),
+    &quot;RECURRANCE_MIN&quot; date,
+    &quot;RECURRANCE_MAX&quot; date,
+    &quot;ACCESS&quot; integer default 0 not null,
+    &quot;SCHEDULE_OBJECT&quot; integer default 0,
+    &quot;SCHEDULE_TAG&quot; nvarchar2(36) default null,
+    &quot;SCHEDULE_ETAGS&quot; nclob default null,
+    &quot;PRIVATE_COMMENTS&quot; integer default 0 not null,
+    &quot;MD5&quot; nchar(32),
+    &quot;CREATED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;DATAVERSION&quot; integer default 0 not null, 
+    unique (&quot;CALENDAR_RESOURCE_ID&quot;, &quot;RESOURCE_NAME&quot;)
+);
+
+create table CALENDAR_OBJ_ATTACHMENTS_MODE (
+    &quot;ID&quot; integer primary key,
+    &quot;DESCRIPTION&quot; nvarchar2(16) unique
+);
+
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE (DESCRIPTION, ID) values ('none', 0);
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE (DESCRIPTION, ID) values ('write', 2);
+create table CALENDAR_ACCESS_TYPE (
+    &quot;ID&quot; integer primary key,
+    &quot;DESCRIPTION&quot; nvarchar2(32) unique
+);
+
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('', 0);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('public', 1);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('private', 2);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('confidential', 3);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('restricted', 4);
+create table TIME_RANGE (
+    &quot;INSTANCE_ID&quot; integer primary key,
+    &quot;CALENDAR_RESOURCE_ID&quot; integer not null references CALENDAR on delete cascade,
+    &quot;CALENDAR_OBJECT_RESOURCE_ID&quot; integer not null references CALENDAR_OBJECT on delete cascade,
+    &quot;FLOATING&quot; integer not null,
+    &quot;START_DATE&quot; timestamp not null,
+    &quot;END_DATE&quot; timestamp not null,
+    &quot;FBTYPE&quot; integer not null,
+    &quot;TRANSPARENT&quot; integer not null
+);
+
+create table FREE_BUSY_TYPE (
+    &quot;ID&quot; integer primary key,
+    &quot;DESCRIPTION&quot; nvarchar2(16) unique
+);
+
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('unknown', 0);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('free', 1);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy', 2);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-unavailable', 3);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-tentative', 4);
+create table PERUSER (
+    &quot;TIME_RANGE_INSTANCE_ID&quot; integer not null references TIME_RANGE on delete cascade,
+    &quot;USER_ID&quot; nvarchar2(255),
+    &quot;TRANSPARENT&quot; integer not null,
+    &quot;ADJUSTED_START_DATE&quot; timestamp default null,
+    &quot;ADJUSTED_END_DATE&quot; timestamp default null, 
+    primary key (&quot;TIME_RANGE_INSTANCE_ID&quot;, &quot;USER_ID&quot;)
+);
+
+create table ATTACHMENT (
+    &quot;ATTACHMENT_ID&quot; integer primary key,
+    &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME,
+    &quot;DROPBOX_ID&quot; nvarchar2(255),
+    &quot;CONTENT_TYPE&quot; nvarchar2(255),
+    &quot;SIZE&quot; integer not null,
+    &quot;MD5&quot; nchar(32),
+    &quot;CREATED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;PATH&quot; nvarchar2(1024)
+);
+
+create table ATTACHMENT_CALENDAR_OBJECT (
+    &quot;ATTACHMENT_ID&quot; integer not null references ATTACHMENT on delete cascade,
+    &quot;MANAGED_ID&quot; nvarchar2(255),
+    &quot;CALENDAR_OBJECT_RESOURCE_ID&quot; integer not null references CALENDAR_OBJECT on delete cascade, 
+    primary key (&quot;ATTACHMENT_ID&quot;, &quot;CALENDAR_OBJECT_RESOURCE_ID&quot;), 
+    unique (&quot;MANAGED_ID&quot;, &quot;CALENDAR_OBJECT_RESOURCE_ID&quot;)
+);
+
+create table RESOURCE_PROPERTY (
+    &quot;RESOURCE_ID&quot; integer not null,
+    &quot;NAME&quot; nvarchar2(255),
+    &quot;VALUE&quot; nclob,
+    &quot;VIEWER_UID&quot; nvarchar2(255), 
+    primary key (&quot;RESOURCE_ID&quot;, &quot;NAME&quot;, &quot;VIEWER_UID&quot;)
+);
+
+create table ADDRESSBOOK_HOME (
+    &quot;RESOURCE_ID&quot; integer primary key,
+    &quot;ADDRESSBOOK_PROPERTY_STORE_ID&quot; integer not null,
+    &quot;OWNER_UID&quot; nvarchar2(255) unique,
+    &quot;STATUS&quot; integer default 0 not null,
+    &quot;DATAVERSION&quot; integer default 0 not null
+);
+
+create table ADDRESSBOOK_HOME_METADATA (
+    &quot;RESOURCE_ID&quot; integer primary key references ADDRESSBOOK_HOME on delete cascade,
+    &quot;QUOTA_USED_BYTES&quot; integer default 0 not null,
+    &quot;CREATED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table SHARED_ADDRESSBOOK_BIND (
+    &quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot; integer not null references ADDRESSBOOK_HOME,
+    &quot;OWNER_HOME_RESOURCE_ID&quot; integer not null references ADDRESSBOOK_HOME on delete cascade,
+    &quot;EXTERNAL_ID&quot; integer default null,
+    &quot;ADDRESSBOOK_RESOURCE_NAME&quot; nvarchar2(255),
+    &quot;BIND_MODE&quot; integer not null,
+    &quot;BIND_STATUS&quot; integer not null,
+    &quot;BIND_REVISION&quot; integer default 0 not null,
+    &quot;MESSAGE&quot; nclob, 
+    primary key (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;OWNER_HOME_RESOURCE_ID&quot;), 
+    unique (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;ADDRESSBOOK_RESOURCE_NAME&quot;)
+);
+
+create table ADDRESSBOOK_OBJECT (
+    &quot;RESOURCE_ID&quot; integer primary key,
+    &quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot; integer not null references ADDRESSBOOK_HOME on delete cascade,
+    &quot;RESOURCE_NAME&quot; nvarchar2(255),
+    &quot;VCARD_TEXT&quot; nclob,
+    &quot;VCARD_UID&quot; nvarchar2(255),
+    &quot;KIND&quot; integer not null,
+    &quot;MD5&quot; nchar(32),
+    &quot;CREATED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;DATAVERSION&quot; integer default 0 not null, 
+    unique (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;RESOURCE_NAME&quot;), 
+    unique (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;VCARD_UID&quot;)
+);
+
+create table ADDRESSBOOK_OBJECT_KIND (
+    &quot;ID&quot; integer primary key,
+    &quot;DESCRIPTION&quot; nvarchar2(16) unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('person', 0);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('group', 1);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('resource', 2);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('location', 3);
+create table ABO_MEMBERS (
+    &quot;GROUP_ID&quot; integer not null,
+    &quot;ADDRESSBOOK_ID&quot; integer not null references ADDRESSBOOK_HOME on delete cascade,
+    &quot;MEMBER_ID&quot; integer not null,
+    &quot;REVISION&quot; integer not null,
+    &quot;REMOVED&quot; integer default 0 not null,
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    primary key (&quot;GROUP_ID&quot;, &quot;MEMBER_ID&quot;, &quot;REVISION&quot;)
+);
+
+create table ABO_FOREIGN_MEMBERS (
+    &quot;GROUP_ID&quot; integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+    &quot;ADDRESSBOOK_ID&quot; integer not null references ADDRESSBOOK_HOME on delete cascade,
+    &quot;MEMBER_ADDRESS&quot; nvarchar2(255), 
+    primary key (&quot;GROUP_ID&quot;, &quot;MEMBER_ADDRESS&quot;)
+);
+
+create table SHARED_GROUP_BIND (
+    &quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot; integer not null references ADDRESSBOOK_HOME,
+    &quot;GROUP_RESOURCE_ID&quot; integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+    &quot;EXTERNAL_ID&quot; integer default null,
+    &quot;GROUP_ADDRESSBOOK_NAME&quot; nvarchar2(255),
+    &quot;BIND_MODE&quot; integer not null,
+    &quot;BIND_STATUS&quot; integer not null,
+    &quot;BIND_REVISION&quot; integer default 0 not null,
+    &quot;MESSAGE&quot; nclob, 
+    primary key (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;GROUP_RESOURCE_ID&quot;), 
+    unique (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;GROUP_ADDRESSBOOK_NAME&quot;)
+);
+
+create table CALENDAR_OBJECT_REVISIONS (
+    &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME,
+    &quot;CALENDAR_RESOURCE_ID&quot; integer references CALENDAR,
+    &quot;CALENDAR_NAME&quot; nvarchar2(255) default null,
+    &quot;RESOURCE_NAME&quot; nvarchar2(255),
+    &quot;REVISION&quot; integer not null,
+    &quot;DELETED&quot; integer not null,
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique (&quot;CALENDAR_HOME_RESOURCE_ID&quot;, &quot;CALENDAR_RESOURCE_ID&quot;, &quot;CALENDAR_NAME&quot;, &quot;RESOURCE_NAME&quot;)
+);
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+    &quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot; integer not null references ADDRESSBOOK_HOME,
+    &quot;OWNER_HOME_RESOURCE_ID&quot; integer references ADDRESSBOOK_HOME,
+    &quot;ADDRESSBOOK_NAME&quot; nvarchar2(255) default null,
+    &quot;OBJECT_RESOURCE_ID&quot; integer default 0,
+    &quot;RESOURCE_NAME&quot; nvarchar2(255),
+    &quot;REVISION&quot; integer not null,
+    &quot;DELETED&quot; integer not null,
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique (&quot;ADDRESSBOOK_HOME_RESOURCE_ID&quot;, &quot;OWNER_HOME_RESOURCE_ID&quot;, &quot;ADDRESSBOOK_NAME&quot;, &quot;RESOURCE_NAME&quot;)
+);
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+    &quot;NOTIFICATION_HOME_RESOURCE_ID&quot; integer not null references NOTIFICATION_HOME on delete cascade,
+    &quot;RESOURCE_NAME&quot; nvarchar2(255),
+    &quot;REVISION&quot; integer not null,
+    &quot;DELETED&quot; integer not null,
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    unique (&quot;NOTIFICATION_HOME_RESOURCE_ID&quot;, &quot;RESOURCE_NAME&quot;)
+);
+
+create table APN_SUBSCRIPTIONS (
+    &quot;TOKEN&quot; nvarchar2(255),
+    &quot;RESOURCE_KEY&quot; nvarchar2(255),
+    &quot;MODIFIED&quot; integer not null,
+    &quot;SUBSCRIBER_GUID&quot; nvarchar2(255),
+    &quot;USER_AGENT&quot; nvarchar2(255) default null,
+    &quot;IP_ADDR&quot; nvarchar2(255) default null, 
+    primary key (&quot;TOKEN&quot;, &quot;RESOURCE_KEY&quot;)
+);
+
+create table IMIP_TOKENS (
+    &quot;TOKEN&quot; nvarchar2(255),
+    &quot;ORGANIZER&quot; nvarchar2(255),
+    &quot;ATTENDEE&quot; nvarchar2(255),
+    &quot;ICALUID&quot; nvarchar2(255),
+    &quot;ACCESSED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC', 
+    primary key (&quot;ORGANIZER&quot;, &quot;ATTENDEE&quot;, &quot;ICALUID&quot;)
+);
+
+create table IMIP_INVITATION_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;FROM_ADDR&quot; nvarchar2(255),
+    &quot;TO_ADDR&quot; nvarchar2(255),
+    &quot;ICALENDAR_TEXT&quot; nclob
+);
+
+create table IMIP_POLLING_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB
+);
+
+create table IMIP_REPLY_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;ORGANIZER&quot; nvarchar2(255),
+    &quot;ATTENDEE&quot; nvarchar2(255),
+    &quot;ICALENDAR_TEXT&quot; nclob
+);
+
+create table PUSH_NOTIFICATION_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;PUSH_ID&quot; nvarchar2(255),
+    &quot;PUSH_PRIORITY&quot; integer not null
+);
+
+create table GROUP_CACHER_POLLING_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB
+);
+
+create table GROUP_REFRESH_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;GROUP_UID&quot; nvarchar2(255)
+);
+
+create table GROUP_DELEGATE_CHANGES_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;DELEGATOR_UID&quot; nvarchar2(255),
+    &quot;READ_DELEGATE_UID&quot; nvarchar2(255),
+    &quot;WRITE_DELEGATE_UID&quot; nvarchar2(255)
+);
+
+create table GROUPS (
+    &quot;GROUP_ID&quot; integer primary key,
+    &quot;NAME&quot; nvarchar2(255),
+    &quot;GROUP_UID&quot; nvarchar2(255) unique,
+    &quot;MEMBERSHIP_HASH&quot; nvarchar2(255),
+    &quot;EXTANT&quot; integer default 1,
+    &quot;CREATED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+    &quot;MODIFIED&quot; timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table GROUP_MEMBERSHIP (
+    &quot;GROUP_ID&quot; integer not null references GROUPS on delete cascade,
+    &quot;MEMBER_UID&quot; nvarchar2(255), 
+    primary key (&quot;GROUP_ID&quot;, &quot;MEMBER_UID&quot;)
+);
+
+create table GROUP_ATTENDEE_RECONCILE_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;RESOURCE_ID&quot; integer not null references CALENDAR_OBJECT on delete cascade,
+    &quot;GROUP_ID&quot; integer not null references GROUPS on delete cascade
+);
+
+create table GROUP_ATTENDEE (
+    &quot;GROUP_ID&quot; integer not null references GROUPS on delete cascade,
+    &quot;RESOURCE_ID&quot; integer not null references CALENDAR_OBJECT on delete cascade,
+    &quot;MEMBERSHIP_HASH&quot; nvarchar2(255), 
+    primary key (&quot;GROUP_ID&quot;, &quot;RESOURCE_ID&quot;)
+);
+
+create table GROUP_SHAREE_RECONCILE_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;CALENDAR_ID&quot; integer not null references CALENDAR on delete cascade,
+    &quot;GROUP_ID&quot; integer not null references GROUPS on delete cascade
+);
+
+create table GROUP_SHAREE (
+    &quot;GROUP_ID&quot; integer not null references GROUPS on delete cascade,
+    &quot;CALENDAR_ID&quot; integer not null references CALENDAR on delete cascade,
+    &quot;GROUP_BIND_MODE&quot; integer not null,
+    &quot;MEMBERSHIP_HASH&quot; nvarchar2(255), 
+    primary key (&quot;GROUP_ID&quot;, &quot;CALENDAR_ID&quot;)
+);
+
+create table DELEGATES (
+    &quot;DELEGATOR&quot; nvarchar2(255),
+    &quot;DELEGATE&quot; nvarchar2(255),
+    &quot;READ_WRITE&quot; integer not null, 
+    primary key (&quot;DELEGATOR&quot;, &quot;READ_WRITE&quot;, &quot;DELEGATE&quot;)
+);
+
+create table DELEGATE_GROUPS (
+    &quot;DELEGATOR&quot; nvarchar2(255),
+    &quot;GROUP_ID&quot; integer not null references GROUPS on delete cascade,
+    &quot;READ_WRITE&quot; integer not null,
+    &quot;IS_EXTERNAL&quot; integer not null, 
+    primary key (&quot;DELEGATOR&quot;, &quot;READ_WRITE&quot;, &quot;GROUP_ID&quot;)
+);
+
+create table EXTERNAL_DELEGATE_GROUPS (
+    &quot;DELEGATOR&quot; nvarchar2(255) primary key,
+    &quot;GROUP_UID_READ&quot; nvarchar2(255),
+    &quot;GROUP_UID_WRITE&quot; nvarchar2(255)
+);
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;RESOURCE_ID&quot; integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create table CALENDAR_OBJECT_UPGRADE_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;RESOURCE_ID&quot; integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create table FIND_MIN_VALID_REVISION_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB
+);
+
+create table REVISION_CLEANUP_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB
+);
+
+create table INBOX_CLEANUP_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB
+);
+
+create table CLEANUP_ONE_INBOX_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;HOME_ID&quot; integer not null unique references CALENDAR_HOME on delete cascade
+);
+
+create table SCHEDULE_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;ICALENDAR_UID&quot; nvarchar2(255),
+    &quot;WORK_TYPE&quot; nvarchar2(255)
+);
+
+create table SCHEDULE_REFRESH_WORK (
+    &quot;WORK_ID&quot; integer primary key references SCHEDULE_WORK on delete cascade,
+    &quot;HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME on delete cascade,
+    &quot;RESOURCE_ID&quot; integer not null references CALENDAR_OBJECT on delete cascade,
+    &quot;ATTENDEE_COUNT&quot; integer
+);
+
+create table SCHEDULE_REFRESH_ATTENDEES (
+    &quot;RESOURCE_ID&quot; integer not null references CALENDAR_OBJECT on delete cascade,
+    &quot;ATTENDEE&quot; nvarchar2(255), 
+    primary key (&quot;RESOURCE_ID&quot;, &quot;ATTENDEE&quot;)
+);
+
+create table SCHEDULE_AUTO_REPLY_WORK (
+    &quot;WORK_ID&quot; integer primary key references SCHEDULE_WORK on delete cascade,
+    &quot;HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME on delete cascade,
+    &quot;RESOURCE_ID&quot; integer not null references CALENDAR_OBJECT on delete cascade,
+    &quot;PARTSTAT&quot; nvarchar2(255)
+);
+
+create table SCHEDULE_ORGANIZER_WORK (
+    &quot;WORK_ID&quot; integer primary key references SCHEDULE_WORK on delete cascade,
+    &quot;SCHEDULE_ACTION&quot; integer not null,
+    &quot;HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME on delete cascade,
+    &quot;RESOURCE_ID&quot; integer,
+    &quot;ICALENDAR_TEXT_OLD&quot; nclob,
+    &quot;ICALENDAR_TEXT_NEW&quot; nclob,
+    &quot;ATTENDEE_COUNT&quot; integer,
+    &quot;SMART_MERGE&quot; integer
+);
+
+create table SCHEDULE_ACTION (
+    &quot;ID&quot; integer primary key,
+    &quot;DESCRIPTION&quot; nvarchar2(16) unique
+);
+
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('create', 0);
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('modify', 1);
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('modify-cancelled', 2);
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('remove', 3);
+create table SCHEDULE_ORGANIZER_SEND_WORK (
+    &quot;WORK_ID&quot; integer primary key references SCHEDULE_WORK on delete cascade,
+    &quot;SCHEDULE_ACTION&quot; integer not null,
+    &quot;HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME on delete cascade,
+    &quot;RESOURCE_ID&quot; integer,
+    &quot;ATTENDEE&quot; nvarchar2(255),
+    &quot;ITIP_MSG&quot; nclob,
+    &quot;NO_REFRESH&quot; integer
+);
+
+create table SCHEDULE_REPLY_WORK (
+    &quot;WORK_ID&quot; integer primary key references SCHEDULE_WORK on delete cascade,
+    &quot;HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME on delete cascade,
+    &quot;RESOURCE_ID&quot; integer,
+    &quot;ITIP_MSG&quot; nclob
+);
+
+create table PRINCIPAL_PURGE_POLLING_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB
+);
+
+create table PRINCIPAL_PURGE_CHECK_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;UID&quot; nvarchar2(255)
+);
+
+create table PRINCIPAL_PURGE_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;UID&quot; nvarchar2(255)
+);
+
+create table PRINCIPAL_PURGE_HOME_WORK (
+    &quot;WORK_ID&quot; integer primary key,
+    &quot;JOB_ID&quot; integer not null references JOB,
+    &quot;HOME_RESOURCE_ID&quot; integer not null references CALENDAR_HOME on delete cascade
+);
+
+create table CALENDARSERVER (
+    &quot;NAME&quot; nvarchar2(255) primary key,
+    &quot;VALUE&quot; nvarchar2(255)
+);
+
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '51');
+insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '6');
+insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
+insert into CALENDARSERVER (NAME, VALUE) values ('MIN-VALID-REVISION', '1');
+create index CALENDAR_HOME_METADAT_3cb9049e on CALENDAR_HOME_METADATA (
+    DEFAULT_EVENTS
+);
+
+create index CALENDAR_HOME_METADAT_d55e5548 on CALENDAR_HOME_METADATA (
+    DEFAULT_TASKS
+);
+
+create index CALENDAR_HOME_METADAT_910264ce on CALENDAR_HOME_METADATA (
+    DEFAULT_POLLS
+);
+
+create index NOTIFICATION_NOTIFICA_f891f5f9 on NOTIFICATION (
+    NOTIFICATION_HOME_RESOURCE_ID
+);
+
+create index CALENDAR_BIND_RESOURC_e57964d4 on CALENDAR_BIND (
+    CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_CALEN_a9a453a9 on CALENDAR_OBJECT (
+    CALENDAR_RESOURCE_ID,
+    ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_CALEN_c4dc619c on CALENDAR_OBJECT (
+    CALENDAR_RESOURCE_ID,
+    RECURRANCE_MAX,
+    RECURRANCE_MIN
+);
+
+create index CALENDAR_OBJECT_ICALE_82e731d5 on CALENDAR_OBJECT (
+    ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_DROPB_de041d80 on CALENDAR_OBJECT (
+    DROPBOX_ID
+);
+
+create index TIME_RANGE_CALENDAR_R_beb6e7eb on TIME_RANGE (
+    CALENDAR_RESOURCE_ID
+);
+
+create index TIME_RANGE_CALENDAR_O_acf37bd1 on TIME_RANGE (
+    CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index ATTACHMENT_CALENDAR_H_0078845c on ATTACHMENT (
+    CALENDAR_HOME_RESOURCE_ID
+);
+
+create index ATTACHMENT_DROPBOX_ID_5073cf23 on ATTACHMENT (
+    DROPBOX_ID
+);
+
+create index ATTACHMENT_CALENDAR_O_81508484 on ATTACHMENT_CALENDAR_OBJECT (
+    CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index SHARED_ADDRESSBOOK_BI_e9a2e6d4 on SHARED_ADDRESSBOOK_BIND (
+    OWNER_HOME_RESOURCE_ID
+);
+
+create index ABO_MEMBERS_ADDRESSBO_4effa879 on ABO_MEMBERS (
+    ADDRESSBOOK_ID
+);
+
+create index ABO_MEMBERS_MEMBER_ID_8d66adcf on ABO_MEMBERS (
+    MEMBER_ID
+);
+
+create index ABO_FOREIGN_MEMBERS_A_1fd2c5e9 on ABO_FOREIGN_MEMBERS (
+    ADDRESSBOOK_ID
+);
+
+create index SHARED_GROUP_BIND_RES_cf52f95d on SHARED_GROUP_BIND (
+    GROUP_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_6d9d929c on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
+);
+
+create index CALENDAR_OBJECT_REVIS_265c8acf on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_RESOURCE_ID,
+    REVISION
+);
+
+create index CALENDAR_OBJECT_REVIS_550b1c56 on CALENDAR_OBJECT_REVISIONS (
+    CALENDAR_HOME_RESOURCE_ID,
+    REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_00fe8288 on ADDRESSBOOK_OBJECT_REVISIONS (
+    OWNER_HOME_RESOURCE_ID,
+    RESOURCE_NAME,
+    DELETED,
+    REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_45004780 on ADDRESSBOOK_OBJECT_REVISIONS (
+    OWNER_HOME_RESOURCE_ID,
+    REVISION
+);
+
+create index NOTIFICATION_OBJECT_R_036a9cee on NOTIFICATION_OBJECT_REVISIONS (
+    NOTIFICATION_HOME_RESOURCE_ID,
+    REVISION
+);
+
+create index APN_SUBSCRIPTIONS_RES_9610d78e on APN_SUBSCRIPTIONS (
+    RESOURCE_KEY
+);
+
+create index IMIP_TOKENS_TOKEN_e94b918f on IMIP_TOKENS (
+    TOKEN
+);
+
+create index IMIP_INVITATION_WORK__586d064c on IMIP_INVITATION_WORK (
+    JOB_ID
+);
+
+create index IMIP_POLLING_WORK_JOB_d5535891 on IMIP_POLLING_WORK (
+    JOB_ID
+);
+
+create index IMIP_REPLY_WORK_JOB_I_bf4ae73e on IMIP_REPLY_WORK (
+    JOB_ID
+);
+
+create index PUSH_NOTIFICATION_WOR_8bbab117 on PUSH_NOTIFICATION_WORK (
+    JOB_ID
+);
+
+create index PUSH_NOTIFICATION_WOR_3a3ee588 on PUSH_NOTIFICATION_WORK (
+    PUSH_ID
+);
+
+create index GROUP_CACHER_POLLING__6eb3151c on GROUP_CACHER_POLLING_WORK (
+    JOB_ID
+);
+
+create index GROUP_REFRESH_WORK_JO_717ede20 on GROUP_REFRESH_WORK (
+    JOB_ID
+);
+
+create index GROUP_REFRESH_WORK_GR_0325f3a8 on GROUP_REFRESH_WORK (
+    GROUP_UID
+);
+
+create index GROUP_DELEGATE_CHANGE_8bf9e6d8 on GROUP_DELEGATE_CHANGES_WORK (
+    JOB_ID
+);
+
+create index GROUP_DELEGATE_CHANGE_d8f7af69 on GROUP_DELEGATE_CHANGES_WORK (
+    DELEGATOR_UID
+);
+
+create index GROUPS_GROUP_UID_b35cce23 on GROUPS (
+    GROUP_UID
+);
+
+create index GROUP_MEMBERSHIP_MEMB_0ca508e8 on GROUP_MEMBERSHIP (
+    MEMBER_UID
+);
+
+create index GROUP_ATTENDEE_RECONC_da73d3c2 on GROUP_ATTENDEE_RECONCILE_WORK (
+    JOB_ID
+);
+
+create index GROUP_ATTENDEE_RECONC_b894ee7a on GROUP_ATTENDEE_RECONCILE_WORK (
+    RESOURCE_ID
+);
+
+create index GROUP_ATTENDEE_RECONC_5eabc549 on GROUP_ATTENDEE_RECONCILE_WORK (
+    GROUP_ID
+);
+
+create index GROUP_ATTENDEE_RESOUR_855124dc on GROUP_ATTENDEE (
+    RESOURCE_ID
+);
+
+create index GROUP_SHAREE_RECONCIL_9aad0858 on GROUP_SHAREE_RECONCILE_WORK (
+    JOB_ID
+);
+
+create index GROUP_SHAREE_RECONCIL_4dc60f78 on GROUP_SHAREE_RECONCILE_WORK (
+    CALENDAR_ID
+);
+
+create index GROUP_SHAREE_RECONCIL_1d14c921 on GROUP_SHAREE_RECONCILE_WORK (
+    GROUP_ID
+);
+
+create index GROUP_SHAREE_CALENDAR_28a88850 on GROUP_SHAREE (
+    CALENDAR_ID
+);
+
+create index DELEGATE_TO_DELEGATOR_5e149b11 on DELEGATES (
+    DELEGATE,
+    READ_WRITE,
+    DELEGATOR
+);
+
+create index DELEGATE_GROUPS_GROUP_25117446 on DELEGATE_GROUPS (
+    GROUP_ID
+);
+
+create index CALENDAR_OBJECT_SPLIT_af71dcda on CALENDAR_OBJECT_SPLITTER_WORK (
+    RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_SPLIT_33603b72 on CALENDAR_OBJECT_SPLITTER_WORK (
+    JOB_ID
+);
+
+create index CALENDAR_OBJECT_UPGRA_a5c181eb on CALENDAR_OBJECT_UPGRADE_WORK (
+    RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_UPGRA_39d6f8f9 on CALENDAR_OBJECT_UPGRADE_WORK (
+    JOB_ID
+);
+
+create index FIND_MIN_VALID_REVISI_78d17400 on FIND_MIN_VALID_REVISION_WORK (
+    JOB_ID
+);
+
+create index REVISION_CLEANUP_WORK_eb062686 on REVISION_CLEANUP_WORK (
+    JOB_ID
+);
+
+create index INBOX_CLEANUP_WORK_JO_799132bd on INBOX_CLEANUP_WORK (
+    JOB_ID
+);
+
+create index CLEANUP_ONE_INBOX_WOR_375dac36 on CLEANUP_ONE_INBOX_WORK (
+    JOB_ID
+);
+
+create index SCHEDULE_WORK_JOB_ID_65e810ee on SCHEDULE_WORK (
+    JOB_ID
+);
+
+create index SCHEDULE_WORK_ICALEND_089f33dc on SCHEDULE_WORK (
+    ICALENDAR_UID
+);
+
+create index SCHEDULE_REFRESH_WORK_26084c7b on SCHEDULE_REFRESH_WORK (
+    HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_REFRESH_WORK_989efe54 on SCHEDULE_REFRESH_WORK (
+    RESOURCE_ID
+);
+
+create index SCHEDULE_REFRESH_ATTE_83053b91 on SCHEDULE_REFRESH_ATTENDEES (
+    RESOURCE_ID,
+    ATTENDEE
+);
+
+create index SCHEDULE_AUTO_REPLY_W_0256478d on SCHEDULE_AUTO_REPLY_WORK (
+    HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_AUTO_REPLY_W_0755e754 on SCHEDULE_AUTO_REPLY_WORK (
+    RESOURCE_ID
+);
+
+create index SCHEDULE_ORGANIZER_WO_18ce4edd on SCHEDULE_ORGANIZER_WORK (
+    HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_ORGANIZER_WO_14702035 on SCHEDULE_ORGANIZER_WORK (
+    RESOURCE_ID
+);
+
+create index SCHEDULE_ORGANIZER_SE_9ec9f827 on SCHEDULE_ORGANIZER_SEND_WORK (
+    HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_ORGANIZER_SE_699fefc4 on SCHEDULE_ORGANIZER_SEND_WORK (
+    RESOURCE_ID
+);
+
+create index SCHEDULE_REPLY_WORK_H_745af8cf on SCHEDULE_REPLY_WORK (
+    HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_REPLY_WORK_R_11bd3fbb on SCHEDULE_REPLY_WORK (
+    RESOURCE_ID
+);
+
+create index PRINCIPAL_PURGE_POLLI_6383e68a on PRINCIPAL_PURGE_POLLING_WORK (
+    JOB_ID
+);
+
+create index PRINCIPAL_PURGE_CHECK_b0c024c1 on PRINCIPAL_PURGE_CHECK_WORK (
+    JOB_ID
+);
+
+create index PRINCIPAL_PURGE_CHECK_198388a5 on PRINCIPAL_PURGE_CHECK_WORK (
+    UID
+);
+
+create index PRINCIPAL_PURGE_WORK__7a8141a3 on PRINCIPAL_PURGE_WORK (
+    JOB_ID
+);
+
+create index PRINCIPAL_PURGE_WORK__db35cfdc on PRINCIPAL_PURGE_WORK (
+    UID
+);
+
+create index PRINCIPAL_PURGE_HOME__f35eea7a on PRINCIPAL_PURGE_HOME_WORK (
+    JOB_ID
+);
+
+create index PRINCIPAL_PURGE_HOME__967e4480 on PRINCIPAL_PURGE_HOME_WORK (
+    HOME_RESOURCE_ID
+);
+
+-- Extra schema to add to current-oracle-dialect.sql
</ins></span></pre></div>
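<p>This v51 snapshot (with its PostgreSQL twin below) records the prior schema that the upgrade machinery steps forward from; the changeset bumps the stored VERSION row from '51' to '52'. A hypothetical sketch of a version gate built on the CALENDARSERVER key/value table (the real checks live in the txdav upgrade code, which this hunk does not show; SQLite again stands in for the real database):</p>
<pre>
import sqlite3

REQUIRED_VERSION = 52  # the schema version this changeset installs

def schema_version(db):
    # CALENDARSERVER is a key/value table; VERSION holds the installed rev.
    row = db.execute('select VALUE from CALENDARSERVER where NAME = ?',
                     ('VERSION',)).fetchone()
    return int(row[0]) if row else 0

def require_current_schema(db):
    current = schema_version(db)
    if current &lt; REQUIRED_VERSION:
        raise RuntimeError('schema is at v%d but v%d is required, '
                           'run the upgrade first' % (current, REQUIRED_VERSION))
    return current

db = sqlite3.connect(':memory:')
db.execute('create table CALENDARSERVER '
           '(NAME varchar(255) primary key, VALUE varchar(255))')
db.execute('insert into CALENDARSERVER values (?, ?)', ('VERSION', '52'))
print require_current_schema(db)  # prints 52
</pre>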
<a id="CalendarServertrunktxdavcommondatastoresql_schemaoldpostgresdialectv51sqlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_schemaoldpostgresdialectv51sql"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v51.sql (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/old/postgres-dialect/v51.sql) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v51.sql                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v51.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,1157 @@
</span><ins>+-- -*- test-case-name: txdav.caldav.datastore.test.test_sql,txdav.carddav.datastore.test.test_sql -*-
+
+----
+-- Copyright (c) 2010-2015 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+
+-----------------
+-- Resource ID --
+-----------------
+
+create sequence RESOURCE_ID_SEQ;
+
+
+-------------------------
+-- Cluster Bookkeeping --
+-------------------------
+
+-- Information about a process connected to this database.
+
+-- Note that this must match the node info schema in twext.enterprise.queue.
+create table NODE_INFO (
+  HOSTNAME  varchar(255) not null,
+  PID       integer      not null,
+  PORT      integer      not null,
+  TIME      timestamp    not null default timezone('UTC', CURRENT_TIMESTAMP),
+
+  primary key (HOSTNAME, PORT)
+);
+
+-- Unique named locks.  This table should always be empty, but rows are
+-- temporarily created in order to prevent undesirable concurrency.
+create table NAMED_LOCK (
+    LOCK_NAME varchar(255) primary key
+);
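+
+-- For illustration only (not part of the schema proper): one plausible
+-- locking pattern is to insert the name and delete it again before commit,
+-- so concurrent transactions inserting the same name block on the primary
+-- key until the holder finishes:
+--
+--   begin;
+--   insert into NAMED_LOCK (LOCK_NAME) values ('example-lock');
+--   -- ... perform the serialized work ...
+--   delete from NAMED_LOCK where LOCK_NAME = 'example-lock';
+--   commit;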
+
+
+--------------------
+-- Jobs           --
+--------------------
+
+create sequence JOB_SEQ;
+
+create table JOB (
+  JOB_ID      integer primary key default nextval('JOB_SEQ'), --implicit index
+  WORK_TYPE   varchar(255) not null,
+  PRIORITY    integer default 0,
+  WEIGHT      integer default 0,
+  NOT_BEFORE  timestamp not null,
+  ASSIGNED    timestamp default null,
+  OVERDUE     timestamp default null,
+  FAILED      integer default 0
+);
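+
+-- For illustration only: a worker claiming the next runnable job might use
+-- a query along these lines (hypothetical; the actual dequeue logic lives
+-- in the application layer):
+--
+--   update JOB
+--      set ASSIGNED = timezone('UTC', CURRENT_TIMESTAMP)
+--    where JOB_ID = (
+--        select JOB_ID from JOB
+--         where ASSIGNED is null
+--           and NOT_BEFORE <= timezone('UTC', CURRENT_TIMESTAMP)
+--         order by PRIORITY desc
+--         limit 1
+--         for update
+--    )
+--    returning JOB_ID, WORK_TYPE;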
+
+-------------------
+-- Calendar Home --
+-------------------
+
+create table CALENDAR_HOME (
+  RESOURCE_ID      integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID        varchar(255) not null unique,                                -- implicit index
+  STATUS           integer      default 0 not null,                             -- enum HOME_STATUS
+  DATAVERSION      integer      default 0 not null
+);
+
+-- Enumeration of statuses
+
+create table HOME_STATUS (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into HOME_STATUS values (0, 'normal' );
+insert into HOME_STATUS values (1, 'external');
+insert into HOME_STATUS values (2, 'purging');
+
+
+--------------
+-- Calendar --
+--------------
+
+create table CALENDAR (
+  RESOURCE_ID integer   primary key default nextval('RESOURCE_ID_SEQ') -- implicit index
+);
+
+
+----------------------------
+-- Calendar Home Metadata --
+----------------------------
+
+create table CALENDAR_HOME_METADATA (
+  RESOURCE_ID              integer     primary key references CALENDAR_HOME on delete cascade, -- implicit index
+  QUOTA_USED_BYTES         integer     default 0 not null,
+  DEFAULT_EVENTS           integer     default null references CALENDAR on delete set null,
+  DEFAULT_TASKS            integer     default null references CALENDAR on delete set null,
+  DEFAULT_POLLS            integer     default null references CALENDAR on delete set null,
+  ALARM_VEVENT_TIMED       text        default null,
+  ALARM_VEVENT_ALLDAY      text        default null,
+  ALARM_VTODO_TIMED        text        default null,
+  ALARM_VTODO_ALLDAY       text        default null,
+  AVAILABILITY             text        default null,
+  CREATED                  timestamp   default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                 timestamp   default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index CALENDAR_HOME_METADATA_DEFAULT_EVENTS on
+  CALENDAR_HOME_METADATA(DEFAULT_EVENTS);
+create index CALENDAR_HOME_METADATA_DEFAULT_TASKS on
+  CALENDAR_HOME_METADATA(DEFAULT_TASKS);
+create index CALENDAR_HOME_METADATA_DEFAULT_POLLS on
+  CALENDAR_HOME_METADATA(DEFAULT_POLLS);
+
+
+-----------------------
+-- Calendar Metadata --
+-----------------------
+
+create table CALENDAR_METADATA (
+  RESOURCE_ID           integer      primary key references CALENDAR on delete cascade, -- implicit index
+  SUPPORTED_COMPONENTS  varchar(255) default null,
+  CREATED               timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED              timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------------
+-- Sharing Notifications --
+---------------------------
+
+create table NOTIFICATION_HOME (
+  RESOURCE_ID integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  OWNER_UID   varchar(255) not null unique,                                -- implicit index
+  STATUS      integer      default 0 not null,                             -- enum HOME_STATUS
+  DATAVERSION integer      default 0 not null
+);
+
+create table NOTIFICATION (
+  RESOURCE_ID                   integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME,
+  NOTIFICATION_UID              varchar(255) not null,
+  NOTIFICATION_TYPE             varchar(255) not null,
+  NOTIFICATION_DATA             text         not null,
+  MD5                           char(32)     not null,
+  CREATED                       timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique (NOTIFICATION_UID, NOTIFICATION_HOME_RESOURCE_ID) -- implicit index
+);
+
+create index NOTIFICATION_NOTIFICATION_HOME_RESOURCE_ID on
+  NOTIFICATION(NOTIFICATION_HOME_RESOURCE_ID);
+
+
+-------------------
+-- Calendar Bind --
+-------------------
+
+-- Joins CALENDAR_HOME and CALENDAR
+
+create table CALENDAR_BIND (
+  CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
+  CALENDAR_RESOURCE_ID      integer      not null references CALENDAR on delete cascade,
+  EXTERNAL_ID               integer      default null,
+  CALENDAR_RESOURCE_NAME    varchar(255) not null,
+  BIND_MODE                 integer      not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS               integer      not null, -- enum CALENDAR_BIND_STATUS
+  BIND_REVISION             integer      default 0 not null,
+  MESSAGE                   text,
+  TRANSP                    integer      default 0 not null, -- enum CALENDAR_TRANSP
+  ALARM_VEVENT_TIMED        text         default null,
+  ALARM_VEVENT_ALLDAY       text         default null,
+  ALARM_VTODO_TIMED         text         default null,
+  ALARM_VTODO_ALLDAY        text         default null,
+  TIMEZONE                  text         default null,
+
+  primary key (CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID), -- implicit index
+  unique (CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_NAME)     -- implicit index
+);
+
+create index CALENDAR_BIND_RESOURCE_ID on
+  CALENDAR_BIND(CALENDAR_RESOURCE_ID);
+
+-- Enumeration of calendar bind modes
+
+create table CALENDAR_BIND_MODE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_MODE values (0, 'own'  );
+insert into CALENDAR_BIND_MODE values (1, 'read' );
+insert into CALENDAR_BIND_MODE values (2, 'write');
+insert into CALENDAR_BIND_MODE values (3, 'direct');
+insert into CALENDAR_BIND_MODE values (4, 'indirect');
+insert into CALENDAR_BIND_MODE values (5, 'group');
+insert into CALENDAR_BIND_MODE values (6, 'group_read');
+insert into CALENDAR_BIND_MODE values (7, 'group_write');
+
+-- Enumeration of statuses
+
+create table CALENDAR_BIND_STATUS (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_STATUS values (0, 'invited' );
+insert into CALENDAR_BIND_STATUS values (1, 'accepted');
+insert into CALENDAR_BIND_STATUS values (2, 'declined');
+insert into CALENDAR_BIND_STATUS values (3, 'invalid');
+insert into CALENDAR_BIND_STATUS values (4, 'deleted');
+
+
+-- Enumeration of transparency
+
+create table CALENDAR_TRANSP (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_TRANSP values (0, 'opaque' );
+insert into CALENDAR_TRANSP values (1, 'transparent');
+
+
+---------------------
+-- Calendar Object --
+---------------------
+
+create table CALENDAR_OBJECT (
+  RESOURCE_ID          integer      primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  CALENDAR_RESOURCE_ID integer      not null references CALENDAR on delete cascade,
+  RESOURCE_NAME        varchar(255) not null,
+  ICALENDAR_TEXT       text         not null,
+  ICALENDAR_UID        varchar(255) not null,
+  ICALENDAR_TYPE       varchar(255) not null,
+  ATTACHMENTS_MODE     integer      default 0 not null, -- enum CALENDAR_OBJ_ATTACHMENTS_MODE
+  DROPBOX_ID           varchar(255),
+  ORGANIZER            varchar(255),
+  RECURRANCE_MIN       date,        -- minimum date that recurrences have been expanded to.
+  RECURRANCE_MAX       date,        -- maximum date that recurrences have been expanded to.
+  ACCESS               integer      default 0 not null,
+  SCHEDULE_OBJECT      boolean      default false,
+  SCHEDULE_TAG         varchar(36)  default null,
+  SCHEDULE_ETAGS       text         default null,
+  PRIVATE_COMMENTS     boolean      default false not null,
+  MD5                  char(32)     not null,
+  CREATED              timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED             timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  DATAVERSION          integer      default 0 not null,
+
+  unique (CALENDAR_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+
+  -- since the 'inbox' is a 'calendar resource' for the purpose of storing
+  -- calendar objects, this constraint has to be selectively enforced by the
+  -- application layer.
+
+  -- unique (CALENDAR_RESOURCE_ID, ICALENDAR_UID)
+);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_AND_ICALENDAR_UID on
+  CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, ICALENDAR_UID);
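+
+-- For illustration only: the application-layer UID uniqueness check noted
+-- above can be served by the index just created, e.g. (hypothetical; the
+-- :placeholders stand for bound values):
+--
+--   select RESOURCE_ID from CALENDAR_OBJECT
+--    where CALENDAR_RESOURCE_ID = :calendarID
+--      and ICALENDAR_UID = :icalendarUID;
+--
+-- with the insert rejected by the application when a row comes back for a
+-- regular (non-inbox) calendar.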
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_RECURRANCE_MAX_MIN on
+  CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, RECURRANCE_MAX, RECURRANCE_MIN);
+
+create index CALENDAR_OBJECT_ICALENDAR_UID on
+  CALENDAR_OBJECT(ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_DROPBOX_ID on
+  CALENDAR_OBJECT(DROPBOX_ID);
+
+-- Enumeration of attachment modes
+
+create table CALENDAR_OBJ_ATTACHMENTS_MODE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE values (0, 'none' );
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE values (1, 'read' );
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE values (2, 'write');
+
+
+-- Enumeration of calendar access types
+
+create table CALENDAR_ACCESS_TYPE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(32) not null unique
+);
+
+insert into CALENDAR_ACCESS_TYPE values (0, ''             );
+insert into CALENDAR_ACCESS_TYPE values (1, 'public'       );
+insert into CALENDAR_ACCESS_TYPE values (2, 'private'      );
+insert into CALENDAR_ACCESS_TYPE values (3, 'confidential' );
+insert into CALENDAR_ACCESS_TYPE values (4, 'restricted'   );
+
+
+-----------------
+-- Instance ID --
+-----------------
+
+create sequence INSTANCE_ID_SEQ;
+
+
+----------------
+-- Time Range --
+----------------
+
+create table TIME_RANGE (
+  INSTANCE_ID                 integer        primary key default nextval('INSTANCE_ID_SEQ'), -- implicit index
+  CALENDAR_RESOURCE_ID        integer        not null references CALENDAR on delete cascade,
+  CALENDAR_OBJECT_RESOURCE_ID integer        not null references CALENDAR_OBJECT on delete cascade,
+  FLOATING                    boolean        not null,
+  START_DATE                  timestamp      not null,
+  END_DATE                    timestamp      not null,
+  FBTYPE                      integer        not null,
+  TRANSPARENT                 boolean        not null
+);
+
+create index TIME_RANGE_CALENDAR_RESOURCE_ID on
+  TIME_RANGE(CALENDAR_RESOURCE_ID);
+create index TIME_RANGE_CALENDAR_OBJECT_RESOURCE_ID on
+  TIME_RANGE(CALENDAR_OBJECT_RESOURCE_ID);
+
+
+-- Enumeration of free/busy types
+
+create table FREE_BUSY_TYPE (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into FREE_BUSY_TYPE values (0, 'unknown'         );
+insert into FREE_BUSY_TYPE values (1, 'free'            );
+insert into FREE_BUSY_TYPE values (2, 'busy'            );
+insert into FREE_BUSY_TYPE values (3, 'busy-unavailable');
+insert into FREE_BUSY_TYPE values (4, 'busy-tentative'  );
+
+
+-------------------
+-- Per-user data --
+-------------------
+
+create table PERUSER (
+  TIME_RANGE_INSTANCE_ID      integer      not null references TIME_RANGE on delete cascade,
+  USER_ID                     varchar(255) not null,
+  TRANSPARENT                 boolean      not null,
+  ADJUSTED_START_DATE         timestamp    default null,
+  ADJUSTED_END_DATE           timestamp    default null,
+  
+  primary key (TIME_RANGE_INSTANCE_ID, USER_ID)    -- implicit index
+);
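+
+-- For illustration only: PERUSER rows override TIME_RANGE values for a
+-- single user, so a per-user scan for opaque instances in a window might
+-- look like (hypothetical):
+--
+--   select TR.START_DATE, TR.END_DATE, TR.FBTYPE
+--     from TIME_RANGE TR
+--     left outer join PERUSER PU
+--       on PU.TIME_RANGE_INSTANCE_ID = TR.INSTANCE_ID
+--      and PU.USER_ID = :userID
+--    where TR.CALENDAR_RESOURCE_ID = :calendarID
+--      and TR.START_DATE < :rangeEnd
+--      and TR.END_DATE > :rangeStart
+--      and not coalesce(PU.TRANSPARENT, TR.TRANSPARENT);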
+
+
+----------------
+-- Attachment --
+----------------
+
+create sequence ATTACHMENT_ID_SEQ;
+
+create table ATTACHMENT (
+  ATTACHMENT_ID               integer           primary key default nextval('ATTACHMENT_ID_SEQ'), -- implicit index
+  CALENDAR_HOME_RESOURCE_ID   integer           not null references CALENDAR_HOME,
+  DROPBOX_ID                  varchar(255),
+  CONTENT_TYPE                varchar(255)      not null,
+  SIZE                        integer           not null,
+  MD5                         char(32)          not null,
+  CREATED                     timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                    timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  PATH                        varchar(1024)     not null
+);
+
+create index ATTACHMENT_CALENDAR_HOME_RESOURCE_ID on
+  ATTACHMENT(CALENDAR_HOME_RESOURCE_ID);
+
+create index ATTACHMENT_DROPBOX_ID on
+  ATTACHMENT(DROPBOX_ID);
+
+-- Many-to-many relationship between attachments and calendar objects
+create table ATTACHMENT_CALENDAR_OBJECT (
+  ATTACHMENT_ID                  integer      not null references ATTACHMENT on delete cascade,
+  MANAGED_ID                     varchar(255) not null,
+  CALENDAR_OBJECT_RESOURCE_ID    integer      not null references CALENDAR_OBJECT on delete cascade,
+
+  primary key (ATTACHMENT_ID, CALENDAR_OBJECT_RESOURCE_ID), -- implicit index
+  unique (MANAGED_ID, CALENDAR_OBJECT_RESOURCE_ID) --implicit index
+);
+
+create index ATTACHMENT_CALENDAR_OBJECT_CALENDAR_OBJECT_RESOURCE_ID on
+  ATTACHMENT_CALENDAR_OBJECT(CALENDAR_OBJECT_RESOURCE_ID);
+
+-----------------------
+-- Resource Property --
+-----------------------
+
+create table RESOURCE_PROPERTY (
+  RESOURCE_ID integer      not null, -- foreign key: *.RESOURCE_ID
+  NAME        varchar(255) not null,
+  VALUE       text         not null, -- FIXME: xml?
+  VIEWER_UID  varchar(255),
+
+  primary key (RESOURCE_ID, NAME, VIEWER_UID) -- implicit index
+);
+
+
+----------------------
+-- AddressBook Home --
+----------------------
+
+create table ADDRESSBOOK_HOME (
+  RESOURCE_ID                   integer         primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+  ADDRESSBOOK_PROPERTY_STORE_ID integer         default nextval('RESOURCE_ID_SEQ') not null,    -- implicit index
+  OWNER_UID                     varchar(255)    not null unique,                                -- implicit index
+  STATUS                        integer         default 0 not null,                             -- enum HOME_STATUS
+  DATAVERSION                   integer         default 0 not null
+);
+
+
+-------------------------------
+-- AddressBook Home Metadata --
+-------------------------------
+
+create table ADDRESSBOOK_HOME_METADATA (
+  RESOURCE_ID      integer      primary key references ADDRESSBOOK_HOME on delete cascade, -- implicit index
+  QUOTA_USED_BYTES integer      default 0 not null,
+  CREATED          timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED         timestamp    default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+-----------------------------
+-- Shared AddressBook Bind --
+-----------------------------
+
+-- Joins sharee ADDRESSBOOK_HOME and owner ADDRESSBOOK_HOME
+
+create table SHARED_ADDRESSBOOK_BIND (
+  ADDRESSBOOK_HOME_RESOURCE_ID          integer         not null references ADDRESSBOOK_HOME,
+  OWNER_HOME_RESOURCE_ID                integer         not null references ADDRESSBOOK_HOME on delete cascade,
+  EXTERNAL_ID                           integer         default null,
+  ADDRESSBOOK_RESOURCE_NAME             varchar(255)    not null,
+  BIND_MODE                             integer         not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS                           integer         not null, -- enum CALENDAR_BIND_STATUS
+  BIND_REVISION                         integer         default 0 not null,
+  MESSAGE                               text,                     -- FIXME: xml?
+
+  primary key (ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_NAME)     -- implicit index
+);
+
+create index SHARED_ADDRESSBOOK_BIND_RESOURCE_ID on
+  SHARED_ADDRESSBOOK_BIND(OWNER_HOME_RESOURCE_ID);
+
+
+------------------------
+-- AddressBook Object --
+------------------------
+
+create table ADDRESSBOOK_OBJECT (
+  RESOURCE_ID                   integer         primary key default nextval('RESOURCE_ID_SEQ'),    -- implicit index
+  ADDRESSBOOK_HOME_RESOURCE_ID  integer         not null references ADDRESSBOOK_HOME on delete cascade,
+  RESOURCE_NAME                 varchar(255)    not null,
+  VCARD_TEXT                    text            not null,
+  VCARD_UID                     varchar(255)    not null,
+  KIND                          integer         not null,  -- enum ADDRESSBOOK_OBJECT_KIND
+  MD5                           char(32)        not null,
+  CREATED                       timestamp       default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                      timestamp       default timezone('UTC', CURRENT_TIMESTAMP),
+  DATAVERSION                   integer         default 0 not null,
+
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, RESOURCE_NAME), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, VCARD_UID)      -- implicit index
+);
+
+
+-----------------------------
+-- AddressBook Object kind --
+-----------------------------
+
+create table ADDRESSBOOK_OBJECT_KIND (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND values (0, 'person');
+insert into ADDRESSBOOK_OBJECT_KIND values (1, 'group' );
+insert into ADDRESSBOOK_OBJECT_KIND values (2, 'resource');
+insert into ADDRESSBOOK_OBJECT_KIND values (3, 'location');
+
+
+----------------------------------
+-- Revisions, forward reference --
+----------------------------------
+
+create sequence REVISION_SEQ;
+
+---------------------------------
+-- Address Book Object Members --
+---------------------------------
+
+create table ABO_MEMBERS (
+  GROUP_ID        integer     not null, -- references ADDRESSBOOK_OBJECT on delete cascade,   -- AddressBook Object's (kind=='group') RESOURCE_ID
+  ADDRESSBOOK_ID  integer     not null references ADDRESSBOOK_HOME on delete cascade,
+  MEMBER_ID       integer     not null, -- references ADDRESSBOOK_OBJECT,                     -- member AddressBook Object's RESOURCE_ID
+  REVISION        integer     default nextval('REVISION_SEQ') not null,
+  REMOVED         boolean     default false not null,
+  MODIFIED        timestamp   default timezone('UTC', CURRENT_TIMESTAMP),
+
+  primary key (GROUP_ID, MEMBER_ID, REVISION) -- implicit index
+);
+
+create index ABO_MEMBERS_ADDRESSBOOK_ID on
+  ABO_MEMBERS(ADDRESSBOOK_ID);
+create index ABO_MEMBERS_MEMBER_ID on
+  ABO_MEMBERS(MEMBER_ID);
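+
+-- For illustration only: membership rows are versioned rather than updated
+-- in place, so the current members of a group might be derived with a
+-- query like (hypothetical):
+--
+--   select MEMBER_ID
+--     from ABO_MEMBERS outer_m
+--    where GROUP_ID = :groupID
+--      and REVISION = (
+--          select max(REVISION) from ABO_MEMBERS
+--           where GROUP_ID = outer_m.GROUP_ID
+--             and MEMBER_ID = outer_m.MEMBER_ID
+--      )
+--      and REMOVED = false;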
+
+------------------------------------------
+-- Address Book Object Foreign Members  --
+------------------------------------------
+
+create table ABO_FOREIGN_MEMBERS (
+  GROUP_ID           integer      not null references ADDRESSBOOK_OBJECT on delete cascade,  -- AddressBook Object's (kind=='group') RESOURCE_ID
+  ADDRESSBOOK_ID     integer      not null references ADDRESSBOOK_HOME on delete cascade,
+  MEMBER_ADDRESS     varchar(255) not null,                                                  -- member AddressBook Object's 'calendar' address
+
+  primary key (GROUP_ID, MEMBER_ADDRESS) -- implicit index
+);
+
+create index ABO_FOREIGN_MEMBERS_ADDRESSBOOK_ID on
+  ABO_FOREIGN_MEMBERS(ADDRESSBOOK_ID);
+
+-----------------------
+-- Shared Group Bind --
+-----------------------
+
+-- Joins ADDRESSBOOK_HOME and ADDRESSBOOK_OBJECT (kind == group)
+
+create table SHARED_GROUP_BIND (
+  ADDRESSBOOK_HOME_RESOURCE_ID      integer      not null references ADDRESSBOOK_HOME,
+  GROUP_RESOURCE_ID                 integer      not null references ADDRESSBOOK_OBJECT on delete cascade,
+  EXTERNAL_ID                       integer      default null,
+  GROUP_ADDRESSBOOK_NAME            varchar(255) not null,
+  BIND_MODE                         integer      not null, -- enum CALENDAR_BIND_MODE
+  BIND_STATUS                       integer      not null, -- enum CALENDAR_BIND_STATUS
+  BIND_REVISION                     integer      default 0 not null,
+  MESSAGE                           text,                  -- FIXME: xml?
+
+  primary key (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_RESOURCE_ID), -- implicit index
+  unique (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_ADDRESSBOOK_NAME)  -- implicit index
+);
+
+create index SHARED_GROUP_BIND_RESOURCE_ID on
+  SHARED_GROUP_BIND(GROUP_RESOURCE_ID);
+
+
+---------------
+-- Revisions --
+---------------
+
+-- create sequence REVISION_SEQ;
+
+
+-------------------------------
+-- Calendar Object Revisions --
+-------------------------------
+
+create table CALENDAR_OBJECT_REVISIONS (
+  CALENDAR_HOME_RESOURCE_ID integer      not null references CALENDAR_HOME,
+  CALENDAR_RESOURCE_ID      integer      references CALENDAR,
+  CALENDAR_NAME             varchar(255) default null,
+  RESOURCE_NAME             varchar(255),
+  REVISION                  integer      default nextval('REVISION_SEQ') not null,
+  DELETED                   boolean      not null,
+  MODIFIED                  timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  
+  unique(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID, CALENDAR_NAME, RESOURCE_NAME)    -- implicit index
+);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, REVISION);
+
+create index CALENDAR_OBJECT_REVISIONS_HOME_RESOURCE_ID_REVISION
+  on CALENDAR_OBJECT_REVISIONS(CALENDAR_HOME_RESOURCE_ID, REVISION);
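+
+-- For illustration only: these rows back sync-token reports; changes in one
+-- calendar since a client-held revision might be fetched with
+-- (hypothetical):
+--
+--   select RESOURCE_NAME, DELETED, REVISION
+--     from CALENDAR_OBJECT_REVISIONS
+--    where CALENDAR_RESOURCE_ID = :calendarID
+--      and REVISION > :clientRevision;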
+
+
+----------------------------------
+-- AddressBook Object Revisions --
+----------------------------------
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+  ADDRESSBOOK_HOME_RESOURCE_ID  integer      not null references ADDRESSBOOK_HOME,
+  OWNER_HOME_RESOURCE_ID        integer      references ADDRESSBOOK_HOME,
+  ADDRESSBOOK_NAME              varchar(255) default null,
+  OBJECT_RESOURCE_ID            integer      default 0,
+  RESOURCE_NAME                 varchar(255),
+  REVISION                      integer      default nextval('REVISION_SEQ') not null,
+  DELETED                       boolean      not null,
+  MODIFIED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+  
+  unique(ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID, ADDRESSBOOK_NAME, RESOURCE_NAME)    -- implicit index
+);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_REVISION
+  on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, REVISION);
+
+
+-----------------------------------
+-- Notification Object Revisions --
+-----------------------------------
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+  NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME on delete cascade,
+  RESOURCE_NAME                 varchar(255),
+  REVISION                      integer      default nextval('REVISION_SEQ') not null,
+  DELETED                       boolean      not null,
+  MODIFIED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  unique (NOTIFICATION_HOME_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+);
+
+create index NOTIFICATION_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+  on NOTIFICATION_OBJECT_REVISIONS(NOTIFICATION_HOME_RESOURCE_ID, REVISION);
+
+
+-------------------------------------------
+-- Apple Push Notification Subscriptions --
+-------------------------------------------
+
+create table APN_SUBSCRIPTIONS (
+  TOKEN                         varchar(255) not null,
+  RESOURCE_KEY                  varchar(255) not null,
+  MODIFIED                      integer      not null,
+  SUBSCRIBER_GUID               varchar(255) not null,
+  USER_AGENT                    varchar(255) default null,
+  IP_ADDR                       varchar(255) default null,
+
+  primary key (TOKEN, RESOURCE_KEY) -- implicit index
+);
+
+create index APN_SUBSCRIPTIONS_RESOURCE_KEY
+  on APN_SUBSCRIPTIONS(RESOURCE_KEY);
+
+
+-----------------
+-- IMIP Tokens --
+-----------------
+
+create table IMIP_TOKENS (
+  TOKEN                         varchar(255) not null,
+  ORGANIZER                     varchar(255) not null,
+  ATTENDEE                      varchar(255) not null,
+  ICALUID                       varchar(255) not null,
+  ACCESSED                      timestamp    default timezone('UTC', CURRENT_TIMESTAMP),
+
+  primary key (ORGANIZER, ATTENDEE, ICALUID) -- implicit index
+);
+
+create index IMIP_TOKENS_TOKEN
+  on IMIP_TOKENS(TOKEN);
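+
+-- For illustration only: the primary key serves token creation for an
+-- (organizer, attendee, UID) triple, while the TOKEN index serves the
+-- reverse lookup when a reply arrives (hypothetical):
+--
+--   select ORGANIZER, ATTENDEE, ICALUID from IMIP_TOKENS
+--    where TOKEN = :token;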
+
+
+----------------
+-- Work Items --
+----------------
+
+create sequence WORKITEM_SEQ;
+
+
+--------------------------
+-- IMIP Invitation Work --
+--------------------------
+
+create table IMIP_INVITATION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  FROM_ADDR                     varchar(255) not null,
+  TO_ADDR                       varchar(255) not null,
+  ICALENDAR_TEXT                text         not null
+);
+
+create index IMIP_INVITATION_WORK_JOB_ID on
+  IMIP_INVITATION_WORK(JOB_ID);
+
+-----------------------
+-- IMIP Polling Work --
+-----------------------
+
+create table IMIP_POLLING_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index IMIP_POLLING_WORK_JOB_ID on
+  IMIP_POLLING_WORK(JOB_ID);
+
+
+---------------------
+-- IMIP Reply Work --
+---------------------
+
+create table IMIP_REPLY_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  ORGANIZER                     varchar(255) not null,
+  ATTENDEE                      varchar(255) not null,
+  ICALENDAR_TEXT                text         not null
+);
+
+create index IMIP_REPLY_WORK_JOB_ID on
+  IMIP_REPLY_WORK(JOB_ID);
+
+
+------------------------
+-- Push Notifications --
+------------------------
+
+create table PUSH_NOTIFICATION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  PUSH_ID                       varchar(255) not null,
+  PUSH_PRIORITY                 integer      not null -- 1:low 5:medium 10:high
+);
+
+create index PUSH_NOTIFICATION_WORK_JOB_ID on
+  PUSH_NOTIFICATION_WORK(JOB_ID);
+create index PUSH_NOTIFICATION_WORK_PUSH_ID on
+  PUSH_NOTIFICATION_WORK(PUSH_ID);
+
+-----------------
+-- GroupCacher --
+-----------------
+
+create table GROUP_CACHER_POLLING_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index GROUP_CACHER_POLLING_WORK_JOB_ID on
+  GROUP_CACHER_POLLING_WORK(JOB_ID);
+
+create table GROUP_REFRESH_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  GROUP_UID                     varchar(255) not null
+);
+
+create index GROUP_REFRESH_WORK_JOB_ID on
+  GROUP_REFRESH_WORK(JOB_ID);
+create index GROUP_REFRESH_WORK_GROUP_UID on
+  GROUP_REFRESH_WORK(GROUP_UID);
+
+create table GROUP_DELEGATE_CHANGES_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  DELEGATOR_UID                 varchar(255) not null,
+  READ_DELEGATE_UID             varchar(255) not null,
+  WRITE_DELEGATE_UID            varchar(255) not null
+);
+
+create index GROUP_DELEGATE_CHANGES_WORK_JOB_ID on
+  GROUP_DELEGATE_CHANGES_WORK(JOB_ID);
+create index GROUP_DELEGATE_CHANGES_WORK_DELEGATOR_UID on
+  GROUP_DELEGATE_CHANGES_WORK(DELEGATOR_UID);
+
+create table GROUPS (
+  GROUP_ID                      integer      primary key default nextval('RESOURCE_ID_SEQ'),    -- implicit index
+  NAME                          varchar(255) not null,
+  GROUP_UID                     varchar(255) not null unique,
+  MEMBERSHIP_HASH               varchar(255) not null,
+  EXTANT                        integer default 1,
+  CREATED                       timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+  MODIFIED                      timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+create index GROUPS_GROUP_UID on
+  GROUPS(GROUP_UID);
+
+create table GROUP_MEMBERSHIP (
+  GROUP_ID                     integer not null references GROUPS on delete cascade,
+  MEMBER_UID                   varchar(255) not null,
+
+  primary key (GROUP_ID, MEMBER_UID)
+);
+
+create index GROUP_MEMBERSHIP_MEMBER on
+  GROUP_MEMBERSHIP(MEMBER_UID);
+
+create table GROUP_ATTENDEE_RECONCILE_WORK (
+  WORK_ID                       integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer not null references JOB,
+  RESOURCE_ID                   integer not null references CALENDAR_OBJECT on delete cascade,
+  GROUP_ID                      integer not null references GROUPS on delete cascade
+);
+
+create index GROUP_ATTENDEE_RECONCILE_WORK_JOB_ID on
+  GROUP_ATTENDEE_RECONCILE_WORK(JOB_ID);
+create index GROUP_ATTENDEE_RECONCILE_WORK_RESOURCE_ID on
+  GROUP_ATTENDEE_RECONCILE_WORK(RESOURCE_ID);
+create index GROUP_ATTENDEE_RECONCILE_WORK_GROUP_ID on
+  GROUP_ATTENDEE_RECONCILE_WORK(GROUP_ID);
+
+
+create table GROUP_ATTENDEE (
+  GROUP_ID                      integer not null references GROUPS on delete cascade,
+  RESOURCE_ID                   integer not null references CALENDAR_OBJECT on delete cascade,
+  MEMBERSHIP_HASH               varchar(255) not null,
+
+  primary key (GROUP_ID, RESOURCE_ID)
+);
+
+create index GROUP_ATTENDEE_RESOURCE_ID on
+  GROUP_ATTENDEE(RESOURCE_ID);
+
+
+create table GROUP_SHAREE_RECONCILE_WORK (
+  WORK_ID                       integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer not null references JOB,
+  CALENDAR_ID                   integer not null references CALENDAR on delete cascade,
+  GROUP_ID                      integer not null references GROUPS on delete cascade
+);
+
+create index GROUP_SHAREE_RECONCILE_WORK_JOB_ID on
+  GROUP_SHAREE_RECONCILE_WORK(JOB_ID);
+create index GROUP_SHAREE_RECONCILE_WORK_CALENDAR_ID on
+  GROUP_SHAREE_RECONCILE_WORK(CALENDAR_ID);
+create index GROUP_SHAREE_RECONCILE_WORK_GROUP_ID on
+  GROUP_SHAREE_RECONCILE_WORK(GROUP_ID);
+
+
+create table GROUP_SHAREE (
+  GROUP_ID                      integer not null references GROUPS on delete cascade,
+  CALENDAR_ID                   integer not null references CALENDAR on delete cascade,
+  GROUP_BIND_MODE               integer not null, -- enum CALENDAR_BIND_MODE
+  MEMBERSHIP_HASH               varchar(255) not null,
+
+  primary key (GROUP_ID, CALENDAR_ID)
+);
+
+create index GROUP_SHAREE_CALENDAR_ID on
+  GROUP_SHAREE(CALENDAR_ID);
+
+---------------
+-- Delegates --
+---------------
+
+create table DELEGATES (
+  DELEGATOR                     varchar(255) not null,
+  DELEGATE                      varchar(255) not null,
+  READ_WRITE                    integer      not null, -- 1 = ReadWrite, 0 = ReadOnly
+
+  primary key (DELEGATOR, READ_WRITE, DELEGATE)
+);
+create index DELEGATE_TO_DELEGATOR on
+  DELEGATES(DELEGATE, READ_WRITE, DELEGATOR);
+
+create table DELEGATE_GROUPS (
+  DELEGATOR                     varchar(255) not null,
+  GROUP_ID                      integer      not null references GROUPS on delete cascade,
+  READ_WRITE                    integer      not null, -- 1 = ReadWrite, 0 = ReadOnly
+  IS_EXTERNAL                   integer      not null, -- 1 = External, 0 = Internal
+
+  primary key (DELEGATOR, READ_WRITE, GROUP_ID)
+);
+create index DELEGATE_GROUPS_GROUP_ID on
+  DELEGATE_GROUPS(GROUP_ID);
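+
+-- For illustration only: direct and group-based delegates of one principal
+-- might be combined like this (hypothetical; READ_WRITE = 1 selects
+-- read-write delegates):
+--
+--   select DELEGATE from DELEGATES
+--    where DELEGATOR = :uid and READ_WRITE = 1
+--   union
+--   select GM.MEMBER_UID
+--     from DELEGATE_GROUPS DG
+--     join GROUP_MEMBERSHIP GM on GM.GROUP_ID = DG.GROUP_ID
+--    where DG.DELEGATOR = :uid and DG.READ_WRITE = 1;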
+
+create table EXTERNAL_DELEGATE_GROUPS (
+  DELEGATOR                     varchar(255) primary key,
+  GROUP_UID_READ                varchar(255),
+  GROUP_UID_WRITE               varchar(255)
+);
+
+--------------------------
+-- Object Splitter Work --
+--------------------------
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade
+);
+
+create index CALENDAR_OBJECT_SPLITTER_WORK_RESOURCE_ID on
+  CALENDAR_OBJECT_SPLITTER_WORK(RESOURCE_ID);
+create index CALENDAR_OBJECT_SPLITTER_WORK_JOB_ID on
+  CALENDAR_OBJECT_SPLITTER_WORK(JOB_ID);
+
+-------------------------
+-- Object Upgrade Work --
+-------------------------
+
+create table CALENDAR_OBJECT_UPGRADE_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade
+);
+
+create index CALENDAR_OBJECT_UPGRADE_WORK_RESOURCE_ID on
+  CALENDAR_OBJECT_UPGRADE_WORK(RESOURCE_ID);
+create index CALENDAR_OBJECT_UPGRADE_WORK_JOB_ID on
+  CALENDAR_OBJECT_UPGRADE_WORK(JOB_ID);
+
+---------------------------
+-- Revision Cleanup Work --
+---------------------------
+
+create table FIND_MIN_VALID_REVISION_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index FIND_MIN_VALID_REVISION_WORK_JOB_ID on
+  FIND_MIN_VALID_REVISION_WORK(JOB_ID);
+
+create table REVISION_CLEANUP_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index REVISION_CLEANUP_WORK_JOB_ID on
+  REVISION_CLEANUP_WORK(JOB_ID);
+
+------------------------
+-- Inbox Cleanup Work --
+------------------------
+
+create table INBOX_CLEANUP_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index INBOX_CLEANUP_WORK_JOB_ID on
+  INBOX_CLEANUP_WORK(JOB_ID);
+
+create table CLEANUP_ONE_INBOX_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  HOME_ID                       integer      not null unique references CALENDAR_HOME on delete cascade -- implicit index
+);
+
+create index CLEANUP_ONE_INBOX_WORK_JOB_ID on
+  CLEANUP_ONE_INBOX_WORK(JOB_ID);
+
+-------------------
+-- Schedule Work --
+-------------------
+
+create table SCHEDULE_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  ICALENDAR_UID                 varchar(255) not null,
+  WORK_TYPE                     varchar(255) not null
+);
+
+create index SCHEDULE_WORK_JOB_ID on
+  SCHEDULE_WORK(JOB_ID);
+create index SCHEDULE_WORK_ICALENDAR_UID on
+  SCHEDULE_WORK(ICALENDAR_UID);
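+
+-- For illustration only: the schedule work tables that follow reuse
+-- SCHEDULE_WORK's WORK_ID as their primary key, so the detail row for a
+-- queued item is fetched with a join such as (hypothetical):
+--
+--   select W.ICALENDAR_UID, R.HOME_RESOURCE_ID, R.RESOURCE_ID
+--     from SCHEDULE_WORK W
+--     join SCHEDULE_REFRESH_WORK R on R.WORK_ID = W.WORK_ID
+--    where W.WORK_ID = :workID;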
+
+---------------------------
+-- Schedule Refresh Work --
+---------------------------
+
+create table SCHEDULE_REFRESH_WORK (
+  WORK_ID                       integer      primary key references SCHEDULE_WORK on delete cascade, -- implicit index
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade,
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade,
+  ATTENDEE_COUNT                integer
+);
+
+create index SCHEDULE_REFRESH_WORK_HOME_RESOURCE_ID on
+  SCHEDULE_REFRESH_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_REFRESH_WORK_RESOURCE_ID on
+  SCHEDULE_REFRESH_WORK(RESOURCE_ID);
+
+create table SCHEDULE_REFRESH_ATTENDEES (
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade,
+  ATTENDEE                      varchar(255) not null,
+
+  primary key (RESOURCE_ID, ATTENDEE)
+);
+
+create index SCHEDULE_REFRESH_ATTENDEES_RESOURCE_ID_ATTENDEE on
+  SCHEDULE_REFRESH_ATTENDEES(RESOURCE_ID, ATTENDEE);
+
+------------------------------
+-- Schedule Auto Reply Work --
+------------------------------
+
+create table SCHEDULE_AUTO_REPLY_WORK (
+  WORK_ID                       integer      primary key references SCHEDULE_WORK on delete cascade, -- implicit index
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade,
+  RESOURCE_ID                   integer      not null references CALENDAR_OBJECT on delete cascade,
+  PARTSTAT                      varchar(255) not null
+);
+
+create index SCHEDULE_AUTO_REPLY_WORK_HOME_RESOURCE_ID on
+  SCHEDULE_AUTO_REPLY_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_AUTO_REPLY_WORK_RESOURCE_ID on
+  SCHEDULE_AUTO_REPLY_WORK(RESOURCE_ID);
+
+-----------------------------
+-- Schedule Organizer Work --
+-----------------------------
+
+create table SCHEDULE_ORGANIZER_WORK (
+  WORK_ID                       integer      primary key references SCHEDULE_WORK on delete cascade, -- implicit index
+  SCHEDULE_ACTION               integer      not null, -- Enum SCHEDULE_ACTION
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade,
+  RESOURCE_ID                   integer,     -- this references a possibly non-existent CALENDAR_OBJECT
+  ICALENDAR_TEXT_OLD            text,
+  ICALENDAR_TEXT_NEW            text,
+  ATTENDEE_COUNT                integer,
+  SMART_MERGE                   boolean
+);
+
+create index SCHEDULE_ORGANIZER_WORK_HOME_RESOURCE_ID on
+  SCHEDULE_ORGANIZER_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_ORGANIZER_WORK_RESOURCE_ID on
+  SCHEDULE_ORGANIZER_WORK(RESOURCE_ID);
+
+-- Enumeration of schedule actions
+
+create table SCHEDULE_ACTION (
+  ID          integer     primary key,
+  DESCRIPTION varchar(16) not null unique
+);
+
+insert into SCHEDULE_ACTION values (0, 'create');
+insert into SCHEDULE_ACTION values (1, 'modify');
+insert into SCHEDULE_ACTION values (2, 'modify-cancelled');
+insert into SCHEDULE_ACTION values (3, 'remove');
+
+----------------------------------
+-- Schedule Organizer Send Work --
+----------------------------------
+
+create table SCHEDULE_ORGANIZER_SEND_WORK (
+  WORK_ID                       integer      primary key references SCHEDULE_WORK on delete cascade, -- implicit index
+  SCHEDULE_ACTION               integer      not null, -- Enum SCHEDULE_ACTION
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade,
+  RESOURCE_ID                   integer,     -- this references a possibly non-existent CALENDAR_OBJECT
+  ATTENDEE                      varchar(255) not null,
+  ITIP_MSG                      text,
+  NO_REFRESH                    boolean
+);
+
+create index SCHEDULE_ORGANIZER_SEND_WORK_HOME_RESOURCE_ID on
+  SCHEDULE_ORGANIZER_SEND_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_ORGANIZER_SEND_WORK_RESOURCE_ID on
+  SCHEDULE_ORGANIZER_SEND_WORK(RESOURCE_ID);
+
+-------------------------
+-- Schedule Reply Work --
+-------------------------
+
+create table SCHEDULE_REPLY_WORK (
+  WORK_ID                       integer      primary key references SCHEDULE_WORK on delete cascade, -- implicit index
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade,
+  RESOURCE_ID                   integer,     -- this references a possibly non-existent CALENDAR_OBJECT
+  ITIP_MSG                      text
+);
+
+create index SCHEDULE_REPLY_WORK_HOME_RESOURCE_ID on
+  SCHEDULE_REPLY_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_REPLY_WORK_RESOURCE_ID on
+  SCHEDULE_REPLY_WORK(RESOURCE_ID);
+
+----------------------------------
+-- Principal Purge Polling Work --
+----------------------------------
+
+create table PRINCIPAL_PURGE_POLLING_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null
+);
+
+create index PRINCIPAL_PURGE_POLLING_WORK_JOB_ID on
+  PRINCIPAL_PURGE_POLLING_WORK(JOB_ID);
+
+--------------------------------
+-- Principal Purge Check Work --
+--------------------------------
+
+create table PRINCIPAL_PURGE_CHECK_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  UID                           varchar(255) not null
+);
+
+create index PRINCIPAL_PURGE_CHECK_WORK_JOB_ID on
+  PRINCIPAL_PURGE_CHECK_WORK(JOB_ID);
+create index PRINCIPAL_PURGE_CHECK_WORK_UID on
+  PRINCIPAL_PURGE_CHECK_WORK(UID);
+
+--------------------------
+-- Principal Purge Work --
+--------------------------
+
+create table PRINCIPAL_PURGE_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  UID                           varchar(255) not null
+);
+
+create index PRINCIPAL_PURGE_WORK_JOB_ID on
+  PRINCIPAL_PURGE_WORK(JOB_ID);
+create index PRINCIPAL_PURGE_WORK_UID on
+  PRINCIPAL_PURGE_WORK(UID);
+
+
+--------------------------------
+-- Principal Home Remove Work --
+--------------------------------
+
+create table PRINCIPAL_PURGE_HOME_WORK (
+  WORK_ID                       integer      primary key default nextval('WORKITEM_SEQ'), -- implicit index
+  JOB_ID                        integer      references JOB not null,
+  HOME_RESOURCE_ID              integer      not null references CALENDAR_HOME on delete cascade
+);
+
+create index PRINCIPAL_PURGE_HOME_WORK_JOB_ID on
+  PRINCIPAL_PURGE_HOME_WORK(JOB_ID);
+create index PRINCIPAL_PURGE_HOME_HOME_RESOURCE_ID on
+  PRINCIPAL_PURGE_HOME_WORK(HOME_RESOURCE_ID);
+
+
+--------------------
+-- Schema Version --
+--------------------
+
+create table CALENDARSERVER (
+  NAME                          varchar(255) primary key, -- implicit index
+  VALUE                         varchar(255)
+);
+
+insert into CALENDARSERVER values ('VERSION', '51');
+insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '6');
+insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');
+insert into CALENDARSERVER values ('MIN-VALID-REVISION', '1');
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_schemaupgradesoracledialectupgrade_from_51_to_52sqlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_schemaupgradesoracledialectupgrade_from_51_to_52sql"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_51_to_52.sql (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_51_to_52.sql) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_51_to_52.sql                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_51_to_52.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,102 @@
</span><ins>+----
+-- Copyright (c) 2012-2015 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 51 to 52 --
+---------------------------------------------------
+
+-- New status values
+insert into HOME_STATUS (DESCRIPTION, ID) values ('migrating', 3);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('disabled', 4);
+
+-- Home constraints
+alter table CALENDAR_HOME
+        drop unique (OWNER_UID);
+alter table CALENDAR_HOME
+        add unique (OWNER_UID, STATUS);
+
+alter table ADDRESSBOOK_HOME
+        drop unique (OWNER_UID);
+alter table ADDRESSBOOK_HOME
+        add unique (OWNER_UID, STATUS);
+
+alter table NOTIFICATION_HOME
+        drop unique (OWNER_UID);
+alter table NOTIFICATION_HOME
+        add unique (OWNER_UID, STATUS);
+
+-- Change columns
+alter table CALENDAR_BIND
+        drop column EXTERNAL_ID
+        add (&quot;BIND_UID&quot; nvarchar2(36) default null);
+
+alter table SHARED_ADDRESSBOOK_BIND
+        drop column EXTERNAL_ID
+        add (&quot;BIND_UID&quot; nvarchar2(36) default null);
+
+alter table SHARED_GROUP_BIND
+        drop column EXTERNAL_ID
+        add (&quot;BIND_UID&quot; nvarchar2(36) default null);
+
+
+-- New table
+create table CALENDAR_MIGRATION (
+    &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer references CALENDAR_HOME on delete cascade,
+    &quot;REMOTE_RESOURCE_ID&quot; integer not null,
+    &quot;LOCAL_RESOURCE_ID&quot; integer references CALENDAR on delete cascade,
+    &quot;LAST_SYNC_TOKEN&quot; nvarchar2(255), 
+    primary key (&quot;CALENDAR_HOME_RESOURCE_ID&quot;, &quot;REMOTE_RESOURCE_ID&quot;)
+);
+
+create index CALENDAR_MIGRATION_LO_0525c72b on CALENDAR_MIGRATION (
+    LOCAL_RESOURCE_ID
+);
+
+-- New table
+create table CALENDAR_OBJECT_MIGRATION (
+    &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer references CALENDAR_HOME on delete cascade,
+    &quot;REMOTE_RESOURCE_ID&quot; integer not null,
+    &quot;LOCAL_RESOURCE_ID&quot; integer references CALENDAR_OBJECT on delete cascade, 
+    primary key (&quot;CALENDAR_HOME_RESOURCE_ID&quot;, &quot;REMOTE_RESOURCE_ID&quot;)
+);
+
+create index CALENDAR_OBJECT_MIGRA_0502cbef on CALENDAR_OBJECT_MIGRATION (
+    CALENDAR_HOME_RESOURCE_ID,
+    LOCAL_RESOURCE_ID
+);
+create index CALENDAR_OBJECT_MIGRA_3577efd9 on CALENDAR_OBJECT_MIGRATION (
+    LOCAL_RESOURCE_ID
+);
+
+-- New table
+create table ATTACHMENT_MIGRATION (
+    &quot;CALENDAR_HOME_RESOURCE_ID&quot; integer references CALENDAR_HOME on delete cascade,
+    &quot;REMOTE_RESOURCE_ID&quot; integer not null,
+    &quot;LOCAL_RESOURCE_ID&quot; integer references ATTACHMENT on delete cascade, 
+    primary key (&quot;CALENDAR_HOME_RESOURCE_ID&quot;, &quot;REMOTE_RESOURCE_ID&quot;)
+);
+
+create index ATTACHMENT_MIGRATION__804bf85e on ATTACHMENT_MIGRATION (
+    CALENDAR_HOME_RESOURCE_ID,
+    LOCAL_RESOURCE_ID
+);
+create index ATTACHMENT_MIGRATION__816947fe on ATTACHMENT_MIGRATION (
+    LOCAL_RESOURCE_ID
+);
+
+
+-- update the version
+update CALENDARSERVER set VALUE = '52' where NAME = 'VERSION';
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_schemaupgradespostgresdialectupgrade_from_51_to_52sqlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_schemaupgradespostgresdialectupgrade_from_51_to_52sql"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_51_to_52.sql (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_51_to_52.sql) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_51_to_52.sql                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_51_to_52.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,97 @@
</span><ins>+----
+-- Copyright (c) 2012-2015 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 51 to 52 --
+---------------------------------------------------
+
+-- New status values
+insert into HOME_STATUS values (3, 'migrating');
+insert into HOME_STATUS values (4, 'disabled');
+
+-- Home constraints
+alter table CALENDAR_HOME
+        drop constraint CALENDAR_HOME_OWNER_UID_KEY,
+        add unique (OWNER_UID, STATUS);
+
+alter table ADDRESSBOOK_HOME
+        drop constraint ADDRESSBOOK_HOME_OWNER_UID_KEY,
+        add unique (OWNER_UID, STATUS);
+
+alter table NOTIFICATION_HOME
+        drop constraint NOTIFICATION_HOME_OWNER_UID_KEY,
+        add unique (OWNER_UID, STATUS);
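+
+-- Note (illustrative, not executed): the dropped names above are the ones
+-- PostgreSQL generates by default for the inline unique (OWNER_UID)
+-- declarations in v51; a deployment can confirm them with:
+--
+--   select conname from pg_constraint
+--    where conrelid = 'calendar_home'::regclass;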
+
+-- Change columns
+alter table CALENDAR_BIND
+        drop column EXTERNAL_ID,
+        add column BIND_UID varchar(36) default null;
+
+alter table SHARED_ADDRESSBOOK_BIND
+        drop column EXTERNAL_ID,
+        add column BIND_UID varchar(36) default null;
+
+alter table SHARED_GROUP_BIND
+        drop column EXTERNAL_ID,
+        add column BIND_UID varchar(36) default null;
+
+
+-- New table
+create table CALENDAR_MIGRATION (
+  CALENDAR_HOME_RESOURCE_ID     integer      references CALENDAR_HOME on delete cascade,
+  REMOTE_RESOURCE_ID            integer      not null,
+  LOCAL_RESOURCE_ID             integer      references CALENDAR on delete cascade,
+  LAST_SYNC_TOKEN               varchar(255),
+
+  primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index CALENDAR_MIGRATION_LOCAL_RESOURCE_ID on
+  CALENDAR_MIGRATION(LOCAL_RESOURCE_ID);
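+
+-- For illustration only: during a cross-pod migration this table maps a
+-- calendar on the old (remote) pod to its local copy (hypothetical):
+--
+--   select LOCAL_RESOURCE_ID, LAST_SYNC_TOKEN from CALENDAR_MIGRATION
+--    where CALENDAR_HOME_RESOURCE_ID = :homeID
+--      and REMOTE_RESOURCE_ID = :remoteID;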
+
+
+-- New table
+create table CALENDAR_OBJECT_MIGRATION (
+  CALENDAR_HOME_RESOURCE_ID     integer      references CALENDAR_HOME on delete cascade,
+  REMOTE_RESOURCE_ID            integer      not null,
+  LOCAL_RESOURCE_ID             integer      references CALENDAR_OBJECT on delete cascade,
+
+  primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index CALENDAR_OBJECT_MIGRATION_HOME_LOCAL on
+  CALENDAR_OBJECT_MIGRATION(CALENDAR_HOME_RESOURCE_ID, LOCAL_RESOURCE_ID);
+create index CALENDAR_OBJECT_MIGRATION_LOCAL_RESOURCE_ID on
+  CALENDAR_OBJECT_MIGRATION(LOCAL_RESOURCE_ID);
+
+
+-- New table
+create table ATTACHMENT_MIGRATION (
+  CALENDAR_HOME_RESOURCE_ID     integer      references CALENDAR_HOME on delete cascade,
+  REMOTE_RESOURCE_ID            integer      not null,
+  LOCAL_RESOURCE_ID             integer      references ATTACHMENT on delete cascade,
+
+  primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index ATTACHMENT_MIGRATION_HOME_LOCAL on
+  ATTACHMENT_MIGRATION(CALENDAR_HOME_RESOURCE_ID, LOCAL_RESOURCE_ID);
+create index ATTACHMENT_MIGRATION_LOCAL_RESOURCE_ID on
+  ATTACHMENT_MIGRATION(LOCAL_RESOURCE_ID);
+
+
+-- update the version
+update CALENDARSERVER set VALUE = '52' where NAME = 'VERSION';
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_sharingpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_sharingpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_sharing.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_sharing.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_sharing.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_sharing.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,1470 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from collections import namedtuple
+from pycalendar.datetime import DateTime
+
+from twext.enterprise.dal.syntax import Insert, Parameter, Update, Delete, \
+    Select, Max
+from twext.python.clsprop import classproperty
+from twext.python.log import Logger
+
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+
+from txdav.base.propertystore.base import PropertyName
+from txdav.common.datastore.sql_tables import _BIND_MODE_OWN, _BIND_MODE_DIRECT, \
+    _BIND_MODE_INDIRECT, _BIND_STATUS_ACCEPTED, _BIND_STATUS_DECLINED, \
+    _BIND_STATUS_INVITED, _BIND_STATUS_INVALID, _BIND_STATUS_DELETED, \
+    _HOME_STATUS_EXTERNAL
+from txdav.common.icommondatastore import ExternalShareFailed, \
+    HomeChildNameAlreadyExistsError, AllRetriesFailed
+from txdav.xml import element
+
+from uuid import uuid4
+
+
+log = Logger()
+
+&quot;&quot;&quot;
+Classes and methods that relate to sharing in the SQL store.
+&quot;&quot;&quot;
+
+class SharingHomeMixIn(object):
+    &quot;&quot;&quot;
+    Common class for CommonHome to implement sharing operations
+    &quot;&quot;&quot;
+
+    @inlineCallbacks
+    def acceptShare(self, shareUID, summary=None):
+        &quot;&quot;&quot;
+        This share is being accepted.
+        &quot;&quot;&quot;
+
+        shareeView = yield self.anyObjectWithShareUID(shareUID)
+        if shareeView is not None:
+            yield shareeView.acceptShare(summary)
+
+        returnValue(shareeView)
+
+
+    @inlineCallbacks
+    def declineShare(self, shareUID):
+        &quot;&quot;&quot;
+        This share is being declined.
+        &quot;&quot;&quot;
+
+        shareeView = yield self.anyObjectWithShareUID(shareUID)
+        if shareeView is not None:
+            yield shareeView.declineShare()
+
+        returnValue(shareeView is not None)
+
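+    # Illustrative sketch (hypothetical usage, not from this changeset): the
+    # accept/decline entry points above are driven from application code, e.g.:
+    #
+    #     shareeView = yield home.acceptShare('some-share-uid', summary='Team')
+    #     wasKnown = yield home.declineShare('other-share-uid')
+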
+
+    #
+    # External (cross-pod) sharing - entry point is the sharee's home collection.
+    #
+    @inlineCallbacks
+    def processExternalInvite(
+        self, ownerUID, ownerName, shareUID, bindMode, bindUID, summary,
+        copy_invite_properties, supported_components=None
+    ):
+        &quot;&quot;&quot;
+        External invite received.
+        &quot;&quot;&quot;
+
+        # Get the owner home - create external one if not present
+        ownerHome = yield self._txn.homeWithUID(
+            self._homeType, ownerUID, status=_HOME_STATUS_EXTERNAL, create=True
+        )
+        if ownerHome is None or not ownerHome.external():
+            raise ExternalShareFailed(&quot;Invalid owner UID: {}&quot;.format(ownerUID))
+
+        # Try to find owner calendar via its external id
+        ownerView = yield ownerHome.childWithBindUID(bindUID)
+        if ownerView is None:
+            ownerView = yield ownerHome.createCollectionForExternalShare(ownerName, bindUID, supported_components)
+
+        # Now carry out the share operation
+        if bindMode == _BIND_MODE_DIRECT:
+            shareeView = yield ownerView.directShareWithUser(
+                self.uid(), shareName=shareUID
+            )
+        else:
+            shareeView = yield ownerView.inviteUIDToShare(
+                self.uid(), bindMode, summary, shareName=shareUID
+            )
+
+        shareeView.setInviteCopyProperties(copy_invite_properties)
+
+
+    @inlineCallbacks
+    def processExternalUninvite(self, ownerUID, bindUID, shareUID):
+        &quot;&quot;&quot;
+        External uninvite received.
+        &quot;&quot;&quot;
+
+        # Get the owner home
+        ownerHome = yield self._txn.homeWithUID(self._homeType, ownerUID, status=_HOME_STATUS_EXTERNAL)
+        if ownerHome is None or not ownerHome.external():
+            raise ExternalShareFailed(&quot;Invalid owner UID: {}&quot;.format(ownerUID))
+
+        # Try to find owner calendar via its external id
+        ownerView = yield ownerHome.childWithBindUID(bindUID)
+        if ownerView is None:
+            raise ExternalShareFailed(&quot;Invalid share ID: {}&quot;.format(shareUID))
+
+        # Now carry out the share operation
+        yield ownerView.uninviteUIDFromShare(self.uid())
+
+        # See if there are any references to the external share. If not,
+        # remove it
+        invites = yield ownerView.sharingInvites()
+        if len(invites) == 0:
+            yield ownerHome.removeExternalChild(ownerView)
+
+
+    @inlineCallbacks
+    def processExternalReply(
+        self, ownerUID, shareeUID, shareUID, bindStatus, summary=None
+    ):
+        &quot;&quot;&quot;
+        External share reply received.
+        &quot;&quot;&quot;
+
+        # Make sure the shareeUID and shareUID match
+
+        # Get the sharee home - it must already exist as an external home
+        shareeHome = yield self._txn.homeWithUID(self._homeType, shareeUID, status=_HOME_STATUS_EXTERNAL)
+        if shareeHome is None or not shareeHome.external():
+            raise ExternalShareFailed(
+                &quot;Invalid sharee UID: {}&quot;.format(shareeUID)
+            )
+
+        # Try to find the sharee's view of the share via its share UID
+        shareeView = yield shareeHome.anyObjectWithShareUID(shareUID)
+        if shareeView is None:
+            raise ExternalShareFailed(&quot;Invalid share UID: {}&quot;.format(shareUID))
+
+        # Now carry out the share operation
+        if bindStatus == _BIND_STATUS_ACCEPTED:
+            yield shareeHome.acceptShare(shareUID, summary)
+        elif bindStatus == _BIND_STATUS_DECLINED:
+            if shareeView.direct():
+                yield shareeView.deleteShare()
+            else:
+                yield shareeHome.declineShare(shareUID)
+
+
+    @inlineCallbacks
+    def createCollectionForExternalShare(self, name, bindUID, supported_components):
+        &quot;&quot;&quot;
+        Create the L{CommonHomeChild} object that is used as a &quot;stub&quot; to represent the external
+        object on the other pod for the sharer.
+
+        @param name: name of the collection
+        @type name: L{str}
+        @param bindUID: id on other pod
+        @type bindUID: L{str}
+        @param supported_components: optional set of supported components
+        @type supported_components: L{str}
+        &quot;&quot;&quot;
+        try:
+            ownerView = yield self.createChildWithName(
+                name, bindUID=bindUID
+            )
+        except HomeChildNameAlreadyExistsError:
+            # This is odd - it means we possibly have a left over sharer
+            # collection which the sharer likely removed and re-created
+            # with the same name but now it has a different bindUID and
+            # is not found by the initial query. What we do is check to see
+            # whether any shares still reference the old ID - if they do we
+            # are hosed. If not, we can remove the old item and create a new one.
+            oldOwnerView = yield self.childWithName(name)
+            invites = yield oldOwnerView.sharingInvites()
+            if len(invites) != 0:
+                log.error(
+                    &quot;External invite collection name is present with a &quot;
+                    &quot;different bindUID and still has shares&quot;
+                )
+                raise
+            log.error(
+                &quot;External invite collection name is present with a &quot;
+                &quot;different bindUID - trying to fix&quot;
+            )
+            yield self.removeExternalChild(oldOwnerView)
+            ownerView = yield self.createChildWithName(
+                name, bindUID=bindUID
+            )
+
+        if (
+            supported_components is not None and
+            hasattr(ownerView, &quot;setSupportedComponents&quot;)
+        ):
+            yield ownerView.setSupportedComponents(supported_components)
+
+        returnValue(ownerView)
+
+
+    @inlineCallbacks
+    def sharedToBindRecords(self):
+        &quot;&quot;&quot;
+        Return a L{dict} that maps home/directory uid to a sharing bind record for collections shared to this user.
+        &quot;&quot;&quot;
+
+        # Get shared to bind records
+        records = yield self._childClass._bindRecordClass.query(
+            self._txn,
+            (getattr(self._childClass._bindRecordClass, self._childClass._bindHomeIDAttributeName) == self.id()).And(
+                self._childClass._bindRecordClass.bindMode != _BIND_MODE_OWN
+            )
+        )
+        records = dict([(getattr(record, self._childClass._bindResourceIDAttributeName), record) for record in records])
+        if not records:
+            returnValue({})
+
+        # Look up the owner records for each of the shared to records
+        ownerRecords = yield self._childClass._bindRecordClass.query(
+            self._txn,
+            (getattr(self._childClass._bindRecordClass, self._childClass._bindResourceIDAttributeName).In(records.keys())).And(
+                self._childClass._bindRecordClass.bindMode == _BIND_MODE_OWN
+            )
+        )
+
+        # Important - this method is called when migrating shared-to records to some other pod. For that to work, all
+        # of the owner records must have a bindUID assigned to them. Normally bindUIDs are assigned the first time an
+        # external share is created, but migration will implicitly create the external share.
+        for ownerRecord in ownerRecords:
+            if not ownerRecord.bindUID:
+                yield ownerRecord.update(bindUID=str(uuid4()))
+
+        ownerRecords = dict([(getattr(record, self._childClass._bindResourceIDAttributeName), record) for record in ownerRecords])
+
+        # Look up the metadata records for each of the shared to records
+        metadataRecords = yield self._childClass._metadataRecordClass.query(
+            self._txn,
+            self._childClass._metadataRecordClass.resourceID.In(records.keys()),
+        )
+        metadataRecords = dict([(record.resourceID, record) for record in metadataRecords])
+
+        # Map the owner records to home ownerUIDs
+        homeIDs = dict([(
+            getattr(record, self._childClass._bindHomeIDAttributeName), getattr(record, self._childClass._bindResourceIDAttributeName)
+        ) for record in ownerRecords.values()])
+        homes = yield self._childClass._homeRecordClass.query(
+            self._txn,
+            self._childClass._homeRecordClass.resourceID.In(homeIDs.keys()),
+        )
+        homeMap = dict((homeIDs[home.resourceID], home.ownerUID,) for home in homes)
+
+        returnValue(dict([(homeMap[calendarID], (records[calendarID], ownerRecords[calendarID], metadataRecords[calendarID],),) for calendarID in records]))
+
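+    # Illustrative note (hypothetical values, not from this changeset): the
+    # returned mapping keys each sharer's ownerUID to a 3-tuple of records:
+    #
+    #     {'owner-uid': (shareeBindRecord, ownerBindRecord, metadataRecord)}
+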
+
+
+SharingInvitation = namedtuple(
+    &quot;SharingInvitation&quot;,
+    [&quot;uid&quot;, &quot;ownerUID&quot;, &quot;ownerHomeID&quot;, &quot;shareeUID&quot;, &quot;shareeHomeID&quot;, &quot;mode&quot;, &quot;status&quot;, &quot;summary&quot;]
+)
+
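+# Illustrative sketch (hypothetical values, not from this changeset): as a plain
+# namedtuple, SharingInvitation fields can be read positionally or by name:
+#
+#     invite = SharingInvitation(
+#         'share-uid', 'owner', 1, 'sharee', 2,
+#         _BIND_MODE_DIRECT, _BIND_STATUS_ACCEPTED, 'a summary',
+#     )
+#     assert invite.shareeUID == 'sharee'
+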
+
+
+class SharingMixIn(object):
+    &quot;&quot;&quot;
+    Common class for CommonHomeChild and AddressBookObject
+    &quot;&quot;&quot;
+
+    @classproperty
+    def _bindInsertQuery(cls, **kw):
+        &quot;&quot;&quot;
+        DAL statement to create a bind entry that connects a collection to its
+        home.
+        &quot;&quot;&quot;
+        bind = cls._bindSchema
+        return Insert({
+            bind.HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
+            bind.RESOURCE_ID: Parameter(&quot;resourceID&quot;),
+            bind.RESOURCE_NAME: Parameter(&quot;name&quot;),
+            bind.BIND_MODE: Parameter(&quot;mode&quot;),
+            bind.BIND_STATUS: Parameter(&quot;bindStatus&quot;),
+            bind.BIND_UID: Parameter(&quot;bindUID&quot;),
+            bind.MESSAGE: Parameter(&quot;message&quot;),
+        })
+
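+    # Illustrative sketch (hypothetical values, not from this changeset): the
+    # Insert above is executed with keywords matching the Parameter() names:
+    #
+    #     yield cls._bindInsertQuery.on(
+    #         txn, homeID=42, resourceID=7, name='abc',
+    #         mode=_BIND_MODE_DIRECT, bindStatus=_BIND_STATUS_ACCEPTED,
+    #         bindUID=None, message=None,
+    #     )
+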
+
+    @classmethod
+    def _updateBindColumnsQuery(cls, columnMap):
+        bind = cls._bindSchema
+        return Update(
+            columnMap,
+            Where=(bind.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
+                bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)),
+        )
+
+
+    @classproperty
+    def _deleteBindForResourceIDAndHomeID(cls):
+        bind = cls._bindSchema
+        return Delete(
+            From=bind,
+            Where=(bind.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
+                bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)),
+        )
+
+
+    @classmethod
+    def _bindFor(cls, condition):
+        bind = cls._bindSchema
+        columns = cls.bindColumns() + cls.additionalBindColumns()
+        return Select(
+            columns,
+            From=bind,
+            Where=condition
+        )
+
+
+    @classmethod
+    def _bindInviteFor(cls, condition):
+        home = cls._homeSchema
+        bind = cls._bindSchema
+        return Select(
+            [
+                home.OWNER_UID,
+                bind.HOME_RESOURCE_ID,
+                bind.RESOURCE_ID,
+                bind.RESOURCE_NAME,
+                bind.BIND_MODE,
+                bind.BIND_STATUS,
+                bind.MESSAGE,
+            ],
+            From=bind.join(home, on=(bind.HOME_RESOURCE_ID == home.RESOURCE_ID)),
+            Where=condition
+        )
+
+
+    @classproperty
+    def _sharedInvitationBindForResourceID(cls):
+        bind = cls._bindSchema
+        return cls._bindInviteFor(
+            (bind.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And
+            (bind.BIND_MODE != _BIND_MODE_OWN)
+        )
+
+
+    @classproperty
+    def _acceptedBindForHomeID(cls):
+        bind = cls._bindSchema
+        return cls._bindFor((bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;))
+                            .And(bind.BIND_STATUS == _BIND_STATUS_ACCEPTED))
+
+
+    @classproperty
+    def _bindForResourceIDAndHomeID(cls):
+        &quot;&quot;&quot;
+        DAL query that looks up home bind rows by home child
+        resource ID and home resource ID.
+        &quot;&quot;&quot;
+        bind = cls._bindSchema
+        return cls._bindFor((bind.RESOURCE_ID == Parameter(&quot;resourceID&quot;))
+                            .And(bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)))
+
+
+    @classproperty
+    def _bindForBindUIDAndHomeID(cls):
+        &quot;&quot;&quot;
+        DAL query that looks up home bind rows by bind UID
+        and home resource ID.
+        &quot;&quot;&quot;
+        bind = cls._bindSchema
+        return cls._bindFor((bind.BIND_UID == Parameter(&quot;bindUID&quot;))
+                            .And(bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)))
+
+
+    @classproperty
+    def _bindForNameAndHomeID(cls):
+        &quot;&quot;&quot;
+        DAL query that looks up any bind rows by resource name
+        and home resource ID.
+        &quot;&quot;&quot;
+        bind = cls._bindSchema
+        return cls._bindFor((bind.RESOURCE_NAME == Parameter(&quot;name&quot;))
+                            .And(bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)))
+
+
+    #
+    # Higher level API
+    #
+    @inlineCallbacks
+    def inviteUIDToShare(self, shareeUID, mode, summary=None, shareName=None):
+        &quot;&quot;&quot;
+        Invite a user to share this collection - either create the share if it does not exist, or
+        update the existing share with new values. Make sure a notification is sent as well.
+
+        @param shareeUID: UID of the sharee
+        @type shareeUID: C{str}
+        @param mode: access mode
+        @type mode: C{int}
+        @param summary: share message
+        @type summary: C{str}
+        &quot;&quot;&quot;
+
+        # Look for existing invite and update its fields or create new one
+        shareeView = yield self.shareeView(shareeUID)
+        if shareeView is not None:
+            status = _BIND_STATUS_INVITED if shareeView.shareStatus() in (_BIND_STATUS_DECLINED, _BIND_STATUS_INVALID) else None
+            yield self.updateShare(shareeView, mode=mode, status=status, summary=summary)
+        else:
+            shareeView = yield self.createShare(shareeUID=shareeUID, mode=mode, summary=summary, shareName=shareName)
+
+        # Check for external
+        if shareeView.viewerHome().external():
+            yield self._sendExternalInvite(shareeView)
+        else:
+            # Send invite notification
+            yield self._sendInviteNotification(shareeView)
+        returnValue(shareeView)
+
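+    # Illustrative sketch (hypothetical usage, not from this changeset):
+    # inviting and later uninviting a local sharee on an owned collection,
+    # assuming _BIND_MODE_READ is imported from sql_tables:
+    #
+    #     shareeView = yield calendar.inviteUIDToShare('user02', _BIND_MODE_READ)
+    #     yield calendar.uninviteUIDFromShare('user02')
+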
+
+    @inlineCallbacks
+    def directShareWithUser(self, shareeUID, shareName=None):
+        &quot;&quot;&quot;
+        Create a direct share with the specified user. Note it is currently up to the app layer
+        to enforce access control - this is not ideal as we really should have control of that in
+        the store. Once we do, this API will need to verify that access is allowed for a direct share.
+
+        NB no invitations are used with direct sharing.
+
+        @param shareeUID: UID of the sharee
+        @type shareeUID: C{str}
+        &quot;&quot;&quot;
+
+        # Ignore if it already exists
+        shareeView = yield self.shareeView(shareeUID)
+        if shareeView is None:
+            shareeView = yield self.createShare(shareeUID=shareeUID, mode=_BIND_MODE_DIRECT, shareName=shareName)
+            yield shareeView.newShare()
+
+            # Check for external
+            if shareeView.viewerHome().external():
+                yield self._sendExternalInvite(shareeView)
+
+        returnValue(shareeView)
+
+
+    @inlineCallbacks
+    def uninviteUIDFromShare(self, shareeUID):
+        &quot;&quot;&quot;
+        Remove a user from a share. Make sure a notification is sent as well.
+
+        @param shareeUID: UID of the sharee
+        @type shareeUID: C{str}
+        &quot;&quot;&quot;
+        # Cancel invites - we'll just use whatever userid we are given
+
+        shareeView = yield self.shareeView(shareeUID)
+        if shareeView is not None:
+            if shareeView.viewerHome().external():
+                yield self._sendExternalUninvite(shareeView)
+            else:
+                # If current user state is accepted then we send an invite with the new state, otherwise
+                # we cancel any existing invites for the user
+                if not shareeView.direct():
+                    if shareeView.shareStatus() != _BIND_STATUS_ACCEPTED:
+                        yield self._removeInviteNotification(shareeView)
+                    else:
+                        yield self._sendInviteNotification(shareeView, notificationState=_BIND_STATUS_DELETED)
+
+            # Remove the bind
+            yield self.removeShare(shareeView)
+
+
+    @inlineCallbacks
+    def acceptShare(self, summary=None):
+        &quot;&quot;&quot;
+        This share is being accepted.
+        &quot;&quot;&quot;
+
+        if not self.direct() and self.shareStatus() != _BIND_STATUS_ACCEPTED:
+            if self.external():
+                yield self._replyExternalInvite(_BIND_STATUS_ACCEPTED, summary)
+            ownerView = yield self.ownerView()
+            yield ownerView.updateShare(self, status=_BIND_STATUS_ACCEPTED)
+            yield self.newShare(displayname=summary)
+            if not ownerView.external():
+                yield self._sendReplyNotification(ownerView, summary)
+
+
+    @inlineCallbacks
+    def declineShare(self):
+        &quot;&quot;&quot;
+        This share is being declined.
+        &quot;&quot;&quot;
+
+        if not self.direct() and self.shareStatus() != _BIND_STATUS_DECLINED:
+            if self.external():
+                yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
+            ownerView = yield self.ownerView()
+            yield ownerView.updateShare(self, status=_BIND_STATUS_DECLINED)
+            if not ownerView.external():
+                yield self._sendReplyNotification(ownerView)
+
+
+    @inlineCallbacks
+    def deleteShare(self):
+        &quot;&quot;&quot;
+        This share is being deleted (by the sharee) - either decline or remove (for direct shares).
+        &quot;&quot;&quot;
+
+        ownerView = yield self.ownerView()
+        if self.direct():
+            yield ownerView.removeShare(self)
+            if ownerView.external():
+                yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
+        else:
+            yield self.declineShare()
+
+
+    @inlineCallbacks
+    def ownerDeleteShare(self):
+        &quot;&quot;&quot;
+        This share is being deleted (by the owner) - either decline or remove (for direct shares).
+        &quot;&quot;&quot;
+
+        # Change status on store object
+        yield self.setShared(False)
+
+        # Remove all sharees (direct and invited)
+        for invitation in (yield self.sharingInvites()):
+            yield self.uninviteUIDFromShare(invitation.shareeUID)
+
+
+    def newShare(self, displayname=None):
+        &quot;&quot;&quot;
+        Override in derived classes to do any specific operations needed when a share
+        is first accepted.
+        &quot;&quot;&quot;
+        return succeed(None)
+
+
+    @inlineCallbacks
+    def allInvitations(self):
+        &quot;&quot;&quot;
+        Get a list of all invitations (non-direct) to this object.
+        &quot;&quot;&quot;
+        invitations = yield self.sharingInvites()
+
+        # remove direct shares as those are not &quot;real&quot; invitations
+        invitations = filter(lambda x: x.mode != _BIND_MODE_DIRECT, invitations)
+        invitations.sort(key=lambda invitation: invitation.shareeUID)
+        returnValue(invitations)
+
+
+    @inlineCallbacks
+    def _sendInviteNotification(self, shareeView, notificationState=None):
+        &quot;&quot;&quot;
+        Called on the owner's resource.
+        &quot;&quot;&quot;
+        # When deleting, the message is the sharee's display name
+        displayname = shareeView.shareMessage()
+        if notificationState == _BIND_STATUS_DELETED:
+            displayname = str(shareeView.properties().get(PropertyName.fromElement(element.DisplayName), displayname))
+
+        notificationtype = {
+            &quot;notification-type&quot;: &quot;invite-notification&quot;,
+            &quot;shared-type&quot;: shareeView.sharedResourceType(),
+        }
+        notificationdata = {
+            &quot;notification-type&quot;: &quot;invite-notification&quot;,
+            &quot;shared-type&quot;: shareeView.sharedResourceType(),
+            &quot;dtstamp&quot;: DateTime.getNowUTC().getText(),
+            &quot;owner&quot;: shareeView.ownerHome().uid(),
+            &quot;sharee&quot;: shareeView.viewerHome().uid(),
+            &quot;uid&quot;: shareeView.shareUID(),
+            &quot;status&quot;: shareeView.shareStatus() if notificationState is None else notificationState,
+            &quot;access&quot;: (yield shareeView.effectiveShareMode()),
+            &quot;ownerName&quot;: self.shareName(),
+            &quot;summary&quot;: displayname,
+        }
+        if hasattr(self, &quot;getSupportedComponents&quot;):
+            notificationdata[&quot;supported-components&quot;] = self.getSupportedComponents()
+
+        # Add to sharee's collection
+        notifications = yield self._txn.notificationsWithUID(shareeView.viewerHome().uid(), create=True)
+        yield notifications.writeNotificationObject(shareeView.shareUID(), notificationtype, notificationdata)
+
+
+    @inlineCallbacks
+    def _sendReplyNotification(self, ownerView, summary=None):
+        &quot;&quot;&quot;
+        Create a reply notification based on the current state of this shared resource.
+        &quot;&quot;&quot;
+
+        # Generate invite XML
+        notificationUID = &quot;%s-reply&quot; % (self.shareUID(),)
+
+        notificationtype = {
+            &quot;notification-type&quot;: &quot;invite-reply&quot;,
+            &quot;shared-type&quot;: self.sharedResourceType(),
+        }
+
+        notificationdata = {
+            &quot;notification-type&quot;: &quot;invite-reply&quot;,
+            &quot;shared-type&quot;: self.sharedResourceType(),
+            &quot;dtstamp&quot;: DateTime.getNowUTC().getText(),
+            &quot;owner&quot;: self.ownerHome().uid(),
+            &quot;sharee&quot;: self.viewerHome().uid(),
+            &quot;status&quot;: self.shareStatus(),
+            &quot;ownerName&quot;: ownerView.shareName(),
+            &quot;in-reply-to&quot;: self.shareUID(),
+            &quot;summary&quot;: summary,
+        }
+
+        # Add to owner notification collection
+        notifications = yield self._txn.notificationsWithUID(self.ownerHome().uid(), create=True)
+        yield notifications.writeNotificationObject(notificationUID, notificationtype, notificationdata)
+
+
+    @inlineCallbacks
+    def _removeInviteNotification(self, shareeView):
+        &quot;&quot;&quot;
+        Called on the owner's resource.
+        &quot;&quot;&quot;
+
+        # Remove from sharee's collection
+        notifications = yield self._txn.notificationsWithUID(shareeView.viewerHome().uid())
+        yield notifications.removeNotificationObjectWithUID(shareeView.shareUID())
+
+
+    #
+    # External/cross-pod API
+    #
+    @inlineCallbacks
+    def _sendExternalInvite(self, shareeView):
+
+        # Must make sure this collection has a BIND_UID assigned
+        if not self._bindUID:
+            self._bindUID = str(uuid4())
+            yield self._updateBindColumnsQuery({self._bindSchema.BIND_UID: self._bindUID}).on(
+                self._txn,
+                resourceID=self.id(), homeID=self.ownerHome().id()
+            )
+
+        # Now send the invite
+        yield self._txn.store().conduit.send_shareinvite(
+            self._txn,
+            shareeView.ownerHome()._homeType,
+            shareeView.ownerHome().uid(),
+            self.shareName(),
+            shareeView.viewerHome().uid(),
+            shareeView.shareUID(),
+            shareeView.shareMode(),
+            self.bindUID(),
+            shareeView.shareMessage(),
+            self.getInviteCopyProperties(),
+            supported_components=self.getSupportedComponents() if hasattr(self, &quot;getSupportedComponents&quot;) else None,
+        )
+
+
+    @inlineCallbacks
+    def _sendExternalUninvite(self, shareeView):
+
+        yield self._txn.store().conduit.send_shareuninvite(
+            self._txn,
+            shareeView.ownerHome()._homeType,
+            shareeView.ownerHome().uid(),
+            self.bindUID(),
+            shareeView.viewerHome().uid(),
+            shareeView.shareUID(),
+        )
+
+
+    @inlineCallbacks
+    def _replyExternalInvite(self, status, summary=None):
+
+        yield self._txn.store().conduit.send_sharereply(
+            self._txn,
+            self.viewerHome()._homeType,
+            self.ownerHome().uid(),
+            self.viewerHome().uid(),
+            self.shareUID(),
+            status,
+            summary,
+        )
+
+
+    #
+    # Lower level API
+    #
+    @inlineCallbacks
+    def ownerView(self):
+        &quot;&quot;&quot;
+        Return the owner resource counterpart of this shared resource.
+
+        Note we have to play a trick with the property store to coerce it to match
+        the per-user properties for the owner.
+        &quot;&quot;&quot;
+        # Get the child of the owner home that has the same resource id as the owned one
+        ownerView = yield self.ownerHome().childWithID(self.id())
+        returnValue(ownerView)
+
+
+    @inlineCallbacks
+    def shareeView(self, shareeUID):
+        &quot;&quot;&quot;
+        Return the shared resource counterpart of this owned resource for the specified sharee.
+
+        Note we have to play a trick with the property store to coerce it to match
+        the per-user properties for the sharee.
+        &quot;&quot;&quot;
+
+        # Never return the owner's own resource
+        if self._home.uid() == shareeUID:
+            returnValue(None)
+
+        # Get the child of the sharee home that has the same resource id as the owned one
+        shareeHome = yield self._txn.homeWithUID(self._home._homeType, shareeUID, authzUID=shareeUID)
+        shareeView = (yield shareeHome.allChildWithID(self.id())) if shareeHome is not None else None
+        returnValue(shareeView)
+
+
+    @inlineCallbacks
+    def shareWithUID(self, shareeUID, mode, status=None, summary=None, shareName=None):
+        &quot;&quot;&quot;
+        Share this (owned) L{CommonHomeChild} with another principal.
+
+        @param shareeUID: The UID of the sharee.
+        @type shareeUID: L{str}
+
+        @param mode: The sharing mode; L{_BIND_MODE_READ} or
+            L{_BIND_MODE_WRITE} or L{_BIND_MODE_DIRECT}
+        @type mode: L{str}
+
+        @param status: The sharing status; L{_BIND_STATUS_INVITED} or
+            L{_BIND_STATUS_ACCEPTED}
+        @type status: L{str}
+
+        @param summary: The proposed message to go along with the share, which
+            will be used as the default display name.
+        @type summary: L{str}
+
+        @return: the name of the shared calendar in the new calendar home.
+        @rtype: L{str}
+        &quot;&quot;&quot;
+        shareeHome = yield self._txn.homeWithUID(self._home._homeType, shareeUID, create=True)
+        returnValue(
+            (yield self.shareWith(shareeHome, mode, status, summary, shareName))
+        )
+
+
+    @inlineCallbacks
+    def shareWith(self, shareeHome, mode, status=None, summary=None, shareName=None):
+        &quot;&quot;&quot;
+        Share this (owned) L{CommonHomeChild} with another home.
+
+        @param shareeHome: The home of the sharee.
+        @type shareeHome: L{CommonHome}
+
+        @param mode: The sharing mode; L{_BIND_MODE_READ} or
+            L{_BIND_MODE_WRITE} or L{_BIND_MODE_DIRECT}
+        @type mode: L{str}
+
+        @param status: The sharing status; L{_BIND_STATUS_INVITED} or
+            L{_BIND_STATUS_ACCEPTED}
+        @type status: L{str}
+
+        @param summary: The proposed message to go along with the share, which
+            will be used as the default display name.
+        @type summary: L{str}
+
+        @param shareName: The proposed name of the new share.
+        @type shareName: L{str}
+
+        @return: the name of the shared calendar in the new calendar home.
+        @rtype: L{str}
+        &quot;&quot;&quot;
+
+        if status is None:
+            status = _BIND_STATUS_ACCEPTED
+
+        @inlineCallbacks
+        def doInsert(subt):
+            newName = shareName if shareName is not None else self.newShareName()
+            yield self._bindInsertQuery.on(
+                subt,
+                homeID=shareeHome._resourceID,
+                resourceID=self._resourceID,
+                name=newName,
+                mode=mode,
+                bindStatus=status,
+                bindUID=None,
+                message=summary
+            )
+            returnValue(newName)
+        try:
+            bindName = yield self._txn.subtransaction(doInsert)
+        except AllRetriesFailed:
+            # FIXME: catch more specific exception
+            child = yield shareeHome.allChildWithID(self._resourceID)
+            yield self.updateShare(
+                child, mode=mode, status=status,
+                summary=summary
+            )
+            bindName = child._name
+        else:
+            if status == _BIND_STATUS_ACCEPTED:
+                shareeView = yield shareeHome.anyObjectWithShareUID(bindName)
+                yield shareeView._initSyncToken()
+                yield shareeView._initBindRevision()
+
+        # Mark this as shared
+        yield self.setShared(True)
+
+        # Must send notification to ensure cache invalidation occurs
+        yield self.notifyPropertyChanged()
+        yield shareeHome.notifyChanged()
+
+        returnValue(bindName)
+
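+    # Illustrative sketch (hypothetical usage, not from this changeset): the
+    # returned bind name doubles as the share UID in the sharee's home:
+    #
+    #     bindName = yield calendar.shareWith(shareeHome, _BIND_MODE_DIRECT)
+    #     shareeView = yield shareeHome.anyObjectWithShareUID(bindName)
+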
+
+    @inlineCallbacks
+    def createShare(self, shareeUID, mode, summary=None, shareName=None):
+        &quot;&quot;&quot;
+        Create a new shared resource. If the mode is direct, the share is created in accepted state,
+        otherwise the share is created in invited state.
+        &quot;&quot;&quot;
+        shareeHome = yield self._txn.homeWithUID(self.ownerHome()._homeType, shareeUID, create=True)
+
+        yield self.shareWith(
+            shareeHome,
+            mode=mode,
+            status=_BIND_STATUS_INVITED if mode != _BIND_MODE_DIRECT else _BIND_STATUS_ACCEPTED,
+            summary=summary,
+            shareName=shareName,
+        )
+        shareeView = yield self.shareeView(shareeUID)
+        returnValue(shareeView)
+
+
+    @inlineCallbacks
+    def updateShare(self, shareeView, mode=None, status=None, summary=None):
+        &quot;&quot;&quot;
+        Update share mode, status, and message for a home child shared with
+        this (owned) L{CommonHomeChild}.
+
+        @param shareeView: The sharee home child that shares this.
+        @type shareeView: L{CommonHomeChild}
+
+        @param mode: The sharing mode; L{_BIND_MODE_READ} or
+            L{_BIND_MODE_WRITE} or None to not update
+        @type mode: L{str}
+
+        @param status: The sharing status; L{_BIND_STATUS_INVITED} or
+            L{_BIND_STATUS_ACCEPTED} or L{_BIND_STATUS_DECLINED} or
+            L{_BIND_STATUS_INVALID} or None to not update
+        @type status: L{str}
+
+        @param summary: The proposed message to go along with the share, which
+            will be used as the default display name, or None to not update
+        @type summary: L{str}
+        &quot;&quot;&quot;
+        # TODO: raise a nice exception if shareeView is not, in fact, a shared
+        # version of this same L{CommonHomeChild}
+
+        # Build the column update map, skipping None parameters and values
+        # that are unchanged
+        bind = self._bindSchema
+        columnMap = {}
+        if mode is not None and mode != shareeView._bindMode:
+            columnMap[bind.BIND_MODE] = mode
+        if status is not None and status != shareeView._bindStatus:
+            columnMap[bind.BIND_STATUS] = status
+        if summary is not None and summary != shareeView._bindMessage:
+            columnMap[bind.MESSAGE] = summary
+
+        if columnMap:
+
+            # Count accepted
+            if bind.BIND_STATUS in columnMap:
+                previouslyAcceptedCount = yield shareeView._previousAcceptCount()
+
+            yield self._updateBindColumnsQuery(columnMap).on(
+                self._txn,
+                resourceID=self._resourceID, homeID=shareeView._home._resourceID
+            )
+
+            # Update affected attributes
+            if bind.BIND_MODE in columnMap:
+                shareeView._bindMode = columnMap[bind.BIND_MODE]
+
+            if bind.BIND_STATUS in columnMap:
+                shareeView._bindStatus = columnMap[bind.BIND_STATUS]
+                yield shareeView._changedStatus(previouslyAcceptedCount)
+
+            if bind.MESSAGE in columnMap:
+                shareeView._bindMessage = columnMap[bind.MESSAGE]
+
+            yield shareeView.invalidateQueryCache()
+
+            # Must send notification to ensure cache invalidation occurs
+            yield self.notifyPropertyChanged()
+            yield shareeView.viewerHome().notifyChanged()
+
+
+    def _previousAcceptCount(self):
+        return succeed(1)
+
+
+    @inlineCallbacks
+    def _changedStatus(self, previouslyAcceptedCount):
+        if self._bindStatus == _BIND_STATUS_ACCEPTED:
+            yield self._initSyncToken()
+            yield self._initBindRevision()
+            self._home._children[self._name] = self
+            self._home._children[self._resourceID] = self
+        elif self._bindStatus in (_BIND_STATUS_INVITED, _BIND_STATUS_DECLINED):
+            yield self._deletedSyncToken(sharedRemoval=True)
+            self._home._children.pop(self._name, None)
+            self._home._children.pop(self._resourceID, None)
+
+
+    @inlineCallbacks
+    def removeShare(self, shareeView):
+        &quot;&quot;&quot;
+        Remove the shared version of this (owned) L{CommonHomeChild} from the
+        referenced L{CommonHome}.
+
+        @see: L{CommonHomeChild.shareWith}
+
+        @param shareeView: The shared resource being removed.
+
+        @return: a L{Deferred} which fires when the share has been removed
+        &quot;&quot;&quot;
+
+        # remove sync tokens
+        shareeHome = shareeView.viewerHome()
+        yield shareeView._deletedSyncToken(sharedRemoval=True)
+        shareeHome._children.pop(shareeView._name, None)
+        shareeHome._children.pop(shareeView._resourceID, None)
+
+        # Must send notification to ensure cache invalidation occurs
+        yield self.notifyPropertyChanged()
+        yield shareeHome.notifyChanged()
+
+        # delete binds including invites
+        yield self._deleteBindForResourceIDAndHomeID.on(
+            self._txn,
+            resourceID=self._resourceID,
+            homeID=shareeHome._resourceID,
+        )
+
+        yield shareeView.invalidateQueryCache()
+
+
+    @inlineCallbacks
+    def unshare(self):
+        &quot;&quot;&quot;
+        Unshares a collection, regardless of which &quot;direction&quot; it was shared.
+        &quot;&quot;&quot;
+        if self.owned():
+            # This collection may be shared to others
+            invites = yield self.sharingInvites()
+            for invite in invites:
+                shareeView = yield self.shareeView(invite.shareeUID)
+                yield self.removeShare(shareeView)
+        else:
+            # This collection is shared to me
+            ownerView = yield self.ownerView()
+            yield ownerView.removeShare(self)
+
+
+    @inlineCallbacks
+    def sharingInvites(self):
+        &quot;&quot;&quot;
+        Retrieve the list of all L{SharingInvitation}s for this L{CommonHomeChild}, irrespective of mode.
+
+        @return: L{SharingInvitation} objects
+        @rtype: a L{Deferred} which fires with a L{list} of L{SharingInvitation}s.
+        &quot;&quot;&quot;
+        if not self.owned():
+            returnValue([])
+
+        # get all binds except the owner's own
+        invitedRows = yield self._sharedInvitationBindForResourceID.on(
+            self._txn, resourceID=self._resourceID, homeID=self._home._resourceID
+        )
+
+        result = []
+        for homeUID, homeRID, _ignore_resourceID, resourceName, bindMode, bindStatus, bindMessage in invitedRows:
+            invite = SharingInvitation(
+                resourceName,
+                self.ownerHome().name(),
+                self.ownerHome().id(),
+                homeUID,
+                homeRID,
+                bindMode,
+                bindStatus,
+                bindMessage,
+            )
+            result.append(invite)
+        returnValue(result)
+
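+    # Illustrative sketch (hypothetical usage, not from this changeset):
+    # filtering the invitation list down to accepted sharees:
+    #
+    #     invites = yield calendar.sharingInvites()
+    #     accepted = [i.shareeUID for i in invites
+    #                 if i.status == _BIND_STATUS_ACCEPTED]
+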
+
+    @inlineCallbacks
+    def sharingBindRecords(self):
+        &quot;&quot;&quot;
+        Return a L{dict} that maps home/directory uid to a sharing bind record.
+        &quot;&quot;&quot;
+        if not self.owned():
+            returnValue({})
+
+        records = yield self._bindRecordClass.querysimple(
+            self._txn,
+            **{self._bindResourceIDAttributeName: self.id()}
+        )
+        homeIDs = [getattr(record, self._bindHomeIDAttributeName) for record in records]
+        homes = yield self._homeRecordClass.query(
+            self._txn,
+            self._homeRecordClass.resourceID.In(homeIDs),
+        )
+        homeMap = dict((home.resourceID, home.ownerUID,) for home in homes)
+
+        returnValue(dict([(homeMap[getattr(record, self._bindHomeIDAttributeName)], record,) for record in records if record.bindMode != _BIND_MODE_OWN]))
+
+
+    def migrateBindRecords(self, bindUID):
+        &quot;&quot;&quot;
+        The user that owns this collection is being migrated to another pod. We need to switch over
+        the sharing details to point to the new external user.
+        &quot;&quot;&quot;
+        if self.owned():
+            return self.migrateSharedByRecords(bindUID)
+        else:
+            return self.migrateSharedToRecords()
+
+
+    @inlineCallbacks
+    def migrateSharedByRecords(self, bindUID):
+        &quot;&quot;&quot;
+        The user that owns this collection is being migrated to another pod. We need to switch over
+        the sharing details to point to the new external user. For sharees hosted on this pod, we
+        update their bind record to point to a new external home/calendar for the sharer. For sharees
+        hosted on other pods, we simply remove their bind entries.
+        &quot;&quot;&quot;
+
+        # Get the external home and make sure there is a &quot;fake&quot; calendar associated with it
+        home = yield self.externalHome()
+        calendar = yield home.childWithBindUID(bindUID)
+        if calendar is None:
+            calendar = yield home.createCollectionForExternalShare(
+                self.name(),
+                bindUID,
+                self.getSupportedComponents() if hasattr(self, &quot;getSupportedComponents&quot;) else None,
+            )
+
+        remaining = False
+        records = yield self._bindRecordClass.querysimple(self._txn, **{self._bindResourceIDAttributeName: self.id()})
+        for record in records:
+            if record.bindMode == _BIND_MODE_OWN:
+                continue
+            shareeHome = yield self._txn.homeWithResourceID(home._homeType, getattr(record, self._bindHomeIDAttributeName))
+            if shareeHome.normal():
+                remaining = True
+                yield record.update(**{
+                    self._bindResourceIDAttributeName: calendar.id(),
+                })
+            else:
+                # It is OK to just delete (as opposed to doing a full &quot;unshare&quot;) without adjusting other things
+                # like sync revisions since those would not have been used for an external share anyway. Also,
+                # revisions are tied to the calendar id and the original calendar will be removed after migration
+                # is complete.
+                yield record.delete()
+
+        # If there are no external shares remaining, we can remove the external calendar
+        if not remaining:
+            yield calendar.remove()
+
+
+    @inlineCallbacks
+    def migrateSharedToRecords(self):
+        &quot;&quot;&quot;
+        The user that owns this collection is being migrated to another pod. We need to switch over
+        the sharing details to point to the new external user.
+        &quot;&quot;&quot;
+
+        # Update the bind record for this calendar to point to the external home
+        records = yield self._bindRecordClass.querysimple(
+            self._txn,
+            **{
+                self._bindHomeIDAttributeName: self.viewerHome().id(),
+                self._bindResourceIDAttributeName: self.id(),
+            }
+        )
+
+        if len(records) == 1:
+
+            # What we do depends on whether the sharer is local to this pod or not
+            if self.ownerHome().normal():
+                # Get the external home for the sharee
+                home = yield self.externalHome()
+
+                yield records[0].update(**{
+                    self._bindHomeIDAttributeName: home.id(),
+                })
+            else:
+                # It is OK to just delete (as opposed to doing a full &quot;unshare&quot;) without adjusting other things
+                # like sync revisions since those would not have been used for an external share anyway. Also,
+                # revisions are tied to the sharee calendar home id and that will be removed after migration
+                # is complete.
+                yield records[0].delete()
+
+                # Clean up external calendar if no sharees left
+                calendar = yield self.ownerView()
+                invites = yield calendar.sharingInvites()
+                if len(invites) == 0:
+                    yield calendar.remove()
+        else:
+            raise AssertionError(&quot;We must have a bind record for this calendar.&quot;)
+
+
+    def externalHome(self):
+        &quot;&quot;&quot;
+        Create and return a L{CommonHome} for the user being migrated. Note that when called, the user
+        directory record may still indicate that they are hosted on this pod, so we have to forcibly create
+        a home for the external user.
+        &quot;&quot;&quot;
+        currentHome = self.viewerHome()
+        return self._txn.homeWithUID(currentHome._homeType, currentHome.uid(), status=_HOME_STATUS_EXTERNAL, create=True)
+
+
+    @inlineCallbacks
+    def _initBindRevision(self):
+        yield self.syncToken() # init self._syncTokenRevision if None
+        self._bindRevision = self._syncTokenRevision
+
+        bind = self._bindSchema
+        yield self._updateBindColumnsQuery(
+            {bind.BIND_REVISION : Parameter(&quot;revision&quot;), }
+        ).on(
+            self._txn,
+            revision=self._bindRevision,
+            resourceID=self._resourceID,
+            homeID=self.viewerHome()._resourceID,
+        )
+        yield self.invalidateQueryCache()
+
+
+    def sharedResourceType(self):
+        &quot;&quot;&quot;
+        The sharing resource type. Needs to be overridden by each type of resource that can be shared.
+
+        @return: an identifier for the type of the share.
+        @rtype: C{str}
+        &quot;&quot;&quot;
+        return &quot;&quot;
+
+
+    def newShareName(self):
+        &quot;&quot;&quot;
+        Name used when creating a new share. By default this is a UUID.
+        &quot;&quot;&quot;
+        return str(uuid4())
+
+
+    def owned(self):
+        &quot;&quot;&quot;
+        @see: L{ICalendar.owned}
+        &quot;&quot;&quot;
+        return self._bindMode == _BIND_MODE_OWN
+
+
+    def isShared(self):
+        &quot;&quot;&quot;
+        For an owned collection indicate whether it is shared.
+
+        @return: C{True} if shared, C{False} otherwise
+        @rtype: C{bool}
+        &quot;&quot;&quot;
+        return self.owned() and self._bindMessage == &quot;shared&quot;
+
+
+    @inlineCallbacks
+    def setShared(self, shared):
+        &quot;&quot;&quot;
+        Set an owned collection to shared or unshared state. Technically this is not useful as &quot;shared&quot;
+        really means it has invitees, but the current sharing spec supports a notion of a shared collection
+        that has not yet had invitees added. For the time being we will support that option by using a new
+        MESSAGE value to indicate an owned collection that is &quot;shared&quot;.
+
+        @param shared: whether or not the owned collection is &quot;shared&quot;
+        @type shared: C{bool}
+        &quot;&quot;&quot;
+        assert self.owned(), &quot;Cannot change share mode on a shared collection&quot;
+
+        # Only if change is needed
+        newMessage = &quot;shared&quot; if shared else None
+        if self._bindMessage == newMessage:
+            returnValue(None)
+
+        self._bindMessage = newMessage
+
+        bind = self._bindSchema
+        yield Update(
+            {bind.MESSAGE: self._bindMessage},
+            Where=(bind.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
+                bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)),
+        ).on(self._txn, resourceID=self._resourceID, homeID=self.viewerHome()._resourceID)
+
+        yield self.invalidateQueryCache()
+        yield self.notifyPropertyChanged()
+
+
+    def direct(self):
+        &quot;&quot;&quot;
+        Is this a &quot;direct&quot; share?
+
+        @return: a boolean indicating whether it's direct.
+        &quot;&quot;&quot;
+        return self._bindMode == _BIND_MODE_DIRECT
+
+
+    def indirect(self):
+        &quot;&quot;&quot;
+        Is this an &quot;indirect&quot; share?
+
+        @return: a boolean indicating whether it's indirect.
+        &quot;&quot;&quot;
+        return self._bindMode == _BIND_MODE_INDIRECT
+
+
+    def shareUID(self):
+        &quot;&quot;&quot;
+        @see: L{ICalendar.shareUID}
+        &quot;&quot;&quot;
+        return self.name()
+
+
+    def shareMode(self):
+        &quot;&quot;&quot;
+        @see: L{ICalendar.shareMode}
+        &quot;&quot;&quot;
+        return self._bindMode
+
+
+    def _effectiveShareMode(self, bindMode, viewerUID, txn):
+        &quot;&quot;&quot;
+        Get the effective share mode without a calendar object
+        &quot;&quot;&quot;
+        return bindMode
+
+
+    def effectiveShareMode(self):
+        &quot;&quot;&quot;
+        @see: L{ICalendar.shareMode}
+        &quot;&quot;&quot;
+        return self._bindMode
+
+
+    def shareName(self):
+        &quot;&quot;&quot;
+        This is a path-like name for the resource within the home being shared. For object resource
+        shares this will be a combination of the L{CommonHomeChild} name and the L{CommonObjectResource}
+        name. Otherwise it is just the L{CommonHomeChild} name. This is needed to expose a value to the
+        app-layer such that it can construct a URI for the actual WebDAV resource being shared.
+        &quot;&quot;&quot;
+        name = self.name()
+        if self.sharedResourceType() == &quot;group&quot;:
+            name = self.parentCollection().name() + &quot;/&quot; + name
+        return name
+
+
+    def shareStatus(self):
+        &quot;&quot;&quot;
+        @see: L{ICalendar.shareStatus}
+        &quot;&quot;&quot;
+        return self._bindStatus
+
+
+    def bindUID(self):
+        &quot;&quot;&quot;
+        @see: L{ICalendar.bindUID}
+        &quot;&quot;&quot;
+        return self._bindUID
+
+
+    def accepted(self):
+        &quot;&quot;&quot;
+        @see: L{ICalendar.shareStatus}
+        &quot;&quot;&quot;
+        return self._bindStatus == _BIND_STATUS_ACCEPTED
+
+
+    def shareMessage(self):
+        &quot;&quot;&quot;
+        @see: L{ICalendar.shareMessage}
+        &quot;&quot;&quot;
+        return self._bindMessage
+
+
+    def getInviteCopyProperties(self):
+        &quot;&quot;&quot;
+        Get a dictionary of property name/values (as strings) for properties that are shadowable and
+        need to be copied to a sharee's collection when an external (cross-pod) share is created.
+        Sub-classes should override to expose the properties they care about.
+        &quot;&quot;&quot;
+        return {}
+
+
+    def setInviteCopyProperties(self, props):
+        &quot;&quot;&quot;
+        Copy a set of shadowable properties (as name/value strings) onto this shared resource when
+        a cross-pod invite is processed. Sub-classes should override to expose the properties they
+        care about.
+        &quot;&quot;&quot;
+        pass
+
+
+    @classmethod
+    def metadataColumns(cls):
+        &quot;&quot;&quot;
+        Return a list of column names for retrieval of metadata. This allows
+        different child classes to have their own type specific data, but still make use of the
+        common base logic.
+        &quot;&quot;&quot;
+
+        # Common behavior is to have created and modified
+
+        return (
+            cls._homeChildMetaDataSchema.CREATED,
+            cls._homeChildMetaDataSchema.MODIFIED,
+        )
+
+
+    @classmethod
+    def metadataAttributes(cls):
+        &quot;&quot;&quot;
+        Return a list of attribute names for retrieval of metadata. This allows
+        different child classes to have their own type specific data, but still make use of the
+        common base logic.
+        &quot;&quot;&quot;
+
+        # Common behavior is to have created and modified
+
+        return (
+            &quot;_created&quot;,
+            &quot;_modified&quot;,
+        )
+
+
+    @classmethod
+    def bindColumns(cls):
+        &quot;&quot;&quot;
+        Return a list of column names for retrieval during creation. This allows
+        different child classes to have their own type specific data, but still make use of the
+        common base logic.
+        &quot;&quot;&quot;
+
+        return (
+            cls._bindSchema.HOME_RESOURCE_ID,
+            cls._bindSchema.RESOURCE_ID,
+            cls._bindSchema.RESOURCE_NAME,
+            cls._bindSchema.BIND_MODE,
+            cls._bindSchema.BIND_STATUS,
+            cls._bindSchema.BIND_REVISION,
+            cls._bindSchema.BIND_UID,
+            cls._bindSchema.MESSAGE
+        )
+
+
+    @classmethod
+    def bindAttributes(cls):
+        &quot;&quot;&quot;
+        Return a list of attribute names for retrieval during creation. This allows
+        different child classes to have their own type specific data, but still make use of the
+        common base logic.
+        &quot;&quot;&quot;
+
+        return (
+            &quot;_homeResourceID&quot;,
+            &quot;_resourceID&quot;,
+            &quot;_name&quot;,
+            &quot;_bindMode&quot;,
+            &quot;_bindStatus&quot;,
+            &quot;_bindRevision&quot;,
+            &quot;_bindUID&quot;,
+            &quot;_bindMessage&quot;,
+        )
+
+    bindColumnCount = 8
+
+    @classmethod
+    def additionalBindColumns(cls):
+        &quot;&quot;&quot;
+        Return a list of column names for retrieval during creation. This allows
+        different child classes to have their own type specific data, but still make use of the
+        common base logic.
+        &quot;&quot;&quot;
+
+        return ()
+
+
+    @classmethod
+    def additionalBindAttributes(cls):
+        &quot;&quot;&quot;
+        Return a list of attribute names for retrieval during creation. This allows
+        different child classes to have their own type specific data, but still make use of the
+        common base logic.
+        &quot;&quot;&quot;
+
+        return ()
+
+
+    @classproperty
+    def _childrenAndMetadataForHomeID(cls):
+        bind = cls._bindSchema
+        child = cls._homeChildSchema
+        childMetaData = cls._homeChildMetaDataSchema
+
+        columns = cls.bindColumns() + cls.additionalBindColumns() + cls.metadataColumns()
+        return Select(
+            columns,
+            From=child.join(
+                bind, child.RESOURCE_ID == bind.RESOURCE_ID,
+                'left outer').join(
+                    childMetaData, childMetaData.RESOURCE_ID == bind.RESOURCE_ID,
+                    'left outer'),
+            Where=(bind.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)).And(
+                bind.BIND_STATUS == _BIND_STATUS_ACCEPTED)
+        )
+
+
+    @classmethod
+    def _revisionsForResourceIDs(cls, resourceIDs):
+        rev = cls._revisionsSchema
+        return Select(
+            [rev.RESOURCE_ID, Max(rev.REVISION)],
+            From=rev,
+            Where=rev.RESOURCE_ID.In(Parameter(&quot;resourceIDs&quot;, len(resourceIDs))).And(
+                (rev.RESOURCE_NAME != None).Or(rev.DELETED == False)),
+            GroupBy=rev.RESOURCE_ID
+        )
+
+
+    @inlineCallbacks
+    def invalidateQueryCache(self):
+        queryCacher = self._txn._queryCacher
+        if queryCacher is not None:
+            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForHomeChildMetaData(self._resourceID))
+            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithName(self._home._resourceID, self._name))
+            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID))
+            yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithBindUID(self._home._resourceID, self._bindUID))
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_tablespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/sql_tables.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_tables.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/sql_tables.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -187,6 +187,8 @@
</span><span class="cx"> _HOME_STATUS_NORMAL = _homeStatus('normal')
</span><span class="cx"> _HOME_STATUS_EXTERNAL = _homeStatus('external')
</span><span class="cx"> _HOME_STATUS_PURGING = _homeStatus('purging')
</span><ins>+_HOME_STATUS_MIGRATING = _homeStatus('migrating')
+_HOME_STATUS_DISABLED = _homeStatus('disabled')
</ins><span class="cx"> 
</span><span class="cx"> _bindStatus = _schemaConstants(
</span><span class="cx">     schema.CALENDAR_BIND_STATUS.DESCRIPTION,
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_utilpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_utilpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_util.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_util.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_util.py                                (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,837 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.syntax import Max, Select, Parameter, Delete, Insert, \
+    Update, ColumnSyntax, TableSyntax, Upper
+from twext.python.clsprop import classproperty
+from twext.python.log import Logger
+from twisted.internet.defer import succeed, inlineCallbacks, returnValue
+from txdav.base.datastore.util import normalizeUUIDOrNot
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.icommondatastore import SyncTokenValidException, \
+    ENOTIFICATIONTYPE, ECALENDARTYPE, EADDRESSBOOKTYPE
+import time
+from uuid import UUID
+
+log = Logger()
+
+
+# Classes and methods for the SQL store.
+
+class _EmptyCacher(object):
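+    &quot;&quot;&quot;
+    A no-op stand-in for a memcache-backed cacher, used when caching is
+    disabled: every operation returns an already-fired L{Deferred} and
+    nothing is ever stored.
+    &quot;&quot;&quot;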
+
+    def set(self, key, value):
+        return succeed(True)
+
+
+    def get(self, key, withIdentifier=False):
+        return succeed(None)
+
+
+    def delete(self, key):
+        return succeed(True)
+
+
+
+class _SharedSyncLogic(object):
+    &quot;&quot;&quot;
+    Sync-token maintenance logic shared between notification collections and
+    shared collections.
+    &quot;&quot;&quot;
+
+    @classproperty
+    def _childSyncTokenQuery(cls):
+        &quot;&quot;&quot;
+        DAL query for retrieving the sync token of a L{CommonHomeChild} based on
+        its resource ID.
+        &quot;&quot;&quot;
+        rev = cls._revisionsSchema
+        return Select([Max(rev.REVISION)], From=rev,
+                      Where=rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;))
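+        # Illustrative only: renders roughly as
+        #   SELECT MAX(REVISION) FROM &lt;revisions table&gt; WHERE RESOURCE_ID = %s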
+
+
+    def revisionFromToken(self, token):
+        if token is None:
+            return 0
+        elif isinstance(token, (str, unicode)):
+            _ignore_uuid, revision = token.split(&quot;_&quot;, 1)
+            return int(revision)
+        else:
+            return token
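+        # Example (illustrative): tokens have the form
+        # &quot;&lt;resourceID&gt;_&lt;revision&gt;&quot;, so revisionFromToken(&quot;42_107&quot;) == 107,
+        # revisionFromToken(None) == 0, and an int token passes through unchanged.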
+
+
+    @inlineCallbacks
+    def syncToken(self):
+        if self._syncTokenRevision is None:
+            self._syncTokenRevision = yield self.syncTokenRevision()
+        returnValue(&quot;%s_%s&quot; % (self._resourceID, self._syncTokenRevision))
+
+
+    @inlineCallbacks
+    def syncTokenRevision(self):
+        revision = (yield self._childSyncTokenQuery.on(self._txn, resourceID=self._resourceID))[0][0]
+        if revision is None:
+            revision = int((yield self._txn.calendarserverValue(&quot;MIN-VALID-REVISION&quot;)))
+        returnValue(revision)
+
+
+    def objectResourcesSinceToken(self, token):
+        raise NotImplementedError()
+
+
+    @classmethod
+    def _objectNamesSinceRevisionQuery(cls, deleted=True):
+        &quot;&quot;&quot;
+        DAL query returning (resource name, deleted-flag) pairs changed since a given revision.
+        &quot;&quot;&quot;
+        rev = cls._revisionsSchema
+        where = (rev.REVISION &gt; Parameter(&quot;revision&quot;)).And(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;))
+        if not deleted:
+            where = where.And(rev.DELETED == False)
+        return Select(
+            [rev.RESOURCE_NAME, rev.DELETED],
+            From=rev,
+            Where=where,
+        )
+
+
+    def resourceNamesSinceToken(self, token):
+        &quot;&quot;&quot;
+        Return the changed and deleted resources since a particular sync-token. This simply
+        extracts the revision from the token and then calls L{resourceNamesSinceRevision}.
+
+        @param token: the sync-token to determine changes since
+        @type token: C{str}
+        &quot;&quot;&quot;
+
+        return self.resourceNamesSinceRevision(self.revisionFromToken(token))
+
+
+    @inlineCallbacks
+    def resourceNamesSinceRevision(self, revision):
+        &quot;&quot;&quot;
+        Return the changed and deleted resources since a particular revision.
+
+        @param revision: the revision to determine changes since
+        @type revision: C{int}
+        &quot;&quot;&quot;
+        changed = []
+        deleted = []
+        invalid = []
+        if revision:
+            minValidRevision = yield self._txn.calendarserverValue(&quot;MIN-VALID-REVISION&quot;)
+            if revision &lt; int(minValidRevision):
+                raise SyncTokenValidException
+
+            results = [
+                (name if name else &quot;&quot;, removed) for name, removed in (
+                    yield self._objectNamesSinceRevisionQuery().on(
+                        self._txn, revision=revision, resourceID=self._resourceID)
+                )
+            ]
+            results.sort(key=lambda x: x[1])
+
+            for name, wasdeleted in results:
+                if name:
+                    if wasdeleted:
+                        deleted.append(name)
+                    else:
+                        changed.append(name)
+        else:
+            changed = yield self.listObjectResources()
+
+        returnValue((changed, deleted, invalid))
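+        # Example (illustrative): resources touched after `revision` come back
+        # in `changed` and tombstoned ones in `deleted`; `invalid` is always
+        # empty here.  A zero or None revision returns the full listing as changed.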
+
+
+    @classproperty
+    def _removeDeletedRevision(cls):
+        rev = cls._revisionsSchema
+        return Delete(From=rev,
+                      Where=(rev.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)).And(
+                          rev.COLLECTION_NAME == Parameter(&quot;collectionName&quot;)))
+
+
+    @classproperty
+    def _addNewRevision(cls):
+        rev = cls._revisionsSchema
+        return Insert(
+            {
+                rev.HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
+                rev.RESOURCE_ID: Parameter(&quot;resourceID&quot;),
+                rev.COLLECTION_NAME: Parameter(&quot;collectionName&quot;),
+                rev.RESOURCE_NAME: None,
+                # Always starts false; may be updated to be a tombstone
+                # later.
+                rev.DELETED: False
+            },
+            Return=[rev.REVISION]
+        )
+
+
+    @inlineCallbacks
+    def _initSyncToken(self):
+        yield self._removeDeletedRevision.on(
+            self._txn, homeID=self._home._resourceID, collectionName=self._name
+        )
+        self._syncTokenRevision = (yield (
+            self._addNewRevision.on(self._txn, homeID=self._home._resourceID,
+                                    resourceID=self._resourceID,
+                                    collectionName=self._name)))[0][0]
+        self._txn.bumpRevisionForObject(self)
+
+
+    @classproperty
+    def _renameSyncTokenQuery(cls):
+        &quot;&quot;&quot;
+        DAL query to change sync token for a rename (increment and adjust
+        resource name).
+        &quot;&quot;&quot;
+        rev = cls._revisionsSchema
+        return Update(
+            {
+                rev.REVISION: schema.REVISION_SEQ,
+                rev.COLLECTION_NAME: Parameter(&quot;name&quot;)
+            },
+            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And
+                  (rev.RESOURCE_NAME == None),
+            Return=rev.REVISION
+        )
+
+
+    @inlineCallbacks
+    def _renameSyncToken(self):
+        rows = yield self._renameSyncTokenQuery.on(
+            self._txn, name=self._name, resourceID=self._resourceID)
+        if rows:
+            self._syncTokenRevision = rows[0][0]
+            self._txn.bumpRevisionForObject(self)
+        else:
+            yield self._initSyncToken()
+
+
+    @classproperty
+    def _bumpSyncTokenQuery(cls):
+        &quot;&quot;&quot;
+        DAL query to change collection sync token. Note this can impact multiple rows if the
+        collection is shared.
+        &quot;&quot;&quot;
+        rev = cls._revisionsSchema
+        return Update(
+            {rev.REVISION: schema.REVISION_SEQ, },
+            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And
+                  (rev.RESOURCE_NAME == None)
+        )
+
+
+    @inlineCallbacks
+    def _bumpSyncToken(self):
+
+        if not self._txn.isRevisionBumpedAlready(self):
+            self._txn.bumpRevisionForObject(self)
+            yield self._bumpSyncTokenQuery.on(
+                self._txn,
+                resourceID=self._resourceID,
+            )
+            self._syncTokenRevision = None
+
+
+    @classproperty
+    def _deleteSyncTokenQuery(cls):
+        &quot;&quot;&quot;
+        DAL query to remove all child revision information. The revision for the collection
+        itself is not touched.
+        &quot;&quot;&quot;
+        rev = cls._revisionsSchema
+        return Delete(
+            From=rev,
+            Where=(rev.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)).And
+                  (rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And
+                  (rev.COLLECTION_NAME == None)
+        )
+
+
+    @classproperty
+    def _sharedRemovalQuery(cls):
+        &quot;&quot;&quot;
+        DAL query to indicate a shared collection has been deleted.
+        &quot;&quot;&quot;
+        rev = cls._revisionsSchema
+        return Update(
+            {
+                rev.RESOURCE_ID: None,
+                rev.REVISION: schema.REVISION_SEQ,
+                rev.DELETED: True
+            },
+            Where=(rev.HOME_RESOURCE_ID == Parameter(&quot;homeID&quot;)).And(
+                rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
+                rev.RESOURCE_NAME == None)
+        )
+
+
+    @classproperty
+    def _unsharedRemovalQuery(cls):
+        &quot;&quot;&quot;
+        DAL query to indicate an owned collection has been deleted.
+        &quot;&quot;&quot;
+        rev = cls._revisionsSchema
+        return Update(
+            {
+                rev.RESOURCE_ID: None,
+                rev.REVISION: schema.REVISION_SEQ,
+                rev.DELETED: True
+            },
+            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
+                rev.RESOURCE_NAME == None),
+        )
+
+
+    @inlineCallbacks
+    def _deletedSyncToken(self, sharedRemoval=False):
+        &quot;&quot;&quot;
+        When a collection is deleted we remove all the revision information for its child resources.
+        We update the collection's sync token to indicate it has been deleted - that way a sync on
+        the home collection can report the deletion of the collection.
+
+        @param sharedRemoval: indicates whether the collection being removed is shared
+        @type sharedRemoval: L{bool}
+        &quot;&quot;&quot;
+        # Remove all child entries
+        yield self._deleteSyncTokenQuery.on(self._txn,
+                                            homeID=self._home._resourceID,
+                                            resourceID=self._resourceID)
+
+        # If this is a share being removed then we only mark this one specific
+        # home/resource-id as being deleted.  On the other hand, if it is a
+        # non-shared collection, then we need to mark all collections
+        # with the resource-id as being deleted to account for direct shares.
+        if sharedRemoval:
+            yield self._sharedRemovalQuery.on(self._txn,
+                                              homeID=self._home._resourceID,
+                                              resourceID=self._resourceID)
+        else:
+            yield self._unsharedRemovalQuery.on(self._txn,
+                                                resourceID=self._resourceID)
+        self._syncTokenRevision = None
+
+
+    def _insertRevision(self, name):
+        return self._changeRevision(&quot;insert&quot;, name)
+
+
+    def _updateRevision(self, name):
+        return self._changeRevision(&quot;update&quot;, name)
+
+
+    def _deleteRevision(self, name):
+        return self._changeRevision(&quot;delete&quot;, name)
+
+
+    @classproperty
+    def _deleteBumpTokenQuery(cls):
+        rev = cls._revisionsSchema
+        return Update(
+            {rev.REVISION: schema.REVISION_SEQ, rev.DELETED: True},
+            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
+                rev.RESOURCE_NAME == Parameter(&quot;name&quot;)),
+            Return=rev.REVISION
+        )
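+        # Note that a &quot;delete&quot; does not remove the row: it bumps REVISION and
+        # sets DELETED, leaving a tombstone from which sync clients can learn
+        # of the removal.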
+
+
+    @classproperty
+    def _updateBumpTokenQuery(cls):
+        rev = cls._revisionsSchema
+        return Update(
+            {rev.REVISION: schema.REVISION_SEQ},
+            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
+                rev.RESOURCE_NAME == Parameter(&quot;name&quot;)),
+            Return=rev.REVISION
+        )
+
+
+    @classproperty
+    def _insertFindPreviouslyNamedQuery(cls):
+        rev = cls._revisionsSchema
+        return Select(
+            [rev.RESOURCE_ID],
+            From=rev,
+            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
+                rev.RESOURCE_NAME == Parameter(&quot;name&quot;))
+        )
+
+
+    @classproperty
+    def _updatePreviouslyNamedQuery(cls):
+        rev = cls._revisionsSchema
+        return Update(
+            {rev.REVISION: schema.REVISION_SEQ, rev.DELETED: False},
+            Where=(rev.RESOURCE_ID == Parameter(&quot;resourceID&quot;)).And(
+                rev.RESOURCE_NAME == Parameter(&quot;name&quot;)),
+            Return=rev.REVISION
+        )
+
+
+    @classproperty
+    def _completelyNewRevisionQuery(cls):
+        rev = cls._revisionsSchema
+        return Insert(
+            {
+                rev.HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
+                rev.RESOURCE_ID: Parameter(&quot;resourceID&quot;),
+                rev.RESOURCE_NAME: Parameter(&quot;name&quot;),
+                rev.REVISION: schema.REVISION_SEQ,
+                rev.DELETED: False
+            },
+            Return=rev.REVISION
+        )
+
+
+    @classproperty
+    def _completelyNewDeletedRevisionQuery(cls):
+        rev = cls._revisionsSchema
+        return Insert(
+            {
+                rev.HOME_RESOURCE_ID: Parameter(&quot;homeID&quot;),
+                rev.RESOURCE_ID: Parameter(&quot;resourceID&quot;),
+                rev.RESOURCE_NAME: Parameter(&quot;name&quot;),
+                rev.REVISION: schema.REVISION_SEQ,
+                rev.DELETED: True
+            },
+            Return=rev.REVISION
+        )
+
+
+    @inlineCallbacks
+    def _changeRevision(self, action, name):
+
+        # Need to handle the case where for some reason the revision entry is
+        # actually missing. For a &quot;delete&quot; we insert a tombstone entry; for an
+        # &quot;update&quot; we will turn it into an &quot;insert&quot;.
+        if action == &quot;delete&quot;:
+            rows = (
+                yield self._deleteBumpTokenQuery.on(
+                    self._txn, resourceID=self._resourceID, name=name))
+            if rows:
+                self._syncTokenRevision = rows[0][0]
+            else:
+                self._syncTokenRevision = (
+                    yield self._completelyNewDeletedRevisionQuery.on(
+                        self._txn, homeID=self.ownerHome()._resourceID,
+                        resourceID=self._resourceID, name=name)
+                )[0][0]
+
+        elif action == &quot;update&quot;:
+            rows = (
+                yield self._updateBumpTokenQuery.on(
+                    self._txn, resourceID=self._resourceID, name=name))
+            if rows:
+                self._syncTokenRevision = rows[0][0]
+            else:
+                self._syncTokenRevision = (
+                    yield self._completelyNewRevisionQuery.on(
+                        self._txn, homeID=self.ownerHome()._resourceID,
+                        resourceID=self._resourceID, name=name)
+                )[0][0]
+
+        elif action == &quot;insert&quot;:
+            # Note that an &quot;insert&quot; may happen for a resource that previously
+            # existed and then was deleted. In that case an entry in the
+            # REVISIONS table still exists so we have to detect that and do db
+            # INSERT or UPDATE as appropriate
+
+            found = bool((
+                yield self._insertFindPreviouslyNamedQuery.on(
+                    self._txn, resourceID=self._resourceID, name=name)))
+            if found:
+                self._syncTokenRevision = (
+                    yield self._updatePreviouslyNamedQuery.on(
+                        self._txn, resourceID=self._resourceID, name=name)
+                )[0][0]
+            else:
+                self._syncTokenRevision = (
+                    yield self._completelyNewRevisionQuery.on(
+                        self._txn, homeID=self.ownerHome()._resourceID,
+                        resourceID=self._resourceID, name=name)
+                )[0][0]
+        yield self._maybeNotify()
+        returnValue(self._syncTokenRevision)
+
+
+    def _maybeNotify(self):
+        &quot;&quot;&quot;
+        Send a change notification if appropriate.  (Overridden in NotificationCollection.)
+        &quot;&quot;&quot;
+        return succeed(None)
+
+
+
+def determineNewest(uid, homeType):
+    &quot;&quot;&quot;
+    Construct a query to determine the modification time of the newest object
+    in a given home.
+
+    @param uid: the UID of the home to scan.
+    @type uid: C{str}
+
+    @param homeType: The type of home to scan; C{ECALENDARTYPE},
+        C{ENOTIFICATIONTYPE}, or C{EADDRESSBOOKTYPE}.
+    @type homeType: C{int}
+
+    @return: A select query that will return a single row containing a single
+        column which is the maximum value.
+    @rtype: L{Select}
+    &quot;&quot;&quot;
+    if homeType == ENOTIFICATIONTYPE:
+        return Select(
+            [Max(schema.NOTIFICATION.MODIFIED)],
+            From=schema.NOTIFICATION_HOME.join(
+                schema.NOTIFICATION,
+                on=schema.NOTIFICATION_HOME.RESOURCE_ID ==
+                schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID),
+            Where=schema.NOTIFICATION_HOME.OWNER_UID == uid
+        )
+    homeTypeName = {ECALENDARTYPE: &quot;CALENDAR&quot;,
+                    EADDRESSBOOKTYPE: &quot;ADDRESSBOOK&quot;}[homeType]
+    home = getattr(schema, homeTypeName + &quot;_HOME&quot;)
+    bind = getattr(schema, homeTypeName + &quot;_BIND&quot;)
+    child = getattr(schema, homeTypeName)
+    obj = getattr(schema, homeTypeName + &quot;_OBJECT&quot;)
+    return Select(
+        [Max(obj.MODIFIED)],
+        From=home.join(bind, on=bind.HOME_RESOURCE_ID == home.RESOURCE_ID).join(
+            child, on=child.RESOURCE_ID == bind.RESOURCE_ID).join(
+            obj, on=obj.PARENT_RESOURCE_ID == child.RESOURCE_ID),
+        Where=(bind.BIND_MODE == 0).And(home.OWNER_UID == uid)
+    )
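+
+# Usage sketch (mirrors the call in mergeHomes below):
+#   rows = yield determineNewest(uid, homeType).on(sqlTxn)
+#   newestModified = rows[0][0]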
+
+
+
+@inlineCallbacks
+def mergeHomes(sqlTxn, one, other, homeType):
+    &quot;&quot;&quot;
+    Merge two homes together.  This determines which of C{one} or C{other} is
+    newer - that is, has been modified more recently - and pulls all the data
+    from the older into the newer home.  Then, it renames the old home's UID
+    to its normalized form prefixed with &quot;old.&quot;, and renames the new home to
+    its normalized UID.
+
+    Because the UIDs of both homes have changed, B{both one and other will be
+    invalid to all other callers from the start of the invocation of this
+    function}.
+
+    @param sqlTxn: the transaction to use
+    @type sqlTxn: A L{CommonTransaction}
+
+    @param one: A calendar home.
+    @type one: L{ICalendarHome}
+
+    @param other: Another, different calendar home.
+    @type other: L{ICalendarHome}
+
+    @param homeType: The type of home to scan; L{ECALENDARTYPE} or
+        L{EADDRESSBOOKTYPE}.
+    @type homeType: C{int}
+
+    @return: a L{Deferred} which fires with the newer of C{one} or C{other},
+        into which the data from the other home has been merged, when the merge
+        is complete.
+    &quot;&quot;&quot;
+    from txdav.caldav.datastore.util import migrateHome as migrateCalendarHome
+    from txdav.carddav.datastore.util import migrateHome as migrateABHome
+    migrateHome = {EADDRESSBOOKTYPE: migrateABHome,
+                   ECALENDARTYPE: migrateCalendarHome,
+                   ENOTIFICATIONTYPE: _dontBotherWithNotifications}[homeType]
+    homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
+                 ECALENDARTYPE: schema.CALENDAR_HOME,
+                 ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
+    both = []
+    both.append([one,
+                 (yield determineNewest(one.uid(), homeType).on(sqlTxn))])
+    both.append([other,
+                 (yield determineNewest(other.uid(), homeType).on(sqlTxn))])
+    both.sort(key=lambda x: x[1])
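+    # both is now ordered oldest-first by newest modification time, so the
+    # home with the most recent data ends up last.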
+
+    older = both[0][0]
+    newer = both[1][0]
+    yield migrateHome(older, newer, merge=True)
+    # Rename the old one to 'old.&lt;correct-guid&gt;'
+    newNormalized = normalizeUUIDOrNot(newer.uid())
+    oldNormalized = normalizeUUIDOrNot(older.uid())
+    yield _renameHome(sqlTxn, homeTable, older.uid(), &quot;old.&quot; + oldNormalized)
+    # Rename the new one to '&lt;correct-guid&gt;'
+    if newer.uid() != newNormalized:
+        yield _renameHome(sqlTxn, homeTable, newer.uid(), newNormalized)
+    returnValue(newer)
+
+
+
+def _renameHome(txn, table, oldUID, newUID):
+    &quot;&quot;&quot;
+    Rename a calendar, addressbook, or notification home.  Note that this
+    function is only safe in transactions that have had caching disabled, and
+    more specifically should only ever be used during upgrades.  Running this
+    in a normal transaction will have unpredictable consequences, especially
+    with respect to memcache.
+
+    @param txn: an SQL transaction to use for this update
+    @type txn: L{twext.enterprise.ienterprise.IAsyncTransaction}
+
+    @param table: the storage table of the desired home type
+    @type table: L{TableSyntax}
+
+    @param oldUID: the old UID, the existing home's UID
+    @type oldUID: L{str}
+
+    @param newUID: the new UID, to change the UID to
+    @type newUID: L{str}
+
+    @return: a L{Deferred} which fires when the home is renamed.
+    &quot;&quot;&quot;
+    return Update({table.OWNER_UID: newUID},
+                  Where=table.OWNER_UID == oldUID).on(txn)
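+
+# Illustrative only: for the calendar home table this renders roughly as
+#   UPDATE CALENDAR_HOME SET OWNER_UID = %s WHERE OWNER_UID = %s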
+
+
+
+def _dontBotherWithNotifications(older, newer, merge):
+    &quot;&quot;&quot;
+    Notifications are more transient and can be easily worked around; don't
+    bother to migrate all of them when there is a UUID case mismatch.
+    &quot;&quot;&quot;
+    pass
+
+
+
+@inlineCallbacks
+def _normalizeHomeUUIDsIn(t, homeType):
+    &quot;&quot;&quot;
+    Normalize the owner UUIDs of all homes of the given type in the store
+    backing the given transaction.
+
+    This changes the case of each home's owner UUID to its canonical form.
+
+    @param t: the transaction to normalize all the UUIDs in.
+    @type t: L{CommonStoreTransaction}
+
+    @param homeType: The type of home to scan, L{ECALENDARTYPE},
+        L{EADDRESSBOOKTYPE}, or L{ENOTIFICATIONTYPE}.
+    @type homeType: C{int}
+
+    @return: a L{Deferred} which fires with C{None} when the UUID normalization
+        is complete.
+    &quot;&quot;&quot;
+    from txdav.caldav.datastore.util import fixOneCalendarHome
+    homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
+                 ECALENDARTYPE: schema.CALENDAR_HOME,
+                 ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
+    homeTypeName = homeTable.model.name.split(&quot;_&quot;)[0]
+
+    allUIDs = yield Select([homeTable.OWNER_UID],
+                           From=homeTable,
+                           OrderBy=homeTable.OWNER_UID).on(t)
+    total = len(allUIDs)
+    allElapsed = []
+    for n, [UID] in enumerate(allUIDs):
+        start = time.time()
+        if allElapsed:
+            estimate = &quot;%0.3f&quot; % ((sum(allElapsed) / len(allElapsed)) *
+                                  (total - n))
+        else:
+            estimate = &quot;unknown&quot;
+        log.info(
+            &quot;Scanning UID {uid} [{homeType}] &quot;
+            &quot;({pct:0.2f}%, {estimate} seconds remaining)...&quot;,
+            uid=UID, pct=(n / float(total)) * 100, estimate=estimate,
+            homeType=homeTypeName
+        )
+        other = None
+        this = yield _getHome(t, homeType, UID)
+        if this is None:
+            log.info(
+                &quot;{uid!r} appears to be missing, already processed&quot;, uid=UID
+            )
+            continue
+        if homeType == ECALENDARTYPE:
+            fixedThisHome = yield fixOneCalendarHome(this)
+        else:
+            fixedThisHome = 0
+        fixedOtherHome = 0
+        try:
+            uuidobj = UUID(UID)
+        except ValueError:
+            pass
+        else:
+            newname = str(uuidobj).upper()
+            if UID != newname:
+                log.info(
+                    &quot;Detected case variance: {uid} {newuid} [{homeType}]&quot;,
+                    uid=UID, newuid=newname, homeType=homeTypeName
+                )
+                other = yield _getHome(t, homeType, newname)
+                if other is None:
+                    # No duplicate: just fix the name.
+                    yield _renameHome(t, homeTable, UID, newname)
+                else:
+                    if homeType == ECALENDARTYPE:
+                        fixedOtherHome = yield fixOneCalendarHome(other)
+                    this = yield mergeHomes(t, this, other, homeType)
+                # NOTE: WE MUST NOT TOUCH EITHER HOME OBJECT AFTER THIS POINT.
+                # THE UIDS HAVE CHANGED AND ALL OPERATIONS WILL FAIL.
+
+        end = time.time()
+        elapsed = end - start
+        allElapsed.append(elapsed)
+        log.info(
+            &quot;Scanned UID {uid}; {elapsed} seconds elapsed,&quot;
+            &quot; {fixes} properties fixed ({duplicate} fixes in duplicate).&quot;,
+            uid=UID, elapsed=elapsed, fixes=fixedThisHome,
+            duplicate=fixedOtherHome
+        )
+    returnValue(None)
+
+
+
+def _getHome(txn, homeType, uid):
+    &quot;&quot;&quot;
+    Like L{CommonHome.homeWithUID} but also honoring ENOTIFICATIONTYPE which
+    isn't I{really} a type of home.
+
+    @param txn: the transaction to retrieve the home from
+    @type txn: L{CommonStoreTransaction}
+
+    @param homeType: L{ENOTIFICATIONTYPE}, L{ECALENDARTYPE}, or
+        L{EADDRESSBOOKTYPE}.
+
+    @param uid: the UID of the home to retrieve.
+    @type uid: L{str}
+
+    @return: a L{Deferred} that fires with the L{CommonHome} or
+        L{NotificationHome} when it has been retrieved.
+    &quot;&quot;&quot;
+    if homeType == ENOTIFICATIONTYPE:
+        return txn.notificationsWithUID(uid)
+    else:
+        return txn.homeWithUID(homeType, uid)
+
+
+
+@inlineCallbacks
+def _normalizeColumnUUIDs(txn, column):
+    &quot;&quot;&quot;
+    Upper-case the UUIDs in the given SQL DAL column.
+
+    @param txn: The transaction.
+    @type txn: L{CommonStoreTransaction}
+
+    @param column: the column, which may contain UIDs, to normalize.
+    @type column: L{ColumnSyntax}
+
+    @return: A L{Deferred} that will fire when the UUID normalization of the
+        given column has completed.
+    &quot;&quot;&quot;
+    tableModel = column.model.table
+    # Get a primary key made of column syntax objects for querying and
+    # comparison later.
+    pkey = [ColumnSyntax(columnModel)
+            for columnModel in tableModel.primaryKey]
+    for row in (yield Select([column] + pkey,
+                             From=TableSyntax(tableModel)).on(txn)):
+        before = row[0]
+        pkeyparts = row[1:]
+        after = normalizeUUIDOrNot(before)
+        if after != before:
+            where = _AndNothing
+            # Build a where clause out of the primary key and the parts of the
+            # primary key that were found.
+            for pkeycol, pkeypart in zip(pkey, pkeyparts):
+                where = where.And(pkeycol == pkeypart)
+            yield Update({column: after}, Where=where).on(txn)
+
+
+
+class _AndNothing(object):
+    &quot;&quot;&quot;
+    Simple placeholder for iteratively generating a 'Where' clause; the 'And'
+    just returns its argument, so it can be used at the start of the loop.
+    &quot;&quot;&quot;
+    @staticmethod
+    def And(expression):
+        &quot;&quot;&quot;
+        Return the argument.
+        &quot;&quot;&quot;
+        return expression
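+
+# Usage sketch (mirrors _normalizeColumnUUIDs above): seed a WHERE clause
+# before a loop -
+#   where = _AndNothing
+#   for col, val in zip(pkey, pkeyparts):
+#       where = where.And(col == val)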
+
+
+
+@inlineCallbacks
+def _needsNormalizationUpgrade(txn):
+    &quot;&quot;&quot;
+    Determine whether a given store requires a UUID normalization data upgrade.
+
+    @param txn: the transaction to use
+    @type txn: L{CommonStoreTransaction}
+
+    @return: a L{Deferred} that fires with C{True} or C{False} depending on
+        whether we need the normalization upgrade or not.
+    &quot;&quot;&quot;
+    for x in [schema.CALENDAR_HOME, schema.ADDRESSBOOK_HOME,
+              schema.NOTIFICATION_HOME]:
+        slct = Select([x.OWNER_UID], From=x,
+                      Where=x.OWNER_UID != Upper(x.OWNER_UID))
+        rows = yield slct.on(txn)
+        if rows:
+            for [uid] in rows:
+                if normalizeUUIDOrNot(uid) != uid:
+                    returnValue(True)
+    returnValue(False)
+
+
+
+@inlineCallbacks
+def fixUUIDNormalization(store):
+    &quot;&quot;&quot;
+    Fix all UUIDs in the given SQL store to be in a canonical form:
+    00000000-0000-0000-0000-000000000000 format and upper-case.
+    &quot;&quot;&quot;
+    t = store.newTransaction(disableCache=True)
+
+    # First, let's see if there are any calendar, addressbook, or notification
+    # homes that have a de-normalized OWNER_UID.  If there are none, then we can
+    # early-out and avoid the tedious and potentially expensive inspection of
+    # oodles of calendar data.
+    if not (yield _needsNormalizationUpgrade(t)):
+        log.info(&quot;No potentially denormalized UUIDs detected, &quot;
+                 &quot;skipping normalization upgrade.&quot;)
+        yield t.abort()
+        returnValue(None)
+    try:
+        yield _normalizeHomeUUIDsIn(t, ECALENDARTYPE)
+        yield _normalizeHomeUUIDsIn(t, EADDRESSBOOKTYPE)
+        yield _normalizeHomeUUIDsIn(t, ENOTIFICATIONTYPE)
+        yield _normalizeColumnUUIDs(t, schema.RESOURCE_PROPERTY.VIEWER_UID)
+        yield _normalizeColumnUUIDs(t, schema.APN_SUBSCRIPTIONS.SUBSCRIBER_GUID)
+    except:
+        log.failure(&quot;Unable to normalize UUIDs&quot;)
+        yield t.abort()
+        # There are a lot of possible problems here which are very hard to
+        # test for individually; unexpected data might cause constraint
+        # violations under one of the manipulations done by
+        # _normalizeHomeUUIDsIn. Since this upgrade does not come along with
+        # a schema version bump and may be re-attempted at any time, just log
+        # the failure and abort so that we can try again later, and the
+        # service will survive for everyone _not_ affected by this somewhat
+        # obscure bug.
+    else:
+        yield t.commit()
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoretesttest_sqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/test/test_sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/test/test_sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/test/test_sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -13,6 +13,8 @@
</span><span class="cx"> # See the License for the specific language governing permissions and
</span><span class="cx"> # limitations under the License.
</span><span class="cx"> ##
</span><ins>+from txdav.common.datastore.sql_util import _normalizeColumnUUIDs, \
+    fixUUIDNormalization
</ins><span class="cx"> 
</span><span class="cx"> &quot;&quot;&quot;
</span><span class="cx"> Tests for L{txdav.common.datastore.sql}.
</span><span class="lines">@@ -31,7 +33,6 @@
</span><span class="cx"> from txdav.common.datastore.sql_tables import schema
</span><span class="cx"> from txdav.common.datastore.test.util import CommonCommonTests
</span><span class="cx"> from txdav.common.icommondatastore import AllRetriesFailed
</span><del>-from txdav.common.datastore.sql import fixUUIDNormalization
</del><span class="cx"> from txdav.xml import element as davxml
</span><span class="cx"> 
</span><span class="cx"> from uuid import UUID
</span><span class="lines">@@ -371,7 +372,6 @@
</span><span class="cx">             rp.VIEWER_UID: &quot;aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa&quot;}
</span><span class="cx">         ).on(txn)
</span><span class="cx">         # test
</span><del>-        from txdav.common.datastore.sql import _normalizeColumnUUIDs
</del><span class="cx">         yield _normalizeColumnUUIDs(txn, rp.VIEWER_UID)
</span><span class="cx">         self.assertEqual(
</span><span class="cx">             map(
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoretestutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/test/util.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/test/util.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/test/util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -30,7 +30,7 @@
</span><span class="cx"> 
</span><span class="cx"> from pycalendar.datetime import DateTime
</span><span class="cx"> 
</span><del>-from random import Random
</del><ins>+from random import Random, randint
</ins><span class="cx"> 
</span><span class="cx"> from twext.python.log import Logger
</span><span class="cx"> from twext.python.filepath import CachingFilePath as FilePath
</span><span class="lines">@@ -302,7 +302,7 @@
</span><span class="cx">         # later table.  Therefore it's OK to drop them in the (reverse) order
</span><span class="cx">         # that they happen to be in.
</span><span class="cx">         tables = [
</span><del>-            t.name for t in schema.model.tables
</del><ins>+            t.name for t in schema.model.tables #@UndefinedVariable
</ins><span class="cx">             # All tables with rows _in_ the schema are populated
</span><span class="cx">             # exclusively _by_ the schema and shouldn't be manipulated
</span><span class="cx">             # while the server is running, so we leave those populated.
</span><span class="lines">@@ -314,13 +314,20 @@
</span><span class="cx">                 yield cleanupTxn.execSQL(&quot;delete from &quot; + table, [])
</span><span class="cx">             except:
</span><span class="cx">                 log.failure(&quot;delete table {table} failed&quot;, table=table)
</span><ins>+
+        # Change the starting values of sequences to random values
+        for sequence in schema.model.sequences: #@UndefinedVariable
+            try:
+                curval = (yield cleanupTxn.execSQL(&quot;select nextval('{}')&quot;.format(sequence.name), []))[0][0]
+                yield cleanupTxn.execSQL(&quot;select setval('{}', {})&quot;.format(sequence.name, curval + randint(1, 10000)), [])
+            except:
+                log.failure(&quot;setval sequence '{sequence}' failed&quot;, sequence=sequence.name)
+        yield cleanupTxn.execSQL(&quot;update CALENDARSERVER set VALUE = '1' where NAME = 'MIN-VALID-REVISION'&quot;, [])
+
</ins><span class="cx">         yield cleanupTxn.commit()
</span><span class="cx"> 
</span><span class="cx">         # Deal with memcached items that must be cleared
</span><del>-        from txdav.caldav.datastore.sql import CalendarHome
-        CalendarHome._cacher.flushAll()
-        from txdav.carddav.datastore.sql import AddressBookHome
-        AddressBookHome._cacher.flushAll()
</del><ins>+        storeToClean.queryCacher.flushAll()
</ins><span class="cx">         from txdav.base.propertystore.sql import PropertyStore
</span><span class="cx">         PropertyStore._cacher.flushAll()
</span><span class="cx"> 
</span><span class="lines">@@ -463,7 +470,7 @@
</span><span class="cx">         populateTxn._migrating = True
</span><span class="cx">     for homeUID in requirements:
</span><span class="cx">         calendars = requirements[homeUID]
</span><del>-        home = yield populateTxn.calendarHomeWithUID(homeUID, True)
</del><ins>+        home = yield populateTxn.calendarHomeWithUID(homeUID, create=True)
</ins><span class="cx">         if calendars is not None:
</span><span class="cx">             # We don't want the default calendar or inbox to appear unless it's
</span><span class="cx">             # explicitly listed.
</span><span class="lines">@@ -558,7 +565,7 @@
</span><span class="cx">     for homeUID in md5s:
</span><span class="cx">         calendars = md5s[homeUID]
</span><span class="cx">         if calendars is not None:
</span><del>-            home = yield populateTxn.calendarHomeWithUID(homeUID, True)
</del><ins>+            home = yield populateTxn.calendarHomeWithUID(homeUID, create=True)
</ins><span class="cx">             for calendarName in calendars:
</span><span class="cx">                 calendarObjNames = calendars[calendarName]
</span><span class="cx">                 if calendarObjNames is not None:
</span><span class="lines">@@ -591,7 +598,7 @@
</span><span class="cx">     for homeUID in requirements:
</span><span class="cx">         addressbooks = requirements[homeUID]
</span><span class="cx">         if addressbooks is not None:
</span><del>-            home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
</del><ins>+            home = yield populateTxn.addressbookHomeWithUID(homeUID, create=True)
</ins><span class="cx">             # We don't want the default addressbook
</span><span class="cx">             try:
</span><span class="cx">                 yield home.removeAddressBookWithName(&quot;addressbook&quot;)
</span><span class="lines">@@ -630,7 +637,7 @@
</span><span class="cx">     for homeUID in md5s:
</span><span class="cx">         addressbooks = md5s[homeUID]
</span><span class="cx">         if addressbooks is not None:
</span><del>-            home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
</del><ins>+            home = yield populateTxn.addressbookHomeWithUID(homeUID, create=True)
</ins><span class="cx">             for addressbookName in addressbooks:
</span><span class="cx">                 addressbookObjNames = addressbooks[addressbookName]
</span><span class="cx">                 if addressbookObjNames is not None:
</span><span class="lines">@@ -922,35 +929,35 @@
</span><span class="cx">         return self.store
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def homeUnderTest(self, txn=None, name=&quot;home1&quot;, create=False):
</del><ins>+    def homeUnderTest(self, txn=None, name=&quot;home1&quot;, status=None, create=False):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Get the calendar home detailed by C{requirements['home1']}.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         if txn is None:
</span><span class="cx">             txn = self.transactionUnderTest()
</span><del>-        return txn.calendarHomeWithUID(name, create=create)
</del><ins>+        return txn.calendarHomeWithUID(name, status=status, create=create)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><del>-    def calendarUnderTest(self, txn=None, name=&quot;calendar_1&quot;, home=&quot;home1&quot;):
</del><ins>+    def calendarUnderTest(self, txn=None, name=&quot;calendar_1&quot;, home=&quot;home1&quot;, status=None):
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Get the calendar detailed by C{requirements['home1']['calendar_1']}.
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        home = yield self.homeUnderTest(txn, home)
</del><ins>+        home = yield self.homeUnderTest(txn, home, status=status)
</ins><span class="cx">         calendar = yield home.calendarWithName(name)
</span><span class="cx">         returnValue(calendar)
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def calendarObjectUnderTest(
</span><del>-        self, txn=None, name=&quot;1.ics&quot;, calendar_name=&quot;calendar_1&quot;, home=&quot;home1&quot;
</del><ins>+        self, txn=None, name=&quot;1.ics&quot;, calendar_name=&quot;calendar_1&quot;, home=&quot;home1&quot;, status=None
</ins><span class="cx">     ):
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Get the calendar detailed by
</span><span class="cx">         C{requirements[home][calendar_name][name]}.
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         calendar = yield self.calendarUnderTest(
</span><del>-            txn, name=calendar_name, home=home
</del><ins>+            txn, name=calendar_name, home=home, status=status
</ins><span class="cx">         )
</span><span class="cx">         object = yield calendar.calendarObjectWithName(name)
</span><span class="cx">         returnValue(object)
</span><span class="lines">@@ -992,6 +999,12 @@
</span><span class="cx">         returnValue(object)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+    def notificationCollectionUnderTest(self, txn=None, name=&quot;home1&quot;, status=None, create=False):
+        if txn is None:
+            txn = self.transactionUnderTest()
+        return txn.notificationsWithUID(name, status=status, create=create)
+
+
</ins><span class="cx">     def userRecordWithShortName(self, shortname):
</span><span class="cx">         return self.directory.recordWithShortName(
</span><span class="cx">             self.directory.recordType.user, shortname
</span><span class="lines">@@ -1015,11 +1028,13 @@
</span><span class="cx">         return self.directory.removeRecords([uid])
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-    def changeRecord(self, record, fieldname, value):
</del><ins>+    def changeRecord(self, record, fieldname, value, directory=None):
+        if directory is None:
+            directory = self.directory
</ins><span class="cx">         fields = record.fields.copy()
</span><span class="cx">         fields[fieldname] = value
</span><del>-        updatedRecord = DirectoryRecord(self.directory, fields)
-        return self.directory.updateRecords((updatedRecord,))
</del><ins>+        updatedRecord = DirectoryRecord(directory, fields)
+        return directory.updateRecords((updatedRecord,))
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> 
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoreupgradesqlupgradescalendar_upgrade_from_2_to_3py"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -21,9 +21,9 @@
</span><span class="cx"> as in calendar data and properties.
</span><span class="cx"> &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-from txdav.common.datastore.sql import fixUUIDNormalization
</del><span class="cx"> from twisted.internet.defer import inlineCallbacks
</span><span class="cx"> from txdav.common.datastore.upgrade.sql.upgrades.util import updateCalendarDataVersion
</span><ins>+from txdav.common.datastore.sql_util import fixUUIDNormalization
</ins><span class="cx"> 
</span><span class="cx"> UPGRADE_TO_VERSION = 3
</span><span class="cx"> 
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoreupgradesqlupgradestesttest_notification_upgrade_from_0_to_1py"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/test/test_notification_upgrade_from_0_to_1.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/test/test_notification_upgrade_from_0_to_1.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/test/test_notification_upgrade_from_0_to_1.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -169,7 +169,7 @@
</span><span class="cx">         )
</span><span class="cx"> 
</span><span class="cx">         for uid, notificationtype, _ignore_jtype, notificationdata, _ignore_jdata in data:
</span><del>-            notifications = yield self.transactionUnderTest().notificationsWithUID(&quot;user01&quot;)
</del><ins>+            notifications = yield self.transactionUnderTest().notificationsWithUID(&quot;user01&quot;, create=True)
</ins><span class="cx">             yield notifications.writeNotificationObject(uid, notificationtype, notificationdata)
</span><span class="cx"> 
</span><span class="cx">         # Force data version to previous
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoreworktesttest_revision_cleanuppy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/work/test/test_revision_cleanup.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/work/test/test_revision_cleanup.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/work/test/test_revision_cleanup.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -55,7 +55,7 @@
</span><span class="cx">         for homeUID in addressookRequirements:
</span><span class="cx">             addressbooks = addressookRequirements[homeUID]
</span><span class="cx">             if addressbooks is not None:
</span><del>-                home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
</del><ins>+                home = yield populateTxn.addressbookHomeWithUID(homeUID, create=True)
</ins><span class="cx">                 addressbook = home.addressbook()
</span><span class="cx"> 
</span><span class="cx">                 addressbookObjNames = addressbooks[addressbook.name()]
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommonicommondatastorepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/icommondatastore.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/icommondatastore.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/icommondatastore.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -40,6 +40,12 @@
</span><span class="cx">     &quot;InternalDataStoreError&quot;,
</span><span class="cx"> ]
</span><span class="cx"> 
</span><ins>+# Constants for top-level store types
+ECALENDARTYPE = 0
+EADDRESSBOOKTYPE = 1
+ENOTIFICATIONTYPE = 2
+
+
</ins><span class="cx"> #
</span><span class="cx"> # Exceptions
</span><span class="cx"> #
</span><span class="lines">@@ -347,7 +353,7 @@
</span><span class="cx">         @param token: The device token of the subscriber
</span><span class="cx">         @type token: C{str}
</span><span class="cx"> 
</span><del>-        @return: tuples of (key, timestamp, guid)
</del><ins>+        @return: list of L{Record}
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">     def apnSubscriptionsByKey(key): #@NoSelf
</span><span class="lines">@@ -357,7 +363,7 @@
</span><span class="cx">         @param key: The push key
</span><span class="cx">         @type key: C{str}
</span><span class="cx"> 
</span><del>-        @return: tuples of (token, guid)
</del><ins>+        @return: list of L{Record}
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">     def apnSubscriptionsBySubscriber(guid): #@NoSelf
</span><span class="lines">@@ -367,7 +373,7 @@
</span><span class="cx">         @param guid: The GUID of the subscribed principal
</span><span class="cx">         @type guid: C{str}
</span><span class="cx"> 
</span><del>-        @return: tuples of (token, key, timestamp, userAgent, ipAddr)
</del><ins>+        @return: list of L{Record}
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">     def imipCreateToken(organizer, attendee, icaluid, token=None): #@NoSelf
</span><span class="lines">@@ -389,8 +395,8 @@
</span><span class="cx">         &quot;&quot;&quot;
</span><span class="cx">         Returns the organizer, attendee, and icaluid corresponding to the token
</span><span class="cx"> 
</span><del>-        @param token: the token to look up
-        @type token: C{str}
</del><ins>+        @param token: the token record
+        @type token: L{Record}
</ins><span class="cx">         &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx">     def imipGetToken(organizer, attendee, icaluid): #@NoSelf
</span></span></pre></div>
<a id="CalendarServertrunktxdavwhodelegatespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/delegates.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/delegates.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/delegates.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -353,13 +353,8 @@
</span><span class="cx"> 
</span><span class="cx">         if delegate.recordType == BaseRecordType.group:
</span><span class="cx">             # find the groupID
</span><del>-            (
-                groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
-                _ignore_extant
-            ) = yield txn.groupByUID(
-                delegate.uid
-            )
-            yield txn.addDelegateGroup(delegator.uid, groupID, readWrite)
</del><ins>+            group = yield txn.groupByUID(delegate.uid)
+            yield txn.addDelegateGroup(delegator.uid, group.groupID, readWrite)
</ins><span class="cx">         else:
</span><span class="cx">             yield txn.addDelegate(delegator.uid, delegate.uid, readWrite)
</span><span class="cx"> 
</span><span class="lines">@@ -393,13 +388,8 @@
</span><span class="cx"> 
</span><span class="cx">         if delegate.recordType == BaseRecordType.group:
</span><span class="cx">             # find the groupID
</span><del>-            (
-                groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
-                _ignore_extant
-            ) = yield txn.groupByUID(
-                delegate.uid
-            )
-            yield txn.removeDelegateGroup(delegator.uid, groupID, readWrite)
</del><ins>+            group = yield txn.groupByUID(delegate.uid)
+            yield txn.removeDelegateGroup(delegator.uid, group.groupID, readWrite)
</ins><span class="cx">         else:
</span><span class="cx">             yield txn.removeDelegate(delegator.uid, delegate.uid, readWrite)
</span><span class="cx"> 
</span></span></pre></div>
<a id="CalendarServertrunktxdavwhogroupspy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/groups.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/groups.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/groups.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -20,13 +20,15 @@
</span><span class="cx"> &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx"> from twext.enterprise.dal.record import fromTable
</span><del>-from twext.enterprise.dal.syntax import Delete, Select, Parameter
</del><ins>+from twext.enterprise.dal.syntax import Select
</ins><span class="cx"> from twext.enterprise.jobqueue import AggregatedWorkItem, RegeneratingWorkItem
</span><span class="cx"> from twext.python.log import Logger
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue, succeed, \
</span><span class="cx">     DeferredList
</span><span class="cx"> from twistedcaldav.config import config
</span><span class="cx"> from txdav.caldav.datastore.sql import CalendarStoreFeatures
</span><ins>+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
+from txdav.common.datastore.sql_directory import GroupsRecord
</ins><span class="cx"> from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
</span><span class="cx"> import datetime
</span><span class="cx"> import itertools
</span><span class="lines">@@ -85,7 +87,7 @@
</span><span class="cx"> 
</span><span class="cx"> class GroupRefreshWork(AggregatedWorkItem, fromTable(schema.GROUP_REFRESH_WORK)):
</span><span class="cx"> 
</span><del>-    group = property(lambda self: (self.table.GROUP_UID == self.groupUid))
</del><ins>+    group = property(lambda self: (self.table.GROUP_UID == self.groupUID))
</ins><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def doWork(self):
</span><span class="lines">@@ -94,27 +96,27 @@
</span><span class="cx"> 
</span><span class="cx">             try:
</span><span class="cx">                 yield groupCacher.refreshGroup(
</span><del>-                    self.transaction, self.groupUid.decode(&quot;utf-8&quot;)
</del><ins>+                    self.transaction, self.groupUID.decode(&quot;utf-8&quot;)
</ins><span class="cx">                 )
</span><span class="cx">             except Exception, e:
</span><span class="cx">                 log.error(
</span><span class="cx">                     &quot;Failed to refresh group {group} {err}&quot;,
</span><del>-                    group=self.groupUid, err=e
</del><ins>+                    group=self.groupUID, err=e
</ins><span class="cx">                 )
</span><span class="cx"> 
</span><span class="cx">         else:
</span><span class="cx">             log.debug(
</span><span class="cx">                 &quot;Rescheduling group refresh for {group}: {when}&quot;,
</span><del>-                group=self.groupUid,
</del><ins>+                group=self.groupUID,
</ins><span class="cx">                 when=datetime.datetime.utcnow() + datetime.timedelta(seconds=10)
</span><span class="cx">             )
</span><del>-            yield self.reschedule(self.transaction, 10, groupUID=self.groupUid)
</del><ins>+            yield self.reschedule(self.transaction, 10, groupUID=self.groupUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> class GroupDelegateChangesWork(AggregatedWorkItem, fromTable(schema.GROUP_DELEGATE_CHANGES_WORK)):
</span><span class="cx"> 
</span><del>-    delegator = property(lambda self: (self.table.DELEGATOR_UID == self.delegatorUid))
</del><ins>+    delegator = property(lambda self: (self.table.DELEGATOR_UID == self.delegatorUID))
</ins><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def doWork(self):
</span><span class="lines">@@ -124,14 +126,14 @@
</span><span class="cx">             try:
</span><span class="cx">                 yield groupCacher.applyExternalAssignments(
</span><span class="cx">                     self.transaction,
</span><del>-                    self.delegatorUid.decode(&quot;utf-8&quot;),
-                    self.readDelegateUid.decode(&quot;utf-8&quot;),
-                    self.writeDelegateUid.decode(&quot;utf-8&quot;)
</del><ins>+                    self.delegatorUID.decode(&quot;utf-8&quot;),
+                    self.readDelegateUID.decode(&quot;utf-8&quot;),
+                    self.writeDelegateUID.decode(&quot;utf-8&quot;)
</ins><span class="cx">                 )
</span><span class="cx">             except Exception, e:
</span><span class="cx">                 log.error(
</span><span class="cx">                     &quot;Failed to apply external delegates for {uid} {err}&quot;,
</span><del>-                    uid=self.delegatorUid, err=e
</del><ins>+                    uid=self.delegatorUID, err=e
</ins><span class="cx">                 )
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -182,8 +184,8 @@
</span><span class="cx">             homeID = rows[0][0]
</span><span class="cx">             home = yield self.transaction.calendarHomeWithResourceID(homeID)
</span><span class="cx">             calendar = yield home.childWithID(self.calendarID)
</span><del>-            groupUID = ((yield self.transaction.groupByID(self.groupID)))[0]
-            yield calendar.reconcileGroupSharee(groupUID)
</del><ins>+            group = (yield self.transaction.groupByID(self.groupID))
+            yield calendar.reconcileGroupSharee(group.groupUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -268,33 +270,28 @@
</span><span class="cx">         #     &quot;Groups to refresh: {g}&quot;, g=groupUIDs
</span><span class="cx">         # )
</span><span class="cx"> 
</span><del>-        gr = schema.GROUPS
</del><span class="cx">         if config.AutomaticPurging.Enabled and groupUIDs:
</span><span class="cx">             # remove unused groups and groups that have not been seen in a while
</span><span class="cx">             dateLimit = (
</span><span class="cx">                 datetime.datetime.utcnow() -
</span><span class="cx">                 datetime.timedelta(seconds=float(config.AutomaticPurging.GroupPurgeIntervalSeconds))
</span><span class="cx">             )
</span><del>-            rows = yield Delete(
-                From=gr,
-                Where=(
-                    (gr.EXTANT == 0).And(gr.MODIFIED &lt; dateLimit)
</del><ins>+            rows = yield GroupsRecord.deletesome(
+                txn,
+                (
+                    (GroupsRecord.extant == 0).And(GroupsRecord.modified &lt; dateLimit)
</ins><span class="cx">                 ).Or(
</span><del>-                    gr.GROUP_UID.NotIn(
-                        Parameter(&quot;groupUIDs&quot;, len(groupUIDs))
-                    )
-                ) if groupUIDs else None,
-                Return=[gr.GROUP_UID]
-            ).on(txn, groupUIDs=groupUIDs)
</del><ins>+                    GroupsRecord.groupUID.NotIn(groupUIDs)
+                ),
+                returnCols=GroupsRecord.groupUID,
+            )
</ins><span class="cx">         else:
</span><span class="cx">             # remove unused groups
</span><del>-            rows = yield Delete(
-                From=gr,
-                Where=gr.GROUP_UID.NotIn(
-                    Parameter(&quot;groupUIDs&quot;, len(groupUIDs))
-                ) if groupUIDs else None,
-                Return=[gr.GROUP_UID]
-            ).on(txn, groupUIDs=groupUIDs)
</del><ins>+            rows = yield GroupsRecord.deletesome(
+                txn,
+                GroupsRecord.groupUID.NotIn(groupUIDs) if groupUIDs else None,
+                returnCols=GroupsRecord.groupUID,
+            )
</ins><span class="cx">         deletedGroupUIDs = [row[0] for row in rows]
</span><span class="cx">         if deletedGroupUIDs:
</span><span class="cx">             self.log.debug(&quot;Deleted old or unused groups {d}&quot;, d=deletedGroupUIDs)
</span><span class="lines">@@ -302,7 +299,7 @@
</span><span class="cx">         # For each of those groups, create a per-group refresh work item
</span><span class="cx">         for groupUID in set(groupUIDs) - set(deletedGroupUIDs):
</span><span class="cx">             self.log.debug(&quot;Enqueuing group refresh for {u}&quot;, u=groupUID)
</span><del>-            yield GroupRefreshWork.reschedule(txn, 0, groupUid=groupUID)
</del><ins>+            yield GroupRefreshWork.reschedule(txn, 0, groupUID=groupUID)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="lines">@@ -335,9 +332,9 @@
</span><span class="cx">                     )
</span><span class="cx">                 else:
</span><span class="cx">                     yield GroupDelegateChangesWork.reschedule(
</span><del>-                        txn, 0, delegatorUid=delegatorUID,
-                        readDelegateUid=readDelegateUID,
-                        writeDelegateUid=writeDelegateUID
</del><ins>+                        txn, 0, delegatorUID=delegatorUID,
+                        readDelegateUID=readDelegateUID,
+                        writeDelegateUID=writeDelegateUID
</ins><span class="cx">                     )
</span><span class="cx">         if removed:
</span><span class="cx">             for delegatorUID in removed:
</span><span class="lines">@@ -351,8 +348,8 @@
</span><span class="cx">                     )
</span><span class="cx">                 else:
</span><span class="cx">                     yield GroupDelegateChangesWork.reschedule(
</span><del>-                        txn, 0, delegatorUid=delegatorUID,
-                        readDelegateUid=&quot;&quot;, writeDelegateUid=&quot;&quot;
</del><ins>+                        txn, 0, delegatorUID=delegatorUID,
+                        readDelegateUID=&quot;&quot;, writeDelegateUID=&quot;&quot;
</ins><span class="cx">                     )
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="lines">@@ -367,26 +364,20 @@
</span><span class="cx">         readDelegateGroupID = writeDelegateGroupID = None
</span><span class="cx"> 
</span><span class="cx">         if readDelegateUID:
</span><del>-            (
-                readDelegateGroupID, _ignore_name, _ignore_hash,
-                _ignore_modified, _ignore_extant
-            ) = (
-                yield txn.groupByUID(readDelegateUID)
-            )
-            if readDelegateGroupID is None:
</del><ins>+            readDelegateGroup = yield txn.groupByUID(readDelegateUID)
+            if readDelegateGroup is None:
</ins><span class="cx">                 # The group record does not actually exist
</span><span class="cx">                 readDelegateUID = None
</span><ins>+            else:
+                readDelegateGroupID = readDelegateGroup.groupID
</ins><span class="cx"> 
</span><span class="cx">         if writeDelegateUID:
</span><del>-            (
-                writeDelegateGroupID, _ignore_name, _ignore_hash,
-                _ignore_modified, _ignore_extant
-            ) = (
-                yield txn.groupByUID(writeDelegateUID)
-            )
-            if writeDelegateGroupID is None:
</del><ins>+            writeDelegateGroup = yield txn.groupByUID(writeDelegateUID)
+            if writeDelegateGroup is None:
</ins><span class="cx">                 # The group record does not actually exist
</span><span class="cx">                 writeDelegateUID = None
</span><ins>+            else:
+                writeDelegateGroupID = writeDelegateGroup.groupID
</ins><span class="cx"> 
</span><span class="cx">         yield txn.assignExternalDelegates(
</span><span class="cx">             delegatorUID, readDelegateGroupID, writeDelegateGroupID,
</span><span class="lines">@@ -411,45 +402,36 @@
</span><span class="cx">         else:
</span><span class="cx">             self.log.debug(&quot;Got group record: {u}&quot;, u=record.uid)
</span><span class="cx"> 
</span><del>-        (
-            groupID, cachedName, cachedMembershipHash, _ignore_modified,
-            cachedExtant
-        ) = yield txn.groupByUID(
-            groupUID,
-            create=(record is not None)
-        )
</del><ins>+        group = yield txn.groupByUID(groupUID, create=(record is not None))
</ins><span class="cx"> 
</span><del>-        if groupID:
-            membershipChanged, addedUIDs, removedUIDs = yield txn.refreshGroup(
-                groupUID, record, groupID,
-                cachedName, cachedMembershipHash, cachedExtant
-            )
</del><ins>+        if group:
+            membershipChanged, addedUIDs, removedUIDs = yield txn.refreshGroup(group, record)
</ins><span class="cx"> 
</span><span class="cx">             if membershipChanged:
</span><span class="cx">                 self.log.info(
</span><span class="cx">                     &quot;Membership changed for group {uid} {name}:\n\tadded {added}\n\tremoved {removed}&quot;,
</span><del>-                    uid=groupUID,
-                    name=cachedName,
</del><ins>+                    uid=group.groupUID,
+                    name=group.name,
</ins><span class="cx">                     added=&quot;,&quot;.join(addedUIDs),
</span><span class="cx">                     removed=&quot;,&quot;.join(removedUIDs),
</span><span class="cx">                 )
</span><span class="cx"> 
</span><span class="cx">                 # Send cache change notifications
</span><span class="cx">                 if self.cacheNotifier is not None:
</span><del>-                    self.cacheNotifier.changed(groupUID)
</del><ins>+                    self.cacheNotifier.changed(group.groupUID)
</ins><span class="cx">                     for uid in itertools.chain(addedUIDs, removedUIDs):
</span><span class="cx">                         self.cacheNotifier.changed(uid)
</span><span class="cx"> 
</span><span class="cx">                 # Notify other store APIs of changes
</span><del>-                wpsAttendee = yield self.scheduleGroupAttendeeReconciliations(txn, groupID)
-                wpsShareee = yield self.scheduleGroupShareeReconciliations(txn, groupID)
</del><ins>+                wpsAttendee = yield self.scheduleGroupAttendeeReconciliations(txn, group.groupID)
+                wpsShareee = yield self.scheduleGroupShareeReconciliations(txn, group.groupID)
</ins><span class="cx"> 
</span><span class="cx">                 returnValue(wpsAttendee + wpsShareee)
</span><span class="cx">             else:
</span><span class="cx">                 self.log.debug(
</span><span class="cx">                     &quot;No membership change for group {uid} {name}&quot;,
</span><del>-                    uid=groupUID,
-                    name=cachedName
</del><ins>+                    uid=group.groupUID,
+                    name=group.name
</ins><span class="cx">                 )
</span><span class="cx"> 
</span><span class="cx">         returnValue(tuple())
</span><span class="lines">@@ -480,19 +462,15 @@
</span><span class="cx">         work items for them.
</span><span class="cx">         returns: WorkProposal
</span><span class="cx">         &quot;&quot;&quot;
</span><del>-        ga = schema.GROUP_ATTENDEE
-        rows = yield Select(
-            [ga.RESOURCE_ID, ],
-            From=ga,
-            Where=ga.GROUP_ID == groupID,
-        ).on(txn)
</del><span class="cx"> 
</span><ins>+        records = yield GroupAttendeeRecord.querysimple(txn, groupID=groupID)
+
</ins><span class="cx">         wps = []
</span><del>-        for [eventID] in rows:
</del><ins>+        for record in records:
</ins><span class="cx">             wp = yield GroupAttendeeReconciliationWork.reschedule(
</span><span class="cx">                 txn,
</span><span class="cx">                 seconds=float(config.GroupAttendees.ReconciliationDelaySeconds),
</span><del>-                resourceID=eventID,
</del><ins>+                resourceID=record.resourceID,
</ins><span class="cx">                 groupID=groupID,
</span><span class="cx">             )
</span><span class="cx">             wps.append(wp)
</span><span class="lines">@@ -546,20 +524,15 @@
</span><span class="cx">             )
</span><span class="cx"> 
</span><span class="cx">         # Get groupUIDs for all group attendees
</span><del>-        ga = schema.GROUP_ATTENDEE
-        gr = schema.GROUPS
-        rows = yield Select(
-            [gr.GROUP_UID],
-            From=gr,
-            Where=gr.GROUP_ID.In(
-                Select(
-                    [ga.GROUP_ID],
-                    From=ga,
-                    Distinct=True
-                )
-            )
-        ).on(txn)
-        attendeeGroupUIDs = frozenset([row[0] for row in rows])
</del><ins>+        groups = yield GroupsRecord.query(
+            txn,
+            GroupsRecord.groupID.In(GroupAttendeeRecord.queryExpr(
+                expr=None,
+                attributes=(GroupAttendeeRecord.groupID,),
+                distinct=True,
+            ))
+        )
+        attendeeGroupUIDs = frozenset([group.groupUID for group in groups])
</ins><span class="cx">         self.log.info(
</span><span class="cx">             &quot;There are {count} group attendees&quot;, count=len(attendeeGroupUIDs)
</span><span class="cx">         )
</span></span></pre></div>
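<p>The hunks above move txdav/who/groups.py from positional tuple unpacking of <code>txn.groupByUID()</code> to a single record object, and from hand-built DAL <code>Select</code>/<code>Delete</code> statements to record-class helpers such as <code>GroupsRecord.deletesome</code> and <code>GroupAttendeeRecord.querysimple</code>. A minimal sketch of the new calling convention, assuming <code>txn</code> is an open store transaction; the helper name <code>describeGroup</code> is hypothetical, and the attribute names are taken from the diff itself:</p>
<pre>
# Sketch only: txn is assumed to be an open store transaction.
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def describeGroup(txn, groupUID):
    # Old style (removed above):
    #     groupID, name, membershipHash, modified, extant = yield txn.groupByUID(uid)
    # New style: one record object, or None when the group is unknown.
    group = yield txn.groupByUID(groupUID, create=False)
    if group is None:
        returnValue(None)
    # Attributes replace positional tuple slots.
    returnValue((group.groupID, group.name, group.membershipHash, group.extant))
</pre>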
<a id="CalendarServertrunktxdavwhotesttest_delegatespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/test/test_delegates.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/test/test_delegates.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/test/test_delegates.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -19,6 +19,8 @@
</span><span class="cx"> &quot;&quot;&quot;
</span><span class="cx"> 
</span><span class="cx"> from txdav.common.datastore.sql import CommonStoreTransaction
</span><ins>+from txdav.common.datastore.sql_directory import DelegateRecord, \
+    DelegateGroupsRecord
</ins><span class="cx"> from txdav.who.delegates import Delegates, RecordType as DelegateRecordType
</span><span class="cx"> from txdav.who.groups import GroupCacher
</span><span class="cx"> from twext.who.idirectory import RecordType
</span><span class="lines">@@ -211,12 +213,9 @@
</span><span class="cx">                 yield self.directory.recordWithShortName(RecordType.user, name)
</span><span class="cx">             )
</span><span class="cx">             newSet.add(record.uid)
</span><del>-        (
-            groupID, name, _ignore_membershipHash, _ignore_modified,
-            _ignore_extant
-        ) = (yield txn.groupByUID(group1.uid))
</del><ins>+        group = yield txn.groupByUID(group1.uid)
</ins><span class="cx">         _ignore_added, _ignore_removed = (
</span><del>-            yield self.groupCacher.synchronizeMembers(txn, groupID, newSet)
</del><ins>+            yield self.groupCacher.synchronizeMembers(txn, group.groupID, newSet)
</ins><span class="cx">         )
</span><span class="cx">         delegates = (yield Delegates.delegatesOf(txn, delegator, True, expanded=True))
</span><span class="cx">         self.assertEquals(
</span><span class="lines">@@ -261,15 +260,14 @@
</span><span class="cx">         yield txn.commit()
</span><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction(label=&quot;test_noDuplication&quot;)
</span><del>-        results = (
-            yield txn._selectDelegatesQuery.on(
-                txn,
-                delegator=delegator.uid.encode(&quot;utf-8&quot;),
-                readWrite=1
</del><ins>+        results = yield DelegateRecord.query(
+            txn,
+            (DelegateRecord.delegator == delegator.uid.encode(&quot;utf-8&quot;)).And(
+                DelegateRecord.readWrite == 1
</ins><span class="cx">             )
</span><span class="cx">         )
</span><span class="cx">         yield txn.commit()
</span><del>-        self.assertEquals([[&quot;__sagen1__&quot;]], map(list, results))
</del><ins>+        self.assertEquals([&quot;__sagen1__&quot;, ], [record.delegate for record in results])
</ins><span class="cx"> 
</span><span class="cx">         # Delegate groups:
</span><span class="cx">         group1 = yield self.directory.recordWithUID(u&quot;__top_group_1__&quot;)
</span><span class="lines">@@ -283,15 +281,13 @@
</span><span class="cx">         yield txn.commit()
</span><span class="cx"> 
</span><span class="cx">         txn = self.store.newTransaction(label=&quot;test_noDuplication&quot;)
</span><del>-        results = (
-            yield txn._selectDelegateGroupsQuery.on(
-                txn,
-                delegator=delegator.uid.encode(&quot;utf-8&quot;),
-                readWrite=1
-            )
</del><ins>+        results = yield DelegateGroupsRecord.delegateGroups(
+            txn,
+            delegator.uid,
+            True,
</ins><span class="cx">         )
</span><span class="cx">         yield txn.commit()
</span><del>-        self.assertEquals([[&quot;__top_group_1__&quot;]], map(list, results))
</del><ins>+        self.assertEquals([&quot;__top_group_1__&quot;, ], [record.groupUID for record in results])
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> 
</span></span></pre></div>
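<p>In test_delegates.py the tests stop driving the private <code>_selectDelegatesQuery</code>/<code>_selectDelegateGroupsRecord</code> statements and instead go through the record classes imported at the top of the file. A sketch of the read-write delegate lookup under those assumptions (<code>readWriteDelegatesOf</code> is a hypothetical helper; the query expression is copied from the diff):</p>
<pre>
# Sketch only, using the imports shown in the diff above.
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.common.datastore.sql_directory import DelegateRecord

@inlineCallbacks
def readWriteDelegatesOf(txn, delegatorUID):
    # Record-class query replaces the raw SQL statement; readWrite == 1
    # selects read-write (as opposed to read-only) delegates.
    records = yield DelegateRecord.query(
        txn,
        (DelegateRecord.delegator == delegatorUID.encode("utf-8")).And(
            DelegateRecord.readWrite == 1
        ),
    )
    returnValue([record.delegate for record in records])
</pre>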
<a id="CalendarServertrunktxdavwhotesttest_group_attendeespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/test/test_group_attendees.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/test/test_group_attendees.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/test/test_group_attendees.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -18,7 +18,6 @@
</span><span class="cx">     group attendee tests
</span><span class="cx"> &quot;&quot;&quot;
</span><span class="cx"> 
</span><del>-from twext.enterprise.dal.syntax import Insert
</del><span class="cx"> from twext.enterprise.jobqueue import JobItem
</span><span class="cx"> from twext.python.filepath import CachingFilePath as FilePath
</span><span class="cx"> from twext.who.directory import DirectoryService
</span><span class="lines">@@ -27,8 +26,8 @@
</span><span class="cx"> from twisted.trial import unittest
</span><span class="cx"> from twistedcaldav.config import config
</span><span class="cx"> from twistedcaldav.ical import Component, normalize_iCalStr
</span><ins>+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
</ins><span class="cx"> from txdav.caldav.datastore.test.util import populateCalendarsFrom, CommonCommonTests
</span><del>-from txdav.common.datastore.sql_tables import schema
</del><span class="cx"> from txdav.who.directory import CalendarDirectoryRecordMixin
</span><span class="cx"> from txdav.who.groups import GroupCacher
</span><span class="cx"> import os
</span><span class="lines">@@ -871,16 +870,13 @@
</span><span class="cx">         # finally, simulate an event that has become old
</span><span class="cx">         self.patch(CalendarDirectoryRecordMixin, &quot;expandedMembers&quot;, unpatchedExpandedMembers)
</span><span class="cx"> 
</span><del>-        (
-            groupID, _ignore_name, _ignore_membershipHash, _ignore_modDate,
-            _ignore_extant
-        ) = yield self.transactionUnderTest().groupByUID(&quot;group01&quot;)
-        ga = schema.GROUP_ATTENDEE
-        yield Insert({
-            ga.RESOURCE_ID: cobj._resourceID,
-            ga.GROUP_ID: groupID,
-            ga.MEMBERSHIP_HASH: (-1),
-        }).on(self.transactionUnderTest())
</del><ins>+        group = yield self.transactionUnderTest().groupByUID(&quot;group01&quot;)
+        yield GroupAttendeeRecord.create(
+            self.transactionUnderTest(),
+            resourceID=cobj._resourceID,
+            groupID=group.groupID,
+            membershipHash=-1,
+        )
</ins><span class="cx">         wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), &quot;group01&quot;)
</span><span class="cx">         self.assertEqual(len(wps), 1)
</span><span class="cx">         yield self.commit()
</span><span class="lines">@@ -1033,16 +1029,13 @@
</span><span class="cx">         # finally, simulate an event that has become old
</span><span class="cx">         self.patch(CalendarDirectoryRecordMixin, &quot;expandedMembers&quot;, unpatchedExpandedMembers)
</span><span class="cx"> 
</span><del>-        (
-            groupID, _ignore_name, _ignore_membershipHash, _ignore_modDate,
-            _ignore_extant
-        ) = yield self.transactionUnderTest().groupByUID(&quot;group01&quot;)
-        ga = schema.GROUP_ATTENDEE
-        yield Insert({
-            ga.RESOURCE_ID: cobj._resourceID,
-            ga.GROUP_ID: groupID,
-            ga.MEMBERSHIP_HASH: (-1),
-        }).on(self.transactionUnderTest())
</del><ins>+        group = yield self.transactionUnderTest().groupByUID(&quot;group01&quot;)
+        yield GroupAttendeeRecord.create(
+            self.transactionUnderTest(),
+            resourceID=cobj._resourceID,
+            groupID=group.groupID,
+            membershipHash=-1,
+        )
</ins><span class="cx">         wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), &quot;group01&quot;)
</span><span class="cx">         self.assertEqual(len(wps), 1)
</span><span class="cx">         yield self.commit()
</span></span></pre></div>
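<p>Both hunks in test_group_attendees.py replace a hand-built <code>Insert</code> against <code>schema.GROUP_ATTENDEE</code> with <code>GroupAttendeeRecord.create</code>. A sketch of that pattern (the helper name <code>linkGroupAttendee</code> is hypothetical; the keyword arguments come from the diff):</p>
<pre>
# Sketch only: simulates a stale group-attendee link, as the tests do.
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord

@inlineCallbacks
def linkGroupAttendee(txn, resourceID, groupUID):
    group = yield txn.groupByUID(groupUID)
    record = yield GroupAttendeeRecord.create(
        txn,
        resourceID=resourceID,
        groupID=group.groupID,
        membershipHash=-1,  # deliberately stale so the next refresh reconciles
    )
    returnValue(record)
</pre>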
<a id="CalendarServertrunktxdavwhotesttest_group_shareespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/test/test_group_sharees.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/test/test_group_sharees.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/test/test_group_sharees.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -84,7 +84,7 @@
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="cx">     def _check_notifications(self, uid, items):
</span><del>-        notifyHome = yield self.transactionUnderTest().notificationsWithUID(uid)
</del><ins>+        notifyHome = yield self.transactionUnderTest().notificationsWithUID(uid, create=True)
</ins><span class="cx">         notifications = yield notifyHome.listNotificationObjects()
</span><span class="cx">         self.assertEqual(set(notifications), set(items))
</span><span class="cx"> 
</span></span></pre></div>
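<p>The one-line change in test_group_sharees.py passes <code>create=True</code> so the notification home is provisioned on first access, rather than the test depending on it already existing for that sharee. Roughly (a sketch; <code>notificationUIDs</code> is a hypothetical helper):</p>
<pre>
# Sketch only.
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def notificationUIDs(txn, homeUID):
    # create=True provisions the notification collection on demand,
    # so listNotificationObjects() can always be called.
    notifyHome = yield txn.notificationsWithUID(homeUID, create=True)
    notifications = yield notifyHome.listNotificationObjects()
    returnValue(set(notifications))
</pre>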
<a id="CalendarServertrunktxdavwhotesttest_groupspy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/test/test_groups.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/test/test_groups.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/test/test_groups.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -67,27 +67,24 @@
</span><span class="cx">         record = yield self.directory.recordWithUID(u&quot;__top_group_1__&quot;)
</span><span class="cx">         yield self.groupCacher.refreshGroup(txn, record.uid)
</span><span class="cx"> 
</span><del>-        (
-            groupID, _ignore_name, membershipHash, _ignore_modified,
-            extant
-        ) = (yield txn.groupByUID(record.uid))
</del><ins>+        group = (yield txn.groupByUID(record.uid))
</ins><span class="cx"> 
</span><del>-        self.assertEquals(extant, True)
-        self.assertEquals(membershipHash, &quot;553eb54e3bbb26582198ee04541dbee4&quot;)
</del><ins>+        self.assertEquals(group.extant, True)
+        self.assertEquals(group.membershipHash, &quot;553eb54e3bbb26582198ee04541dbee4&quot;)
</ins><span class="cx"> 
</span><del>-        groupUID, name, membershipHash, extant = (yield txn.groupByID(groupID))
-        self.assertEquals(groupUID, record.uid)
-        self.assertEquals(name, u&quot;Top Group 1&quot;)
-        self.assertEquals(membershipHash, &quot;553eb54e3bbb26582198ee04541dbee4&quot;)
-        self.assertEquals(extant, True)
</del><ins>+        group = yield txn.groupByID(group.groupID)
+        self.assertEquals(group.groupUID, record.uid)
+        self.assertEquals(group.name, u&quot;Top Group 1&quot;)
+        self.assertEquals(group.membershipHash, &quot;553eb54e3bbb26582198ee04541dbee4&quot;)
+        self.assertEquals(group.extant, True)
</ins><span class="cx"> 
</span><del>-        members = (yield txn.groupMemberUIDs(groupID))
</del><ins>+        members = (yield txn.groupMemberUIDs(group.groupID))
</ins><span class="cx">         self.assertEquals(
</span><span class="cx">             set([u'__cdaboo1__', u'__glyph1__', u'__sagen1__', u'__wsanchez1__']),
</span><span class="cx">             members
</span><span class="cx">         )
</span><span class="cx"> 
</span><del>-        records = (yield self.groupCacher.cachedMembers(txn, groupID))
</del><ins>+        records = (yield self.groupCacher.cachedMembers(txn, group.groupID))
</ins><span class="cx">         self.assertEquals(
</span><span class="cx">             set([r.uid for r in records]),
</span><span class="cx">             set([u'__cdaboo1__', u'__glyph1__', u'__sagen1__', u'__wsanchez1__'])
</span><span class="lines">@@ -116,10 +113,7 @@
</span><span class="cx">         # Refresh the group so it's assigned a group_id
</span><span class="cx">         uid = u&quot;__top_group_1__&quot;
</span><span class="cx">         yield self.groupCacher.refreshGroup(txn, uid)
</span><del>-        (
-            groupID, name, _ignore_membershipHash, _ignore_modified,
-            _ignore_extant
-        ) = yield txn.groupByUID(uid)
</del><ins>+        group = yield txn.groupByUID(uid)
</ins><span class="cx"> 
</span><span class="cx">         # Remove two members, and add one member
</span><span class="cx">         newSet = set()
</span><span class="lines">@@ -133,12 +127,12 @@
</span><span class="cx">             newSet.add(record.uid)
</span><span class="cx">         added, removed = (
</span><span class="cx">             yield self.groupCacher.synchronizeMembers(
</span><del>-                txn, groupID, newSet
</del><ins>+                txn, group.groupID, newSet
</ins><span class="cx">             )
</span><span class="cx">         )
</span><span class="cx">         self.assertEquals(added, set([&quot;__dre1__&quot;, ]))
</span><span class="cx">         self.assertEquals(removed, set([&quot;__glyph1__&quot;, &quot;__sagen1__&quot;, ]))
</span><del>-        records = (yield self.groupCacher.cachedMembers(txn, groupID))
</del><ins>+        records = (yield self.groupCacher.cachedMembers(txn, group.groupID))
</ins><span class="cx">         self.assertEquals(
</span><span class="cx">             set([r.shortNames[0] for r in records]),
</span><span class="cx">             set([&quot;wsanchez1&quot;, &quot;cdaboo1&quot;, &quot;dre1&quot;])
</span><span class="lines">@@ -146,11 +140,11 @@
</span><span class="cx"> 
</span><span class="cx">         # Remove all members
</span><span class="cx">         added, removed = (
</span><del>-            yield self.groupCacher.synchronizeMembers(txn, groupID, set())
</del><ins>+            yield self.groupCacher.synchronizeMembers(txn, group.groupID, set())
</ins><span class="cx">         )
</span><span class="cx">         self.assertEquals(added, set())
</span><span class="cx">         self.assertEquals(removed, set([&quot;__wsanchez1__&quot;, &quot;__cdaboo1__&quot;, &quot;__dre1__&quot;, ]))
</span><del>-        records = (yield self.groupCacher.cachedMembers(txn, groupID))
</del><ins>+        records = (yield self.groupCacher.cachedMembers(txn, group.groupID))
</ins><span class="cx">         self.assertEquals(len(records), 0)
</span><span class="cx"> 
</span><span class="cx">         yield txn.commit()
</span><span class="lines">@@ -168,12 +162,12 @@
</span><span class="cx">         uid = u&quot;__top_group_1__&quot;
</span><span class="cx">         hash = &quot;553eb54e3bbb26582198ee04541dbee4&quot;
</span><span class="cx">         yield self.groupCacher.refreshGroup(txn, uid)
</span><del>-        (
-            groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
-            _ignore_extant
-        ) = yield txn.groupByUID(uid)
-        results = yield txn.groupByID(groupID)
-        self.assertEquals((uid, u&quot;Top Group 1&quot;, hash, True), results)
</del><ins>+        group = yield txn.groupByUID(uid)
+        group = yield txn.groupByID(group.groupID)
+        self.assertEqual(group.groupUID, uid)
+        self.assertEqual(group.name, u&quot;Top Group 1&quot;)
+        self.assertEqual(group.membershipHash, hash)
+        self.assertEqual(group.extant, True)
</ins><span class="cx"> 
</span><span class="cx">         yield txn.commit()
</span><span class="cx"> 
</span><span class="lines">@@ -683,31 +677,25 @@
</span><span class="cx"> 
</span><span class="cx">             txn = store.newTransaction()
</span><span class="cx">             yield self.groupCacher.refreshGroup(txn, uid)
</span><del>-            (
-                _ignore_groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
-                extant
-            ) = (yield txn.groupByUID(uid))
</del><ins>+            group = yield txn.groupByUID(uid)
</ins><span class="cx">             yield txn.commit()
</span><span class="cx"> 
</span><del>-            self.assertTrue(extant)
</del><ins>+            self.assertTrue(group.extant)
</ins><span class="cx"> 
</span><span class="cx">             # Remove the group
</span><span class="cx">             yield self.directory.removeRecords([uid])
</span><span class="cx"> 
</span><span class="cx">             txn = store.newTransaction()
</span><span class="cx">             yield self.groupCacher.refreshGroup(txn, uid)
</span><del>-            (
-                groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
-                extant
-            ) = (yield txn.groupByUID(uid))
</del><ins>+            group = (yield txn.groupByUID(uid))
</ins><span class="cx">             yield txn.commit()
</span><span class="cx"> 
</span><span class="cx">             # Extant = False
</span><del>-            self.assertFalse(extant)
</del><ins>+            self.assertFalse(group.extant)
</ins><span class="cx"> 
</span><span class="cx">             # The list of members stored in the DB for this group is now empty
</span><span class="cx">             txn = store.newTransaction()
</span><del>-            members = yield txn.groupMemberUIDs(groupID)
</del><ins>+            members = yield txn.groupMemberUIDs(group.groupID)
</ins><span class="cx">             yield txn.commit()
</span><span class="cx">             self.assertEquals(members, set())
</span><span class="cx"> 
</span><span class="lines">@@ -732,18 +720,15 @@
</span><span class="cx"> 
</span><span class="cx">             txn = store.newTransaction()
</span><span class="cx">             yield self.groupCacher.refreshGroup(txn, uid)
</span><del>-            (
-                groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
-                extant
-            ) = (yield txn.groupByUID(uid))
</del><ins>+            group = (yield txn.groupByUID(uid))
</ins><span class="cx">             yield txn.commit()
</span><span class="cx"> 
</span><span class="cx">             # Extant = True
</span><del>-            self.assertTrue(extant)
</del><ins>+            self.assertTrue(group.extant)
</ins><span class="cx"> 
</span><span class="cx">             # The list of members stored in the DB for this group has 100 users
</span><span class="cx">             txn = store.newTransaction()
</span><del>-            members = yield txn.groupMemberUIDs(groupID)
</del><ins>+            members = yield txn.groupMemberUIDs(group.groupID)
</ins><span class="cx">             yield txn.commit()
</span><span class="cx">             self.assertEquals(len(members), 100 if uid == u&quot;testgroup&quot; else 0)
</span><span class="cx"> 
</span><span class="lines">@@ -760,27 +745,27 @@
</span><span class="cx"> 
</span><span class="cx">             txn = store.newTransaction()
</span><span class="cx">             yield self.groupCacher.refreshGroup(txn, uid)
</span><del>-            groupID = (yield txn.groupByUID(uid, create=False))[0]
</del><ins>+            group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx">             yield txn.commit()
</span><span class="cx"> 
</span><del>-            self.assertNotEqual(groupID, None)
</del><ins>+            self.assertNotEqual(group, None)
</ins><span class="cx"> 
</span><span class="cx">             txn = store.newTransaction()
</span><span class="cx">             yield self.groupCacher.update(txn)
</span><del>-            groupID = (yield txn.groupByUID(uid, create=False))[0]
</del><ins>+            group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx">             yield txn.commit()
</span><span class="cx"> 
</span><del>-            self.assertEqual(groupID, None)
</del><ins>+            self.assertEqual(group, None)
</ins><span class="cx"> 
</span><span class="cx">         # delegate groups not deleted
</span><span class="cx">         for uid in (u&quot;testgroup&quot;, u&quot;emptygroup&quot;,):
</span><span class="cx"> 
</span><span class="cx">             txn = store.newTransaction()
</span><del>-            groupID = (yield txn.groupByUID(uid))[0]
-            yield txn.addDelegateGroup(delegator=u&quot;sagen&quot;, delegateGroupID=groupID, readWrite=True)
</del><ins>+            group = yield txn.groupByUID(uid)
+            yield txn.addDelegateGroup(delegator=u&quot;sagen&quot;, delegateGroupID=group.groupID, readWrite=True)
</ins><span class="cx">             yield txn.commit()
</span><span class="cx"> 
</span><del>-            self.assertNotEqual(groupID, None)
</del><ins>+            self.assertNotEqual(group, None)
</ins><span class="cx"> 
</span><span class="cx">             txn = store.newTransaction()
</span><span class="cx">             yield self.groupCacher.update(txn)
</span><span class="lines">@@ -788,21 +773,21 @@
</span><span class="cx">             yield JobItem.waitEmpty(store.newTransaction, reactor, 60)
</span><span class="cx"> 
</span><span class="cx">             txn = store.newTransaction()
</span><del>-            groupID = (yield txn.groupByUID(uid, create=False))[0]
</del><ins>+            group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx">             yield txn.commit()
</span><span class="cx"> 
</span><del>-            self.assertNotEqual(groupID, None)
</del><ins>+            self.assertNotEqual(group, None)
</ins><span class="cx"> 
</span><span class="cx">         # delegate group is deleted. unused group is deleted
</span><span class="cx">         txn = store.newTransaction()
</span><del>-        testGroupID = (yield txn.groupByUID(u&quot;testgroup&quot;, create=False))[0]
-        yield txn.removeDelegateGroup(delegator=u&quot;sagen&quot;, delegateGroupID=testGroupID, readWrite=True)
-        testGroupID = (yield txn.groupByUID(u&quot;testgroup&quot;, create=False))[0]
-        emptyGroupID = (yield txn.groupByUID(u&quot;emptygroup&quot;, create=False))[0]
</del><ins>+        testGroup = yield txn.groupByUID(u&quot;testgroup&quot;, create=False)
+        yield txn.removeDelegateGroup(delegator=u&quot;sagen&quot;, delegateGroupID=testGroup.groupID, readWrite=True)
+        testGroup = yield txn.groupByUID(u&quot;testgroup&quot;, create=False)
+        emptyGroup = yield txn.groupByUID(u&quot;emptygroup&quot;, create=False)
</ins><span class="cx">         yield txn.commit()
</span><span class="cx"> 
</span><del>-        self.assertNotEqual(testGroupID, None)
-        self.assertNotEqual(emptyGroupID, None)
</del><ins>+        self.assertNotEqual(testGroup, None)
+        self.assertNotEqual(emptyGroup, None)
</ins><span class="cx"> 
</span><span class="cx">         txn = store.newTransaction()
</span><span class="cx">         yield self.groupCacher.update(txn)
</span><span class="lines">@@ -810,12 +795,12 @@
</span><span class="cx">         yield JobItem.waitEmpty(store.newTransaction, reactor, 60)
</span><span class="cx"> 
</span><span class="cx">         txn = store.newTransaction()
</span><del>-        testGroupID = (yield txn.groupByUID(u&quot;testgroup&quot;, create=False))[0]
-        emptyGroupID = (yield txn.groupByUID(u&quot;emptygroup&quot;, create=False))[0]
</del><ins>+        testGroup = yield txn.groupByUID(u&quot;testgroup&quot;, create=False)
+        emptyGroup = yield txn.groupByUID(u&quot;emptygroup&quot;, create=False)
</ins><span class="cx">         yield txn.commit()
</span><span class="cx"> 
</span><del>-        self.assertEqual(testGroupID, None)
-        self.assertNotEqual(emptyGroupID, None)
</del><ins>+        self.assertEqual(testGroup, None)
+        self.assertNotEqual(emptyGroup, None)
</ins><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx">     @inlineCallbacks
</span><span class="lines">@@ -831,42 +816,33 @@
</span><span class="cx"> 
</span><span class="cx">             config.AutomaticPurging.GroupPurgeIntervalSeconds = oldGroupPurgeIntervalSeconds
</span><span class="cx">             txn = store.newTransaction()
</span><del>-            groupID = (yield txn.groupByUID(uid))[0]
-            yield txn.addDelegateGroup(delegator=u&quot;sagen&quot;, delegateGroupID=groupID, readWrite=True)
-            (
-                groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
-                extant
-            ) = yield txn.groupByUID(uid, create=False)
</del><ins>+            group = yield txn.groupByUID(uid)
+            yield txn.addDelegateGroup(delegator=u&quot;sagen&quot;, delegateGroupID=group.groupID, readWrite=True)
+            group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx">             yield txn.commit()
</span><span class="cx"> 
</span><del>-            self.assertTrue(extant)
-            self.assertNotEqual(groupID, None)
</del><ins>+            self.assertNotEqual(group, None)
+            self.assertTrue(group.extant)
</ins><span class="cx"> 
</span><span class="cx">             # Remove the group, still cached
</span><span class="cx">             yield self.directory.removeRecords([uid])
</span><span class="cx">             txn = store.newTransaction()
</span><span class="cx">             yield self.groupCacher.update(txn)
</span><del>-            (
-                groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
-                extant
-            ) = yield txn.groupByUID(uid, create=False)
</del><ins>+            group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx">             yield txn.commit()
</span><span class="cx">             yield JobItem.waitEmpty(store.newTransaction, reactor, 60)
</span><span class="cx"> 
</span><span class="cx">             txn = store.newTransaction()
</span><del>-            (
-                groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
-                extant
-            ) = yield txn.groupByUID(uid, create=False)
</del><ins>+            group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx">             yield txn.commit()
</span><del>-            self.assertNotEqual(groupID, None)
-            self.assertFalse(extant)
</del><ins>+            self.assertNotEqual(group, None)
+            self.assertFalse(group.extant)
</ins><span class="cx"> 
</span><span class="cx">             # delete the group
</span><span class="cx">             config.AutomaticPurging.GroupPurgeIntervalSeconds = &quot;0.0&quot;
</span><span class="cx"> 
</span><span class="cx">             txn = store.newTransaction()
</span><span class="cx">             yield self.groupCacher.update(txn)
</span><del>-            groupID = (yield txn.groupByUID(uid, create=False))[0]
</del><ins>+            group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx">             yield txn.commit()
</span><del>-            self.assertEqual(groupID, None)
</del><ins>+            self.assertEqual(group, None)
</ins></span></pre>
</div>
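<p>The test_groups.py changes apply the same record-object pattern throughout: existence is now tested with <code>group is None</code> and state with attribute access, instead of indexing into a result tuple. A condensed sketch of the purge-related checks (the helper name <code>groupStillCached</code> is hypothetical):</p>
<pre>
# Sketch only.
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def groupStillCached(txn, uid):
    # groupByUID(..., create=False) returns None once a group has been
    # purged, so no tuple slot needs to be inspected.
    group = yield txn.groupByUID(uid, create=False)
    returnValue(group is not None and group.extant)
</pre>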
</div>

</body>
</html>